diff --git a/README.md b/README.md index e69161c..becca58 100644 --- a/README.md +++ b/README.md @@ -97,6 +97,7 @@ - [ ] ![weight_gradient translated 0%](https://geps.dev/progress/0?dangerColor=c9f2dc&warningColor=6cc570&successColor=00ff7f) [weight_gradient](https://github.com/DingoBite/weight_gradient) - [ ] ![stable-diffusion-webui-composable-lora translated 93%](https://geps.dev/progress/93?dangerColor=c9f2dc&warningColor=6cc570&successColor=00ff7f) [stable-diffusion-webui-composable-lora](https://github.com/a2569875/stable-diffusion-webui-composable-lora) - [ ] ![sd-webui-agent-scheduler translated 0%](https://geps.dev/progress/0?dangerColor=c9f2dc&warningColor=6cc570&successColor=00ff7f) [sd-webui-agent-scheduler](https://github.com/ArtVentureX/sd-webui-agent-scheduler) +- [ ] ![zh-Hans (Stable) translated 0%](https://geps.dev/progress/0?dangerColor=c9f2dc&warningColor=6cc570&successColor=00ff7f) [zh-Hans (Stable)]() # 安裝說明 diff --git a/localizations/zh_TW.json b/localizations/zh_TW.json index 44db6cf..e4dd31a 100644 --- a/localizations/zh_TW.json +++ b/localizations/zh_TW.json @@ -1627,384 +1627,633 @@ "Extension for Automatic1111 to automatically create masks for Background/Hair/Body/Face/Clothes in Img2Img": "Extension for Automatic1111 to automatically create masks for Background/Hair/Body/Face/Clothes in Img2Img", "Style variables": "Style variables", "Use existing styles as variables in prompts": "Use existing styles as variables in prompts", - "Tagger": "Tagger", - "Single process": "Single process", - "Batch from directory": "Batch from directory", - "Use recursive with glob pattern": "Use recursive with glob pattern", - "Output filename format": "Output filename format", - "Output filename formats": "Output filename formats", - "Related to original file": "Related to original file", - "[name]": "[name]", - ": Original filename without extension": ": Original filename without extension", - "[extension]": "[extension]", - ": Original extension": ": 
Original extension", - "[hash:]": "[hash:]", - ": Original extension\nAvailable algorithms:": ": Original extension\nAvailable algorithms:", - "sha1, blake2s, shake_256, sha256, md5-sha1, sha512_256, shake_128, mdc2, ripemd160, whirlpool, md5, sha3_384, sha512, sha3_512, blake2b, sha224, sm3, sha512_224, sha3_224, sha384, md4, sha3_256": "sha1, blake2s, shake_256, sha256, md5-sha1, sha512_256, shake_128, mdc2, ripemd160, whirlpool, md5, sha3_384, sha512, sha3_512, blake2b, sha224, sm3, sha512_224, sha3_224, sha384, md4, sha3_256", - "Related to output file": "Related to output file", - "[output_extension]": "[output_extension]", - ": Output extension (has no dot)": ": Output extension (has no dot)", - "Examples": "Examples", - "Original filename without extension": "Original filename without extension", - "[name].[output_extension]": "[name].[output_extension]", - "Original file's hash (good for deleting duplication)": "Original file's hash (good for deleting duplication)", - "[hash:sha1].[output_extension]": "[hash:sha1].[output_extension]", - "Action on existing caption": "Action on existing caption", - "Remove duplicated tag": "Remove duplicated tag", - "Save with JSON": "Save with JSON", - "Preset": "Preset", - "default.json": "default.json", - "Interrogator": "Interrogator", - "wd14-convnext": "wd14-convnext", - "wd14-convnext-v2": "wd14-convnext-v2", - "wd14-convnext-v2-git": "wd14-convnext-v2-git", - "wd14-swinv2-v2": "wd14-swinv2-v2", - "wd14-swinv2-v2-git": "wd14-swinv2-v2-git", - "wd14-vit": "wd14-vit", - "wd14-vit-v2": "wd14-vit-v2", - "wd14-vit-v2-git": "wd14-vit-v2-git", - "Unload all interrogate models": "Unload all interrogate models", - "Threshold": "Threshold", - "Additional tags (split by comma)": "Additional tags (split by comma)", - "Exclude tags (split by comma)": "Exclude tags (split by comma)", - "Sort by alphabetical order": "Sort by alphabetical order", - "Include confident of tags matches in results": "Include confident of tags matches in 
results", - "Use spaces instead of underscore": "Use spaces instead of underscore", - "Excudes (split by comma)": "Excudes (split by comma)", - "Escape brackets": "Escape brackets", - "Unload model after running": "Unload model after running", - "Tags": "標記", - "Rating confidents": "Rating confidents", - "Tag confidents": "Tag confidents", - "stable-diffusion-webui-wd14-tagger": "stable-diffusion-webui-wd14-tagger", - "https://github.com/toriato/stable-diffusion-webui-wd14-tagger.git": "https://github.com/toriato/stable-diffusion-webui-wd14-tagger.git", - "/path/to/images or /path/to/images/**/*": "/path/to/images or /path/to/images/**/*", - "Leave blank to save images to the same path.": "Leave blank to save images to the same path.", - "Leave blank to use same filename as original.": "Leave blank to use same filename as original.", - "Reconstruct prompt from existing image and put it into the prompt field.": "從現有的圖像中重構出提示詞,並將其放入提示詞的輸入文字方塊", - "Found tags": "Found tags", - "Training Picker": "訓練圖挑選器", - "Video to extract frames from:": "要從中提取幀的影片:", - "Only extract keyframes (recommended)": "只提取關鍵幀(推薦)", - "Extract every nth frame": "每第 n 幀提取一次", - "Extract Frames": "提取幀", - "Extracted Frame Set": "提取好的幀", - "Resize crops to 512x512": "縮放裁剪至 512x512", - "Outfill method:": "填充方法:", - "Don't outfill": "不進行填充", - "Stretch image": "拉伸圖像", - "Transparent": "透明", - "Solid color": "純色", - "Average image color": "平均圖像顏色", - "Dominant image color": "圖像主色", - "Stretch pixels at border": "延伸邊緣的畫素", - "Reflect image around border": "從邊緣鏡像圖像內容", - "Blurred & stretched overlay": "模糊拉伸的疊加層", - "Reuse original image": "復用原圖", - "Reset Aspect Ratio": "重置縱橫比", - "Process": "行程, 程序", - "Image border outfill method:": "圖像邊緣的填充方法:", - "Black outfill": "填黑", - "Outfill border color:": "填充顏色:", - "Blur amount:": "模糊量:", - "Number of clusters:": "簇數:", - "Save crops to:": "儲存裁剪好的成品到:", - "Image": "映像檔", - "Number": "數量", - "Fixed size to resize images to": "調整圖像大小到固定大小", - "Path to read 
videos from": "讀取影片的路徑", - "Path to store extracted frame sets in": "儲存截取幀的路徑", - "Default cropped image output directory": "裁切後的成品的默認輸出目錄", - "https://github.com/Maurdekye/training-picker.git": "https://github.com/Maurdekye/training-picker.git", - "Tag Autocomplete": "標記自動補齊", - "Tag filename": "標記檔檔名", - "Enable Tag Autocompletion": "啟用標記自動補齊", - "Active in txt2img (Requires restart)": "在文生圖中啟用(需要儲存設定並重新啟動)", - "Active in img2img (Requires restart)": "在圖生圖中啟用(需要儲存設定並重新啟動)", - "Active in negative prompts (Requires restart)": "在反向提示詞中啟用(需要儲存設定並重新啟動)", - "Active in third party textboxes [Dataset Tag Editor] (Requires restart)": "在第三方擴充功能「數據集標記編輯器」的文字方塊中啟用(需要儲存設定並重新啟動)", - "List of model names (with file extension) or their hashes to use as black/whitelist, separated by commas.": "要用作黑名單/白名單的模型名稱清單(包括檔案副檔名)或其雜湊值,用逗號分隔。", - "Mode to use for model list": "模型名稱清單的使用模式", - "Blacklist": "黑名單", - "Whitelist": "白名單", - "Move completion popup together with text cursor": "移動彈出視窗至文字游標處", - "Maximum results": "最大結果", - "Show all results": "顯示所有結果", - "How many results to load at once": "一次載入多少個結果", - "Time in ms to wait before triggering completion again (Requires restart)": "在再次觸發完成之前等待的毫秒數(需要儲存設定並重新啟動)", - "Search for wildcards": "搜尋萬用字元", - "Search for embeddings": "搜尋嵌入", - "Search for hypernetworks": "搜尋超網絡", - "Search for Loras": "搜尋 LoRA", - "Show '?' 
next to tags, linking to its Danbooru or e621 wiki page (Warning: This is an external site and very likely contains NSFW examples!)": "在標記旁顯示「?」,連結到其 Danbooru 或 e621 wiki 頁面(警告:這是外部網站,很可能包含 NSFW 內容!)", - "Replace underscores with spaces on insertion": "插入時將下橫線替換成空格", - "Escape parentheses on insertion": "插入時轉義括號", - "Append comma on tag autocompletion": "自動完成標記時加入逗號", - "Search by alias": "以別名搜尋", - "Only show alias": "僅顯示別名", - "Translation filename": "翻譯檔檔名", - "Translation file uses old 3-column translation format instead of the new 2-column one": "翻譯檔使用舊的三欄位翻譯格式,而非新的兩欄位格式", - "Search by translation": "以翻譯搜尋", - "Extra filename (for small sets of custom tags)": "追加標記檔檔名(用於小型的自定義標記集)", - "Mode to add the extra tags to the main tag list": "將追加標記加入主標記清單的模式", - "Insert before": "前綴插入", - "Insert after": "後綴插入", - "a1111-sd-webui-tagcomplete": "a1111-sd-webui-tagcomplete", - "https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git": "https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git", - "Records list": "Records list", - "Reload": "Reload", - "Download All": "Download All", - "Add": "加入", - "Display options": "Display options", - "Import/Export": "Import/Export", - "Back": "Back", - "Remove": "Remove", - "Edit": "Edit", - "Download": "Download", - "Description:": "Description:", - "Name:": "Name:", - "Model title to display (Required)": "Model title to display (Required)", - "Model type:": "Model type:", - "Model type (Required)": "Model type (Required)", - "Download URL:": "Download URL:", - "Link to the model file (Required)": "Link to the model file (Required)", - "Preview image URL:": "Preview image URL:", - "Link to the image for preview (Optional)": "Link to the image for preview (Optional)", - "Model page URL:": "Model page URL:", - "Link to the model page (Optional)": "Link to the model page (Optional)", - "Groups": "Groups", - "Select existing groups or add new.": "Select existing groups or add new.", - "Add groups": "Add groups", - "Bind 
with existing model": "Bind with existing model", - "Download options": "Download options", - "Prompts": "提示詞", - "description_output_widget": "description_output_widget", - "Record removal": "Record removal", - "Remove record": "Remove record", - "Remove files": "Remove files", - "Downloads": "Downloads", - "Start Download": "Start Download", - "Cancel Download": "Cancel Download", - "Layout Type:": "Layout Type:", - "Cards": "Cards", - "Table": "Table", - "Card width (default if 0):": "Card width (default if 0):", - "Card height (default if 0):": "Card height (default if 0):", - "Storage Type:": "Storage Type:", - "SQLite": "SQLite", - "Firebase": "Firebase", - "Download Preview": "Download Preview", - "sd-model-organizer": "sd-model-organizer", - "https://github.com/alexandersokol/sd-model-organizer": "https://github.com/alexandersokol/sd-model-organizer", - "652580de (Mon May 8 11:00:16 2023)": "652580de (Mon May 8 11:00:16 2023)", - "Sort By": "Sort By", - "Downloaded first": "Downloaded first", - "Search by name": "Search by name", - "Model types": "Model types", - "Show downloaded": "Show downloaded", - "Show not downloaded": "Show not downloaded", - "Time Added": "Time Added", - "Time Added Reversed": "Time Added Reversed", - "Name Reversed": "Name Reversed", - "Hyper Network": "Hyper Network", - "LyCORIS": "LyCORIS", - "Other": "其他", - "Import .json file": "Import .json file", - "Export": "Export", - "Add model": "Add model", - "Download Path:": "Download Path:", - "Path to the download dir, default if empty. (Required for \"Other\" model type)": "Path to the download dir, default if empty. (Required for \"Other\" model type)", - "Download File Name:": "Download File Name:", - "Downloaded file name. Default if empty (Optional)": "Downloaded file name. 
Default if empty (Optional)", - "Subdir": "Subdir", - "Download file into sub directory (Optional)": "Download file into sub directory (Optional)", - "Add new group": "Add new group", - "Type comma-separated group names": "Type comma-separated group names", - "Add Group": "Add Group", - "Positive prompts:": "Positive prompts:", - "Model positive prompts (Optional)": "Model positive prompts (Optional)", - "Negative prompts:": "Negative prompts:", - "Model negative prompts (Optional)": "Model negative prompts (Optional)", - "Wildcards Manager": "萬用字元管理器", - "Dynamic Prompts enabled": "啟用動態提示詞", - "Combinatorial generation": "組合生成", - "Max generations (0 = all combinations - the batch count value is ignored)": "最大產生數(0 = 所有組合 - 忽略批次數值)", - "Combinatorial batches": "組合批次", - "Prompt Magic": "提示詞魔法", - "Magic prompt": "魔法提示詞", - "Max magic prompt length": "魔法提示詞最大長度", - "Magic prompt creativity": "魔法提示詞創意", - "Magic prompt model": "魔法提示詞模型", - "Gustavosta/MagicPrompt-Stable-Diffusion": "Gustavosta/MagicPrompt-Stable-Diffusion", - "daspartho/prompt-extend": "daspartho/prompt-extend", - "succinctly/text2image-prompt-generator": "succinctly/text2image-prompt-generator", - "microsoft/Promptist": "microsoft/Promptist", - "AUTOMATIC/promptgen-lexart": "AUTOMATIC/promptgen-lexart", - "AUTOMATIC/promptgen-majinai-safe": "AUTOMATIC/promptgen-majinai-safe", - "AUTOMATIC/promptgen-majinai-unsafe": "AUTOMATIC/promptgen-majinai-unsafe", - "kmewhort/stable-diffusion-prompt-bolster": "kmewhort/stable-diffusion-prompt-bolster", - "Gustavosta/MagicPrompt-Dalle": "Gustavosta/MagicPrompt-Dalle", - "Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator": "Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator", - "Ar4ikov/gpt2-medium-650k-stable-diffusion-prompt-generator": "Ar4ikov/gpt2-medium-650k-stable-diffusion-prompt-generator", - "crumb/bloom-560m-RLHF-SD2-prompter-aesthetic": "crumb/bloom-560m-RLHF-SD2-prompter-aesthetic", - "Meli/GPT2-Prompt": "Meli/GPT2-Prompt", - 
"DrishtiSharma/StableDiffusion-Prompt-Generator-GPT-Neo-125M": "DrishtiSharma/StableDiffusion-Prompt-Generator-GPT-Neo-125M", - "Magic prompt blocklist regex": "魔法提示詞黑名單。", - "Magic Prompt batch size": "魔法提示詞批次大小", - "I'm feeling lucky": "手氣不錯", - "Attention grabber": "隨機關鍵詞吸引註意力", - "Minimum attention": "最小注意力", - "Maximum attention": "最大注意力", - "Don't apply to negative prompts": "不用於反向提示詞。", - "Need help?": "需要幫助?", - "Syntax cheatsheet": "語法速查表", - "Tutorial": "教學", - "Discussions": "討論串", - "Report a bug": "回報錯誤", - "Combinations": "組合", - "Choose a number of terms from a list, in this case we choose two artists:": "從列表中選幾項,這裡選了兩個藝術家", - "{2$$artist1|artist2|artist3}": "{2$$artist1|artist2|artist3}", - "If $$ is not provided, then 1$$ is assumed.": "若沒提供 $$,默認為 1$$", - "If the chosen number of terms is greater than the available terms, then some terms will be duplicated, otherwise chosen terms will be unique. This is useful in the case of wildcards, e.g.": "選的項數多於提供的項數時,有些項會重複,其餘情況各選項會保持唯一;\n重複對於萬用字元很有用,例如:", - "{2$$__artist__}": "{2$$__artist__}", - "is equivalent to": "等同於", - "{2$$__artist__|__artist__}": "{2$$__artist__|__artist__}", - "A range can be provided:": "項數可以有範圍", - "{1-3$$artist1|artist2|artist3}": "{1-3$$artist1|artist2|artist3}", - "In this case, a random number of artists between 1 and 3 is chosen.": "此例中,會從中隨機選 1 至 3 個藝術家", - "Options can be given weights:": "可以給選項權重:", - "{2::artist1|artist2}": "{2::artist1|artist2}", - "In this case, artist1 will be chosen twice as often as artist2.": "此例中,藝術家 1 將會比藝術家 2 高兩倍的機會被選中", - "Wildcards can be used and the joiner can also be specified:": "可以用萬用字元,也可以指定拼接符", - "{{1-$$and$$__adjective__}}": "{{1-3$$and$$__adjective__}}", - "Here, a random number between 1 and 3 words from adjective.txt will be chosen and joined together with the word 'and' instead of the default comma.": "此處,會從 adjective.txt 中選取隨機 1 至 3 行,以 'and'(而不是默認的逗號)拼接", - "Find and manage wildcards in the Wildcards Manager tab.": 
"在萬用字元管理器分頁中尋找並管理萬用字元", - "__/mywildcards__": "__/mywildcards__", - "will then become available.": "目錄內的文字檔案將可以被讀取。", - "Find more settings on the": "尋找更多設定請前往", - "Jinja2 templates": "Jinja2 模板", - "Enable Jinja2 templates": "啟用 Jinja2 模板", - "Help for Jinja2 templates": "Jinja2 模板幫助", - "Jinja2 templates is an experimental feature for advanced template generation. It is not recommended for general use unless you are comfortable with writing scripts.": "Jinja2 範本是一個用於進階範本產生的實驗性特性。如果不熟悉編寫指令碼,正常使用時不建議啟用", - "Literals": "字面值", - "I love red roses": "I love red roses", - "Random choices": "隨機選擇", - "I love {{ choice('red', 'blue', 'green') }} roses": "I love {{ choice('red', 'blue', 'green') }} roses", - "This will randomly choose one of the three colors.": "會隨機從三種顏色中選一個", - "Iterations": "迭代次數", - "{% for colour in ['red', 'blue', 'green'] %}\n {% prompt %}I love {{ colour }} roses{% endprompt %}\n {% endfor %}": "{% for colour in ['red', 'blue', 'green'] %}\n {% prompt %}I love {{ colour }} roses{% endprompt %}\n {% endfor %}", - "This will produce three prompts, one for each color. The prompt tag is used to mark the text that will be used as the prompt. 
If no prompt tag is present then only one prompt is assumed": "會產生三條提示詞,每個顏色各一條;\n 提示詞標籤用於標記作為提示詞的文字;\n 如果沒有提示詞標籤則默認為僅一條提示詞", - "{% for colour in wildcard(\"__colours__\") %}\n {% prompt %}I love {{ colour }} roses{% endprompt %}\n {% endfor %}": "{% for colour in wildcard(\"__colours__\") %}\n {% prompt %}I love {{ colour }} roses{% endprompt %}\n {% endfor %}", - "This will produce one prompt for each colour in the wildcard.txt file.": "會為 colours.txt 中的每個顏色產生一條提示詞", - "Conditionals": "條件", - "{% for colour in [\"red\", \"blue\", \"green\"] %}\n {% if colour == \"red\"}\n {% prompt %}I love {{ colour }} roses{% endprompt %}\n {% else %}\n {% prompt %}I hate {{ colour }} roses{% endprompt %}\n {% endif %}\n {% endfor %}": "{% for colour in [\"red\", \"blue\", \"green\"] %}\n {% if colour == \"red\"}\n {% prompt %}I love {{ colour }} roses{% endprompt %}\n {% else %}\n {% prompt %}I hate {{ colour }} roses{% endprompt %}\n {% endif %}\n {% endfor %}", - "This will produce the following prompts:": "會產生下列提示詞", - "I hate blue roses": "I hate blue roses", - "I hate green roses": "I hate green roses", - "Jinja2 templates are based on the Jinja2 template engine. For more information see the": "Jinja2 模板基於 Jinja2 模板引擎,更多訊息參考", - "Jinja2 documentation.": "Jinja2 文件資料。", - "If you are using these templates, please let me know if they are useful.": "如果你在用這些模板,請告訴我它們是否有用", - "Advanced options": "進階選項", - "Some settings have been moved to the settings tab. Find them in the Dynamic Prompts section.": "一些選像已移至設定。 位於動態提示詞部分。", - "Unlink seed from prompt": "將隨機種子與提示詞解綁", - "Fixed seed": "固定隨機種子", - "Write raw prompt to image": "將提示詞寫入圖像", - "Don't generate images": "不產生圖像", - "Write prompts to file": "將提示詞寫入檔案", - "Manage wildcards for Dynamic Prompts": "管理動態提示詞擴充功能的萬用字元", - "1. Create your wildcard library by copying a collection using the dropdown below.": "1. 使用下方的下拉選單選擇一個選集,並按下複製選集來創建您的萬用字源庫。", - "2. Click on any of the files that appear in the tree to edit them.": "2. 
點擊上方選項中出現的文件進行編輯。", - "3. Use the wildcard in your script by typing the name of the file or copying the text from the Wildcards file text box": "3. 在提示詞中使用萬用字元,輸入檔案名稱或從萬用字元檔案檔案編輯器中複製文字。", - "Select a collection": "選擇選集", - "artists": "藝術家", - "devilkkw": "devilkkw", - "jumbo": "jumbo", - "nai": "nai", - "nsp": "nsp", - "parrotzone": "parrotzone", - "Copy collection": "複製選集", - "Overwrite existing": "覆寫既有", - "Refresh wildcards": "重新整理萬用字元", - "Delete all wildcards": "刪除全部萬用字元", - "Wildcards file": "萬用字元檔案", - "File editor": "檔案編輯器", - "Save wildcards": "儲存萬用字元", - "Ignore whitespace in prompts: All newlines, tabs, and multiple spaces are replaced by a single space": "忽略提示中的空格:所有換行符號、定位點和多個空格都會被替換為一個空格。", - "Save template to metadata: Write prompt template into the PNG metadata": "將範本保存至元數據:將提示詞範本寫入PNG數據中", - "Write prompts to file: Create a new .txt file for every batch containing the prompt template as well as the generated prompts.": "將提示寫入文件:為每個批次創建一個新的 .txt 文件,其中包含提示範本和產生的提示詞。", - "String to use as left bracket for parser variants, .e.g {variant1|variant2|variant3}": "作為解析器變體左括號的字符串,例如 {variant1|variant2|variant3}", - "String to use as right bracket for parser variants, .e.g {variant1|variant2|variant3}": "作為解析器變體右括號的字符串,例如 {variant1|variant2|variant3}", - "String to use as wrap for parser wildcard, .e.g __wildcard__": "用作解析萬用字元的字串命令,例如 __wildcard__", - "Limit Jinja prompts: Limit the number of prompts to batch_count * batch_size. 
The default is to generate batch_count * barch_size * number of prompts generated by Jinja": "將提示的數量限制為 batch_count * batch_size。預設是產生 batch_count * batch_size * Jinja 產生的提示數量。", - "sd-dynamic-prompts": "sd-dynamic-prompts", - "https://github.com/adieyal/sd-dynamic-prompts.git": "https://github.com/adieyal/sd-dynamic-prompts.git", - "Disable dynamic prompts by unchecking this box.": "取消勾選以停用動態提示詞", - "Instead of generating random prompts from a template, combinatorial generation produces every possible prompt from the given string.\nThe prompt 'I {love|hate} {New York|Chicago} in {June|July|August}' will produce 12 variants in total.\n\nThe value of the 'Seed' field is only used for the first image. To change this, look for 'Fixed seed' in the 'Advanced options' section.": "使用組合產生而非從範本產生隨機提示,會從給定的字串中產生所有可能的提示。\n例如提示字串'I {love|hate} {New York|Chicago} in {June|July|August}',會總共產生12種不同的提示。\n\n「Seed」欄位的值僅會用於生成第一張圖片。若要更改此值,請尋找「Advanced options」區塊中的「Fixed seed」選項。", - "Limit the maximum number of prompts generated. 0 (default) will generate all images. Useful to prevent an unexpected combinatorial explosion.": "限制產生的提示詞數量的上限,預設為0,表示產生所有圖像。這個選項可以防止過多的組合。", - "Re-run your combinatorial batch this many times with a different seed each time.": "這個選項表示用不同的種子重新運行組合產生的批次數據。在每次運行中,種子值都會改變,這樣可以產生不同的隨機順序或排列組合。", - "Magic Prompt adds interesting modifiers to your prompt for a little bit of extra spice.\nThe first time you use it, the MagicPrompt model is downloaded so be patient.\nIf you're running low on VRAM, you might get a CUDA error.": "魔法提示詞在提示詞中加入有趣的修飾,額外增添趣味。\n首次使用時會下載 MagicPrompt 模型,請耐心等待。\n在低 VRAM 情況下可能會導致 CUDA 報錯。", - "Controls the maximum length in tokens of the generated prompt.": "按標記數控制已產生的提示詞最大長度", - "Adjusts the generated prompt. You will need to experiment with this setting.": "調整已產生的提示詞,使用時要嘗試調整此設定", - "Regular expression pattern for blocking terms out of the generated prompt. Applied case-insensitively. 
For instance, to block both \"purple\" and \"interdimensional\", you could use the pattern \"purple|interdimensional\".": "用於排除提示詞語的表達式。忽略大小寫。例如,要封鎖 'purple' 和 'interdimensional',可以使用 'purple | interdimensional'", - "The number of prompts to generate per batch. Increasing this can speed up prompt generation at the expense of slightly increased VRAM usage.": "每個批次要產生的提示詞數量。增加此數量可以加快提示詞產生速度,但會略微增加 VRAM 的使用。", - "Uses the lexica.art API to create random prompts.\nThe prompt in the main prompt box is used as a search string.\nLeaving the prompt box blank returns a list of completely randomly chosen prompts.\nTry it out, it can be quite fun.": "用 lexica.art API 生成隨機提示詞\n提示詞框中的內容會作為搜索字串\n留空提示詞框會得到一組完全隨機選擇的提示詞\n用用看,它會很有趣", - "Randomly selects a keyword from the prompt and adds emphasis to it. Try this with Fixed Seed enabled.": "隨機強調提示詞中的一個關鍵詞,嘗試前要啟用固定隨機種子", - "Don't use prompt magic on negative prompts.": "不要對反向提示詞使用提示詞魔法。", - "Jinja2 templates are an expressive alternative to the standard syntax. See the Help section below for instructions.": "Jinja2 模板是標準語法富有表現力的一種替代品,相關說明參見下方幫助欄", - "Check this if you want to generate random prompts, even if your seed is fixed": "勾選此選項以在固定隨機種子的情況下依然產生隨機提示詞", - "Select this if you want to use the same seed for every generated image.\nThis is useful if you want to test prompt variations while using the same seed.\nIf there are no wildcards then all the images will be identical.": "勾選此選項以對每張產生的圖像用同樣的隨機種子。\n這在想用同樣的隨機種子測試提示詞變化時會有用。\n沒有萬用字元則所有圖像會相同。", - "Write the prompt template into the image metadata": "將提示詞範本寫入圖片數據。", - "Be sure to check the 'Write prompts to file' checkbox if you don't want to lose the generated prompts. Note, one image is still generated.": "不想失去產生的提示詞的話,需確保勾選 「將提示詞寫入檔案」。注意,依然會生成一張圖像。", - "The generated file is a slugified version of the prompt and can be found in the same directory as the generated images.\nE.g. 
in ./outputs/txt2img-images/.": "產生的檔案包含處理過的提示詞,和產生的圖像在同一目錄。\n例如 ./outputs/txt2img-images/", - "Complete documentation is available at https://github.com/adieyal/sd-dynamic-prompts. Please report any issues on GitHub.": "完整說明請在 https://github.com/adieyal/sd-dynamic-prompts 上取得。 任何問提請在 GitHub 上報告。", - "Generate all possible prompt combinations.": "產生所有可能的提示詞組合。", - "Automatically update your prompt with interesting modifiers. (Runs slowly the first time)": "使用有趣的修飾符自動更新你的提示詞。(第一次運行會比較慢)", - "Generate random prompts from lexica.art (your prompt is used as a search query).": "從 lexica.art 產生隨機提示詞(你的提示詞會被用作搜尋查詢)", - "Use the same seed for all prompts in this batch": "對這批次中的所有提示詞使用相同的種子", - "Write all generated prompts to a file": "將所有產生的提示詞寫入檔案", - "If this is set, then random prompts are generated, even if the seed is the same.": "如果設定了此項,則會產生隨機提示詞,即使種子相同。", - "Disable image generation. Useful if you only want to generate text prompts. (1 image will still be generated to keep Auto1111 happy.).": "停用圖像產生。這很有用,如果你只想產生文字提示。(仍將生成 1 張圖像以使 Auto1111 保持運行)", - "Add emphasis to a randomly selected keyword in the prompt.": "在提示詞中隨機選擇一個關鍵字加上強調符", - "Write template into image metadata.": "將範本寫入圖像中繼資料。", - "Note: Each model will download between 300mb and 1.4gb of data on first use.": "註記:每個模型第一次使用時會下載 300MB 到 1.4GB 的檔案。", - "Enable Dynamic Thresholding (CFG Scale Fix)": "Enable Dynamic Thresholding (CFG Scale Fix)", - "View": "View", - "the wiki for usage tips.": "the wiki for usage tips.", - "Mimic CFG Scale": "Mimic CFG Scale", - "Dynamic Thresholding Advanced Options": "Dynamic Thresholding Advanced Options", - "sd-dynamic-thresholding": "sd-dynamic-thresholding", - "https://github.com/mcmonkeyprojects/sd-dynamic-thresholding.git": "https://github.com/mcmonkeyprojects/sd-dynamic-thresholding.git", - "Top percentile of latents to clamp": "Top percentile of latents to clamp", - "Mimic Scale Scheduler": "Mimic Scale Scheduler", - "Constant": "Constant", - "Minimum value of the 
Mimic Scale Scheduler": "Minimum value of the Mimic Scale Scheduler", - "CFG Scale Scheduler": "CFG Scale Scheduler", - "Minimum value of the CFG Scale Scheduler": "Minimum value of the CFG Scale Scheduler", - "Power Scheduler Value": "Power Scheduler Value", - "Linear Down": "Linear Down", - "Cosine Down": "Cosine Down", - "Half Cosine Down": "Half Cosine Down", - "Linear Up": "Linear Up", - "Cosine Up": "Cosine Up", - "Half Cosine Up": "Half Cosine Up", - "Power Up": "Power Up", - "Power Down": "Power Down", + "Attention Heatmap": "注意力熱度圖", + "Attention texts for visualization. (comma separated)": "視覺化的注意文字(以逗號分隔)", + "Hide heatmap images": "隱藏熱度圖", + "Do not save heatmap images": "不儲存熱度圖", + "Hide caption": "隱藏描述", + "Use grid (output to grid dir)": "使用網格(輸出到網格目錄)", + "Grid layout": "網格布局", + "Auto": "自動", + "Prevent Empty Spot": "防止空白區域", + "Batch Length As Row": "批次長度作為一列", + "Heatmap blend alpha": "熱度圖混合透明度", + "Heatmap image scale": "熱度圖縮放比例", + "Trace each layers": "追蹤每個層級", + "Use layers as row instead of Batch Length": "將圖層作為行而非批次長度使用", + "stable-diffusion-webui-daam": "stable-diffusion-webui-daam", + "https://github.com/toriato/stable-diffusion-webui-daam.git": "https://github.com/toriato/stable-diffusion-webui-daam.git", + "Save score as EXIF or PNG Info Chunk": "將分數儲存為 EXIF 或 PNG Info Chunk", + "aesthetic_score": "美學分數", + "cfg_scale": "提示詞相關性(CFG)", + "sd_model_hash": "SD模型雜湊值", + "hash": "雜湊值", + "Save tags (Windows only)": "儲存標籤(僅限Windows)", + "Save category (Windows only)": "儲存類別(僅限Windows)", + "Save generation params text": "儲存生成參數文本", + "Force CPU (Requires Custom Script Reload)": "強制使用 CPU(需要重新加載自定義腳本)", + "stable-diffusion-webui-aesthetic-image-scorer": "stable-diffusion-webui-aesthetic-image-scorer", + "https://github.com/tsngo/stable-diffusion-webui-aesthetic-image-scorer.git": "https://github.com/tsngo/stable-diffusion-webui-aesthetic-image-scorer.git", + "State": "State", + "Saved main elements": "Saved main elements", + "tabs": "tabs", + 
"Saved elements from txt2img": "Saved elements from txt2img", + "prompt": "提示詞", + "negative_prompt": "反向提示詞", + "sampling": "sampling", + "sampling_steps": "sampling_steps", + "width": "寬度", + "height": "高度", + "batch_count": "生成批次", + "batch_size": "每批數量", + "restore_faces": "面部修復", + "tiling": "可平鋪", + "hires_upscaler": "高解析度放大工具", + "hires_steps": "高解析步驟", + "hires_scale": "hires_scale", + "hires_resize_x": "hires_resize_x", + "hires_resize_y": "hires_resize_y", + "hires_denoising_strength": "高解析度修正重繪幅度", + "Saved elements from img2img": "Saved elements from img2img", + "resize_mode": "縮放模式", + "denoising_strength": "重繪幅度", + "https://github.com/ilian6806/stable-diffusion-webui-state.git": "https://github.com/ilian6806/stable-diffusion-webui-state.git", + "Text2Prompt": "文生提示詞", + "Input Theme": "輸入情境", + "Input Negative Theme": "輸入反向情境", + "Negative strength": "反向情境強度", + "Replace underscore in tag with whitespace": "將標記內下橫線替換成空格", + "Escape brackets in tag": "轉義標記內括號", + "Output": "輸出", + "Generation Settings": "生成設定", + "Database": "資料庫", + "Tag count filter": "Tag count filter", + "Tag range:": "Tag range:", + "≥ 0 tagged": "≥ 0 tagged", + "(14589 tags total)": "(14589 tags total)", + "Method to convert similarity into probability": "Method to convert similarity into probability", + "Cutoff and Power": "Cutoff and Power", + "Softmax": "Softmax", + "Power": "Power", + "NONE": "NONE", + "Top-k": "Top-k", + "Top-p (Nucleus)": "Top-p (Nucleus)", + "Max number of tags": "標記最大數量", + "k value": "k 值", + "p value": "p 值", + "Use weighted choice": "Use weighted choice", + "stable-diffusion-webui-text2prompt": "stable-diffusion-webui-text2prompt", + "https://github.com/toshiaki1729/stable-diffusion-webui-text2prompt.git": "https://github.com/toshiaki1729/stable-diffusion-webui-text2prompt.git", "openOutpaint": "開源圖像擴展", "Send to openOutpaint": ">> openOutpaint", "openOutpaint-webUI-extension": "openOutpaint-webUI-extension", "Refresh openOutpaint": "重新整理開源圖像擴展", 
"https://github.com/zero01101/openOutpaint-webUI-extension.git": "https://github.com/zero01101/openOutpaint-webUI-extension.git", - "Composable Lora": "Composable Lora", - "stable-diffusion-webui-composable-lora": "stable-diffusion-webui-composable-lora", - "https://github.com/a2569875/stable-diffusion-webui-composable-lora": "https://github.com/a2569875/stable-diffusion-webui-composable-lora", - "caeae16a (Fri Jun 23 04:35:21 2023)": "caeae16a (Fri Jun 23 04:35:21 2023)", - "Enabled": "啟用", - "Composable LoRA with step": "啟用在迭代步數上使用LoRA或動態權重的功能", - "Use Lora in uc text model encoder": "在反向提示詞的text model encoder使用LoRA", - "Use Lora in uc diffusion model": "在反向提示詞的diffusion model使用LoRA", - "Plot the LoRA weight in all steps": "產生LoRA在每一個迭代步數的權重的圖表", - "Don't use LoRA in uc if there're no subprompts": "如果沒有使用「AND」語法不要在反向提示詞中使用LoRA", - "Error! Composable Lora install failed! Please reinstall composable_lora and restart the WebUI.": "錯誤! Composable Lora安裝發生問題。請嘗試重新安裝Composable Lora並重新啟動WebUI和終端機。", + "3D Openpose": "3D Openpose", + "Edit Openpose": "Edit Openpose", + "Send to ControlNet": ">> ControlNet", + "Original:": "Original:", + "Online 3D Openpose Editor": "Online 3D Openpose Editor", + "Pose": "Pose", + "Control Model number": "Control Model number", + "Download": "Download", + "Depth": "Depth", + "Canny": "Canny", + "Use online version": "Use online version", + "sd-webui-3d-open-pose-editor": "sd-webui-3d-open-pose-editor", + "https://github.com/nonnonstop/sd-webui-3d-open-pose-editor.git": "https://github.com/nonnonstop/sd-webui-3d-open-pose-editor.git", + "Enable pixelization": "啟用像素化", + "Keep resolution": "保持解析度", + "Pixel size": "像素尺寸", + "stable-diffusion-webui-pixelization": "stable-diffusion-webui-pixelization", + "https://github.com/AUTOMATIC1111/stable-diffusion-webui-pixelization.git": "https://github.com/AUTOMATIC1111/stable-diffusion-webui-pixelization.git", + "DreamArtist Create embedding": "夢想家(DreamArtist)創建嵌入", + "DreamArtist Train": 
"夢想家(DreamArtist)訓練", + "Process Att-Map": "處理注意力圖(Att-Map)", + "Initialization text (negative)": "初始化文本(反向)", + "Number of negative vectors per token": "每個標記的反向向量數", + "Unet Learning rate": "Unet 模型的學習率", + "Train with DreamArtist": "使用夢想家(DreamArtist)訓練", + "Train with reconstruction": "訓練時開啟重建", + "Attention Map": "注意力圖(Att-Map)", + "Train U-Net": "訓練 U-Net", + "CFG scale (dynamic cfg: low,high:type e.g. 1.0-3.5:cos)": "提示詞相關性(動態 CFG:low,high:type,例如 1.0-3.5:cos)", + "Reconstruction loss weight": "重建損失權重", + "Negative lr weight": "反向的學習率權重", + "Classifier path": "分類器(Classifier)的路徑", + "Accumulation steps": "累加步數", + "Prompt template file": "提示詞模版檔案", + "Positive \"filewords\" only": "僅使用正向「詞彙」", + "Experimental features (May be solve the problem of erratic training and difficult to reproduce [set EMA to 0.97])": "實驗性功能(可能解決訓練不穩定和難以重現的問題 [將 EMA 設定為 0.97])", + "EMA (positive)": "EMA (正)", + "EMA replace steps (positive)": "EMA 替換步數 (正)", + "EMA (nagetive)": "EMA (負)", + "EMA replace steps (nagative)": "EMA 替換步數 (負)", + "beta1": "β1", + "beta2": "β2", + "Since there is a self-attention operation in VAE, it may change the distribution of features. 
This processing will superimpose the attention map of self-attention on the original Att-Map.": "由於 VAE 中使用了自我注意力機制,這可能會改變特徵的分佈。這種處理會將自我注意力產生的注意力圖與原始的注意力圖疊加在一起。", + "Data directory": "資料目錄", + "Process": "處理", + "Image": "圖像", + "DreamArtist-sd-webui-extension": "DreamArtist-sd-webui-extension", + "Path to classifier ckpt, can be empty": "分類器的路徑,可以是空白", + "https://github.com/7eu7d7/DreamArtist-sd-webui-extension.git": "https://github.com/7eu7d7/DreamArtist-sd-webui-extension.git", + "Travel mode": "Travel mode", + "replace": "replace", + "Linear interp method": "Linear interp method", + "lerp": "lerp", + "Replace dimension": "Replace dimension", + "token": "token", + "Replace order": "Replace order", + "Travel steps between stages": "Travel steps between stages", + "Frame genesis": "Frame genesis", + "fixed": "fixed", + "Denoise strength": "Denoise strength", + "Denoise steps for embryo": "Denoise steps for embryo", + "Depth image file": "Depth image file", + "Upscale ratio": "放大比率", + "Upscale width": "Upscale width", + "Upscale height": "Upscale height", + "Video file format": "Video file format", + "mp4": "mp4", + "Video FPS": "Video FPS", + "Pad begin/end frames": "Pad begin/end frames", + "Pick frame by slice": "Pick frame by slice", + "Ext. export video": "Ext. export video", + "Ext. upscale": "Ext. upscale", + "Ext. depth-image-io (for depth2img models)": "Ext. 
depth-image-io (for depth2img models)", + "stable-diffusion-webui-prompt-travel": "stable-diffusion-webui-prompt-travel", + "https://github.com/Kahsolt/stable-diffusion-webui-prompt-travel.git": "https://github.com/Kahsolt/stable-diffusion-webui-prompt-travel.git", + "Prompt Travel": "Prompt Travel", + "slerp": "slerp", + "successive": "successive", + "embryo": "embryo", + "gif": "gif", + "webm": "webm", + "Load Settings": "載入設定", + "Save Settings": "儲存設定", + "Generate Ckpt": "產生 Ckpt", + "Save Weights": "儲存權重", + "Generate Samples": "產生樣本", + "Select or create a model to begin.": "選擇或建立一個模型", + "Model": "模型", + "Select": "選擇模型", + "Create": "建立", + "Snapshot to Resume": "從 Snapshot 恢復", + "Lora Model": "LoRA 模型", + "Loaded Model:": "載入模型:", + "Model Revision:": "模型修正:", + "Model Epoch:": "模型訓練週期:", + "V2 Model:": "V2 模型:", + "Has EMA:": "有 EMA:", + "Source Checkpoint:": "來源模型權重存檔點:", + "Create Model": "建立模型", + "Create From Hub": "從 huggingface 建立", + "512x Model": "512x 模型", + "Model Path": "模型路徑", + "HuggingFace Token": "HuggingFace 標記", + "Source Checkpoint": "源模型權重存檔點", + "Extract EMA Weights": "提取 EMA 權重", + "Unfreeze Model": "解凍模型", + "Resources": "Resources", + "Beginners guide": "Beginners guide", + "Release notes": "發布說明", + "Input": "輸入", + "Concepts": "概念", + "Saving": "儲存", + "Testing": "測試", + "Performance Wizard (WIP)": "效能嚮導(半成品)", + "Basic": "基本設定", + "General": "一般的", + "Use LORA": "使用 LoRA", + "Use Lora Extended": "使用 LoRA 擴充功能(Locon)", + "Train Imagic Only": "僅意象訓練", + "Train Inpainting Model": "訓練局部重繪模型", + "Intervals": "訓練週期 / 間隔", + "Training Steps Per Image (Epochs)": "每張圖像的訓練步數(訓練週期)", + "Pause After N Epochs": "N 階段後暫停", + "Amount of time to pause between Epochs (s)": "每訓練週期之間暫停的時間(秒)", + "Save Model Frequency (Epochs)": "儲存模型頻率(訓練週期)", + "Save Preview(s) Frequency (Epochs)": "儲存預覽頻率(訓練週期)", + "Batching": "批次", + "Batch Size": "每批數量", + "Gradient Accumulation Steps": "梯度累積疊代步數", + "Class Batch Size": "類別每批數量", + "Set Gradients to None When 
Zeroing": "將梯度設定為 0 的時候設定為無", + "Gradient Checkpointing": "梯度進度記錄", + "Learning Rate": "學習率", + "Lora UNET Learning Rate": "LoRA UNET 學習率", + "Lora Text Encoder Learning Rate": "LoRA Text Encoder 學習率", + "Learning Rate Scheduler": "學習率調度器", + "linear_with_warmup": "linear_with_warmup", + "cosine": "餘弦(cosine)", + "cosine_annealing": "cosine_annealing", + "cosine_annealing_with_restarts": "cosine_annealing_with_restarts", + "cosine_with_restarts": "含重啟的餘弦(cosine)", + "polynomial": "多項式(polynomial)", + "constant": "常數(constant)", + "constant_with_warmup": "含預熱的常數(constant)", + "Min Learning Rate": "最小學習率", + "Number of Hard Resets": "硬重置數量", + "Constant/Linear Starting Factor": "常數/線性起始因子", + "Polynomial Power": "多項式功率", + "Scale Position": "比例位置", + "Learning Rate Warmup Steps": "學習率預熱步數", + "Image Processing": "圖像處理", + "Max Resolution": "最高解析度", + "Apply Horizontal Flip": "套用水平翻轉", + "Tuning": "調整", + "Use EMA": "使用 EMA", + "Optimizer": "優化器", + "Torch AdamW": "Torch AdamW", + "8bit AdamW": "8bit AdamW", + "Lion": "Lion", + "Mixed Precision": "混合精度", + "no": "否", + "fp16": "fp16", + "Memory Attention": "記憶體注意力", + "default": "default", + "Cache Latents": "快取潛在變數", + "Train UNET": "訓練 UNET", + "Step Ratio of Text Encoder Training": "文字編碼器訓練步驟比率", + "Offset Noise": "噪聲偏移", + "Freeze CLIP Normalization Layers": "凍結 CLIP 正規化層", + "Clip Skip": "Clip 跳過層", + "Weight Decay": "權重衰減", + "Pad Tokens": "填充標記", + "Strict Tokens": "嚴格的標記", + "Shuffle Tags": "洗牌標籤", + "Max Token Length": "最大標記長度", + "Prior Loss": "先前的損失", + "Scale Prior Loss": "縮放先前的損失", + "Prior Loss Weight": "先前損失權重", + "Prior Loss Target": "先前損失目標", + "Minimum Prior Loss Weight": "最小先前損失權重", + "Advanced": "進階的", + "Sanity Sample Prompt": "樣本提示詞", + "Sanity Sample Negative Prompt": "樣本反向提示詞", + "Sanity Sample Seed": "樣本種子", + "Miscellaneous": "雜項", + "Pretrained VAE Name or Path": "預訓練 VAE 名稱或路徑", + "Use Concepts List": "使用概念列表", + "Concepts List": "概念列表", + "API Key": "API 金鑰", + "Discord Webhook": "Discord 
Webhook", + "Save and Test Webhook": "儲存並測試 Webhook", + "Training Wizard (Person)": "訓練嚮導(人物)", + "Training Wizard (Object/Style)": "訓練嚮導(物件 / 樣式)", + "Concept 1": "概念 1", + "Concept 2": "概念 2", + "Concept 3": "概念 3", + "Concept 4": "概念 4", + "Directories": "目錄", + "Dataset Directory": "實例圖像數據目錄", + "Classification Dataset Directory": "類別/正則數據集目錄", + "Filewords": "風格 / 物品名稱", + "Instance Token": "實例名稱", + "Class Token": "類別/正則名稱", + "Training Prompts": "訓練提示詞", + "Instance Prompt": "實例提示詞", + "Class Prompt": "類別/正則提示詞", + "Classification Image Negative Prompt": "類別(正則) 圖像反向提示詞", + "Sample Prompts": "樣本提示詞", + "Sample Image Prompt": "樣本圖像提示詞", + "Sample Negative Prompt": "樣本反向提示詞", + "Sample Prompt Template File": "樣本提示詞範本檔案", + "Class Image Generation": "生成類別(正則) 圖像", + "Class Images Per Instance Image": "每個實例圖像的類別(正則) 圖片數量", + "Classification CFG Scale": "類別(正則) CFG比例", + "Classification Steps": "類別(正則) 步驟", + "Sample Image Generation": "生成樣本圖像", + "Number of Samples to Generate": "產生樣本的數量", + "Sample Seed": "樣本種子", + "Sample CFG Scale": "樣本CFG比例", + "Sample Steps": "樣本步數", + "Custom Model Name": "自定義模型名稱", + "Save in .safetensors format": "以 .safetensors 格式保存", + "Save EMA Weights to Generated Models": "將 EMA 權重儲存到產生的模型中", + "Use EMA Weights for Inference": "使用EMA權重進行推論", + "Half Model": "半精度模型", + "Save Checkpoint to Subdirectory": "保存檢查點到子目錄", + "Generate a .ckpt file when saving during training.": "在訓練期間儲存時產生 .ckpt 文件。", + "Generate a .ckpt file when training completes.": "在訓練完成時產生 .ckpt 文件。", + "Generate a .ckpt file when training is canceled.": "在訓練取消時產生 .ckpt 文件。", + "Lora UNET Rank": "LoRA UNET等級", + "Lora Text Encoder Rank": "LoRA 文字編碼器等級", + "Lora Weight": "LoRA 權重", + "Lora Text Weight": "LoRA 文本權重", + "Generate lora weights when saving during training.": "在訓練期間儲存時產生 LoRA。", + "Generate lora weights when training completes.": "在訓練完成時產生 LoRA。", + "Generate lora weights when training is canceled.": "在訓練取消時產生 LoRA。", + "Generate lora weights for extra 
networks.": "產生附加網路的 LoRA。(警告:如有使用 LoCon功能,需先安裝擴充。)", + "Diffusion Weights (training snapshots)": "Diffusion Weights (training snapshots)", + "Save separate diffusers snapshots when saving during training.": "在訓練期間保存獨立的模型。", + "Save separate diffusers snapshots when training completes.": "訓練完成後保存獨立的模型。", + "Save separate diffusers snapshots when training is canceled.": "當訓練被取消時保存獨立的模型。", + "Class Generation Schedulers": "類別(正則) 圖像生成調度器", + "Image Generation Library": "圖像生成", + "A1111 txt2img (Euler a)": "A1111 txt2img (Euler a)", + "Native Diffusers": "Native Diffusers", + "Image Generation Scheduler": "圖像生成採樣方式", + "DDPM": "DDPM", + "PNDM": "PNDM", + "LMSDiscrete": "LMSDiscrete", + "EulerDiscrete": "EulerDiscrete", + "HeunDiscrete": "HeunDiscrete", + "EulerAncestralDiscrete": "EulerAncestralDiscrete", + "DPMSolverMultistep": "DPMSolverMultistep", + "DPMSolverSinglestep": "DPMSolverSinglestep", + "KDPM2Discrete": "KDPM2Discrete", + "KDPM2AncestralDiscrete": "KDPM2AncestralDiscrete", + "DEISMultistep": "DEISMultistep", + "UniPCMultistep": "UniPCMultistep", + "Manual Class Generation": "Manual Class Generation", + "Generate Class Images": "產生類別(正則) 圖片", + "Generate Graph": "產生圖形", + "Graph Smoothing Steps": "圖形平滑步驟", + "Debug Buckets": "除錯", + "Epochs to Simulate": "要模擬的訓練週期", + "Batch Size to Simulate": "模擬的批量大小", + "Generate Sample Images": "產生樣本圖像", + "Sample Prompt": "樣本提示詞", + "Sample Prompt File": "樣本提示文件", + "Sample Width": "樣本寬度", + "Sample Height": "樣本高度", + "Sample Batch Size": "樣本批次大小", + "Swap Sample Faces": "交換 Sample Faces", + "Swap Prompt": "交換提示詞", + "Swap Negative Prompt": "交換反向提示詞", + "Swap Steps": "交換疊代步數", + "Swap Batch": "交換批次", + "Use txt2img": "使用文生圖", + "Experimental Settings": "實驗性設定", + "Deterministic": "確定性的訓練", + "Use EMA for prediction": "使用 EMA 進行預測", + "Calculate Split Loss": "計算分割損失", + "Use TensorFloat 32": "使用TensorFloat 32", + "Noise scheduler": "Noise scheduler", + "DEIS": "DEIS", + "Update Extension and Restart": "更新擴充並重新啟動", + 
"Bucket Cropping": "批量裁剪", + "Source Path": "來源路徑", + "Dest Path": "目標路徑", + "Max Res": "最大解析度", + "Bucket Steps": "批量步數", + "Dry Run": "空運行", + "Start Cropping": "開始裁剪", + "Checkbox": "核取方塊", + "Check Progress": "查看進度", + "Update Parameters": "更新參數", + "Changelog": "更新紀錄", + "X": "X", + "sd_dreambooth_extension": "sd_dreambooth_extension", + "https://github.com/d8ahazard/sd_dreambooth_extension.git": "https://github.com/d8ahazard/sd_dreambooth_extension.git", + "runwayml/stable-diffusion-v1-5": "runwayml/stable-diffusion-v1-5", + "A generic prompt used to generate a sample image to verify model fidelity.": "用於產生樣本圖像以驗證模型保真度的通用提示。", + "A negative prompt for the generic sample image.": "通用圖像的反向提示詞。", + "Leave blank to use base model VAE.": "留空以使用基本模型 VAE。", + "Path to JSON file with concepts to train.": "帶有要訓練概念的 JSON 檔案的路徑。", + "https://discord.com/api/webhooks/XXX/XXXX": "https://discord.com/api/webhooks/XXX/XXXX", + "(Optional) Path to directory with classification/regularization images": "(可選)帶有類別(正則) 圖像的目錄路徑", + "When using [filewords], this is the subject to use when building prompts.": "使用 [filewords] 時,構建提示時使用的主題。", + "When using [filewords], this is the class to use when building prompts.": "使用 [filewords] 時,構建提示時使用的類別(正則)。", + "Optionally use [filewords] to read image captions from files.": "可以選擇使用 [filewords] 從檔案中讀取圖像標題。", + "Leave blank to use instance prompt. 
Optionally use [filewords] to base sample captions on instance images.": "留空以使用實例提示。可以選擇使用 [filewords] 以實例圖像為基礎生成樣本標題。", + "Enter the path to a txt file containing sample prompts.": "輸入包含樣本提示的 txt 檔案的路徑。", + "Enter a model name for saving checkpoints and lora models.": "輸入模型名稱以保存檢查點和 lora 模型。", + "Conditioning Highres": "調整高解析度", + "Conditioning Highres.fix strength (for sd-v1-5-inpainting)": "高解析度修復原圖調節強度(專為 sd-v1-5-inpainting 設計)", + "Cond.fix: Disabled (none)": "條件修復:停用(無)", + "Cond.fix: Empty": "條件修復: 無", + "Cond.fix: Lowest": "條件修復: 最小", + "Cond.fix: Low": "條件修復: 小", + "Cond.fix: Medium": "條件修復: 中", + "Cond.fix: High (recommended)": "條件修復: 高(推薦)", + "Cond.fix: Highest": "條件修復: 最高", + "Cond.fix: Full": "條件修復: 完全", + "stable-diffusion-webui-conditioning-highres-fix": "stable-diffusion-webui-conditioning-highres-fix", + "https://github.com/klimaleksus/stable-diffusion-webui-conditioning-highres-fix.git": "https://github.com/klimaleksus/stable-diffusion-webui-conditioning-highres-fix.git", + "Main": "Main", + "LAB Tools": "LAB Tools", + "Guide": "Guide", + "Abysz LAB 0.1.9 Temporal coherence tools": "Abysz LAB 0.1.9 Temporal coherence tools", + "DFI Render": "DFI Render", + "Original frames folder": "Original frames folder", + "Generated frames folder": "Generated frames folder", + "Output folder": "Output folder", + "Info": "資訊", + "The new algorithm will adapt to DFI tolerance to choose the parameters for each frame. IMPORTANT: The algorithm is optimized to maintain a balance between deflicking and corruption, so that it is easier to use StableDiffusion at low denoising to reconstruct lost detail while preserving the stability gained.": "The new algorithm will adapt to DFI tolerance to choose the parameters for each frame. 
IMPORTANT: The algorithm is optimized to maintain a balance between deflicking and corruption, so that it is easier to use StableDiffusion at low denoising to reconstruct lost detail while preserving the stability gained.", + "Source denoise:": "Source denoise:", + "A noisy source can interfere with the accuracy of the scan. This will reduce noise, but also detail. However, this does not affect the original, and sometimes flatter images are not bad for the process, although you may need to balance by reducing the DFI tolerance.": "A noisy source can interfere with the accuracy of the scan. This will reduce noise, but also detail. However, this does not affect the original, and sometimes flatter images are not bad for the process, although you may need to balance by reducing the DFI tolerance.", + "(This is a demanding algorithm)": "(This is a demanding algorithm)", + "DFI Tolerance:": "DFI Tolerance:", + "Determines the movement tolerance of the scan. Low tolerance will detect even small changes in static areas. High values will detect less movements. Ideally, it should detect the movements that are important to you, and skip the static and useless areas, reducing the flick in those.": "Determines the movement tolerance of the scan. Low tolerance will detect even small changes in static areas. High values will detect less movements. Ideally, it should detect the movements that are important to you, and skip the static and useless areas, reducing the flick in those.", + "This parameter commands the new dynamic algorithm.": "This parameter commands the new dynamic algorithm.", + "DFI Expand:": "DFI Expand:", + "DFI expand fattens the edges of the areas detected by DFI. Note: DFI tolerance modifies the amount of movement detected. This only affects that result, be it big or small. Its a complementary parameter. 0=Off.": "DFI expand fattens the edges of the areas detected by DFI. Note: DFI tolerance modifies the amount of movement detected. 
This only affects that result, be it big or small. Its a complementary parameter. 0=Off.", + "Source Denoise": "Source Denoise", + "DFI Tolerance": "DFI Tolerance", + "DFI Expand": "DFI Expand", + "Here you can check examples of the motion map for those parameters. It is useful, for example, to adjust denoise if you see that it detects unnecessary graininess. Keep in mind that what you see represents movement between two frames.": "Here you can check examples of the motion map for those parameters. It is useful, for example, to adjust denoise if you see that it detects unnecessary graininess. Keep in mind that what you see represents movement between two frames.", + "The black is basically what it won't process (it will let it through to preserve the movement), and the white what it will try to keep stable in that frame interpolation. Try freely. Here you can also test how the manual smooth works (advanced section).": "The black is basically what it won't process (it will let it through to preserve the movement), and the white what it will try to keep stable in that frame interpolation. Try freely. Here you can also test how the manual smooth works (advanced section).", + "Preview DFI Map": "Preview DFI Map", + "Preview amount. 0 = Quick shoot": "Preview amount. 0 = Quick shoot", + "Inter Denoise:": "Inter Denoise:", + "Reduces render pixelation generated by corruption. However, be careful. It's resource hungry, and might remove excess detail. Not recommended to change size or FPD, but to use Stable Diffusion to remove the pixelation later.": "Reduces render pixelation generated by corruption. However, be careful. It's resource hungry, and might remove excess detail. Not recommended to change size or FPD, but to use Stable Diffusion to remove the pixelation later.", + "Inter Blur:": "Inter Blur:", + "Fine tunes the dynamic blur algorithm for DFI map. Lower = Stronger blur effects. Between 2-3 recommended.": "Fine tunes the dynamic blur algorithm for DFI map. 
Lower = Stronger blur effects. Between 2-3 recommended.", + "Corruption Refresh:": "Corruption Refresh:", + "To reduce the distortion generated by the process, you can recover original information every X number of frames. Lower number = faster refresh.": "To reduce the distortion generated by the process, you can recover original information every X number of frames. Lower number = faster refresh.", + "Corruption Preserve:": "Corruption Preserve:", + "Here you decide how much corruption keep in each corruption refresh. Low values will recover more of the original frame, with its changes and flickering, in exchange for reducing corruption. You must find the balance that works best for your goal.": "Here you decide how much corruption keep in each corruption refresh. Low values will recover more of the original frame, with its changes and flickering, in exchange for reducing corruption. You must find the balance that works best for your goal.", + "Smooth:": "Smooth:", + "This smoothes the edges of the interpolated areas. Low values are currently recommended until the algorithm is updated.": "This smoothes the edges of the interpolated areas. Low values are currently recommended until the algorithm is updated.", + "Inter Denoise": "Inter Denoise", + "Inter Denoise Size": "Inter Denoise Size", + "Inter Denoise FPD": "Inter Denoise FPD", + "Inter Blur": "Inter Blur", + "The new dynamic algorithm will handle these parameters. Activate them only for manual control.": "The new dynamic algorithm will handle these parameters. Activate them only for manual control.", + "Corruption Refresh (Lower = Faster)": "Corruption Refresh (Lower = Faster)", + "Corruption Preserve": "Corruption Preserve", + "Smooth": "Smooth", + "Frames to render. 0=ALL": "Frames to render. 
0=ALL", + "Run DFI": "Run DFI", + "Status": "Status", + "Show output folder video": "Show output folder video", + "|": "|", + "Deflickers Playground": "Deflickers Playground", + "Frames folder": "Frames folder", + "I made this series of deflickers based on the standard that Vegas Pro includes. You can use them together or separately. Be careful when mixing them.": "I made this series of deflickers based on the standard that Vegas Pro includes. You can use them together or separately. Be careful when mixing them.", + "Blend:": "Blend:", + "Blends a percentage between frames. This can soften transitions and highlights. 50 is half of each frame. 80 or 20 are recommended values.": "Blends a percentage between frames. This can soften transitions and highlights. 50 is half of each frame. 80 or 20 are recommended values.", + "Overlay:": "Overlay:", + "Use the overlay image blending mode. Note that it works particularly good at mid-high values, wich will modify the overall contrast. You will have to decide what works for you.": "Use the overlay image blending mode. Note that it works particularly good at mid-high values, wich will modify the overall contrast. You will have to decide what works for you.", + "Normalize:": "Normalize:", + "Calculates the average between frames to merge them. It may be more practical if you don't have a specific Blend deflicker value in mind.": "Calculates the average between frames to merge them. It may be more practical if you don't have a specific Blend deflicker value in mind.", + "BLEND (0=Off)": "BLEND (0=Off)", + "OVERLAY (0=Off)": "OVERLAY (0=Off)", + "NORMALIZE (0=Off))": "NORMALIZE (0=Off))", + "Deflickers": "Deflickers", + "Style Fuse": "Style Fuse", + "With this you can merge two sets of frames with overlay technique. For example, you can take a style video that is just lights and/or colors, and overlay it on top of another video.": "With this you can merge two sets of frames with overlay technique. 
For example, you can take a style video that is just lights and/or colors, and overlay it on top of another video.", + "The resulting video will be useful for use in Img2Img Batch and that the AI render preserves these added color and lighting details, along with the details of the original video.": "The resulting video will be useful for use in Img2Img Batch and that the AI render preserves these added color and lighting details, along with the details of the original video.", + "Style frames": "Style frames", + "Video frames": "Video frames", + "Fuse Strength": "Fuse Strength", + "Fuse": "Fuse", + "Video extract": "Video extract", + "Video path": "Video path", + "Fps. 0=Original": "Fps. 0=Original", + "Extract": "Extract", + "What DFI does?": "What DFI does?", + "DFI processing analyzes the motion of the original video, and attempts to force that information into the generated video. Demo on https://github.com/AbyszOne/Abysz-LAB-Ext": "DFI processing analyzes the motion of the original video, and attempts to force that information into the generated video. Demo on https://github.com/AbyszOne/Abysz-LAB-Ext", + "In short, this will reduce flicker in areas of the video that don't need to change, but SD does. For example, for a man smoking, leaning against a pole, it will detect that the pole is static, and will try to prevent it from changing as much as possible.": "In short, this will reduce flicker in areas of the video that don't need to change, but SD does. For example, for a man smoking, leaning against a pole, it will detect that the pole is static, and will try to prevent it from changing as much as possible.", + "This is an aggressive process that requires a lot of control for each context. Read the recommended strategies.": "This is an aggressive process that requires a lot of control for each context. 
Read the recommended strategies.", + "Although Video to Video is the most efficient way, a DFI One Shot method is under experimental development as well.": "Although Video to Video is the most efficient way, a DFI One Shot method is under experimental development as well.", + "Usage strategies": "Usage strategies", + "If you get enough understanding of the tool, you can achieve a much more stable and clean enough rendering. However, this is quite demanding.": "If you get enough understanding of the tool, you can achieve a much more stable and clean enough rendering. However, this is quite demanding.", + "Instead, a much friendlier and faster way to use this tool is as an intermediate step. For this, you can allow a reasonable degree of corruption in exchange for more general stability.": "Instead, a much friendlier and faster way to use this tool is as an intermediate step. For this, you can allow a reasonable degree of corruption in exchange for more general stability.", + "You can then clean up the corruption and recover details with a second step in Stable Diffusion at low denoising (0.2-0.4), using the same parameters and seed.": "You can then clean up the corruption and recover details with a second step in Stable Diffusion at low denoising (0.2-0.4), using the same parameters and seed.", + "In this way, the final result will have the stability that we have gained, maintaining final detail. If you find a balanced workflow, you will get something at least much more coherent and stable than the raw AI render.": "In this way, the final result will have the stability that we have gained, maintaining final detail. 
If you find a balanced workflow, you will get something at least much more coherent and stable than the raw AI render.", + "Abysz-LAB-Ext": "Abysz-LAB-Ext", + "https://github.com/AbyszOne/Abysz-LAB-Ext": "https://github.com/AbyszOne/Abysz-LAB-Ext", + "The RAW frames you have used as base for IA generation.": "The RAW frames you have used as base for IA generation.", + "The frames of AI generated video": "The frames of AI generated video", + "Remember that each generation overwrites previous frames in the same folder.": "Remember that each generation overwrites previous frames in the same folder.", + "STAND BY...": "STAND BY...", + "Frames to process": "Frames to process", + "Processed frames": "Processed frames", + "Style to fuse": "Style to fuse", + "Remember to use same fps as generated video for DFI": "Remember to use same fps as generated video for DFI", + "Embedding Editor": "嵌入編輯器", + "Vector": "向量", + "Refresh Embeddings": "重新整理多個嵌入", + "Save Embedding": "儲存嵌入", + "Enter words and color hexes to mark weights on the sliders for guidance. Hint: Use the txt2img prompt token counter or": "輸入文字和顏色十六進制代碼以在滑桿上標記權重作為引導。 提示:使用文生圖提示詞標記計數器或使用", + "webui-tokenizer": "標記解析器擴充功能", + "to see which words are constructed using multiple sub-words, e.g. 'computer' doesn't exist in stable diffusion's CLIP dictionary and instead 'compu' and 'ter' are used (1 word but 2 embedding vectors). Currently buggy and needs a moment to process before pressing the button. 
If it doesn't work after a moment, try adding a random space to refresh it.": "查看哪些詞是使用多個子詞構成的,例如 Stable Diffusion 的 CLIP 字典中不存在 'computer',而是使用 'compu' 以及 'ter'(一個單字但使用兩個嵌入向量)。目前這個擴充功能還有點問題,在按下按鈕之前需要一點時間來處理。如果過了一段時間還是不行,試試隨便加個空格重新整理一下", + "Sampling Steps": "採樣疊代步數", + "Generate Preview": "產生預覽", + "stable-diffusion-webui-embedding-editor": "stable-diffusion-webui-embedding-editor", + "https://github.com/CodeExplode/stable-diffusion-webui-embedding-editor.git": "https://github.com/CodeExplode/stable-diffusion-webui-embedding-editor.git", + "symbol:color-hex, symbol:color-hex, ...": "文字:顏色代碼, 文字:顏色代碼, ...", + "e.g. A portrait photo of embedding_name": "示例: A portrait photo of embedding_name", + "ControlNet v1.1.411": "ControlNet v1.1.411", + "ControlNet Unit 0": "ControlNet Unit 0", + "ControlNet Unit 1": "ControlNet Unit 1", + "ControlNet Unit 2": "ControlNet Unit 2", + "Preprocessor Preview": "預處理器預覽", + "Input Directory": "輸入目錄", + "Open New Canvas": "打開新畫布", + "New Canvas Width": "畫布寬度", + "New Canvas Height": "畫布高度", + "Create New Canvas": "創建新畫布", + "Set the preprocessor to [invert] If your image has white background and black lines.": "如果您的線稿圖像是白色背景和黑色線條,請將預處理器設置為 [invert]。", + "Enable": "啟用", + "Low VRAM": "低 VRAM 模式", + "Pixel Perfect": "完美像素", + "Allow Preview": "開啟預覽", + "Preview as Input": "Preview as Input", + "Control Type": "Control Type", + "All": "所有", + "NormalMap": "NormalMap", + "OpenPose": "OpenPose", + "MLSD": "MLSD", + "Lineart": "Lineart", + "SoftEdge": "SoftEdge", + "Scribble/Sketch": "Scribble/Sketch", + "Segmentation": "Segmentation", + "Shuffle": "Shuffle", + "Tile/Blur": "Tile/Blur", + "InstructP2P": "InstructP2P", + "Reference": "Reference", + "Recolor": "Recolor", + "Revision": "Revision", + "T2I-Adapter": "T2I-Adapter", + "IP-Adapter": "IP-Adapter", + "Preprocessor": "預處理器", + "Control Weight": "控制權重", + "Starting Control Step": "開始控制步數(%)", + "Ending Control Step": "停止控制步數(%)", + "Preprocessor resolution": "預處理器解析度", + "Threshold A": 
"閾值 A", + "Threshold B": "閾值 B", + "Control Mode": "Control Mode", + "Balanced": "平衡", + "My prompt is more important": "我的提示詞更重要", + "ControlNet is more important": "ControlNet更重要", + "Resize Mode": "縮放模式", + "Just Resize": "拉伸", + "Crop and Resize": "裁剪並調整大小", + "Resize and Fill": "調整大小並填充", + "[Loopback] Automatically send generated images to this ControlNet unit": "[Loopback] 自動將生成的圖像發送回此 ControlNet", + "Presets": "Presets", + "Preset name": "Preset name", + "ControlNet-M2M": "ControlNet-M2M", + "Duration": "持續時間", + "[ControlNet] Enabled": "[ControlNet] 啟用", + "[ControlNet] Model": "[ControlNet] 模型", + "[ControlNet] Weight": "[ControlNet] 權重", + "[ControlNet] Guidance Start": "[ControlNet] 引導開始", + "[ControlNet] Guidance End": "[ControlNet] 引導結束", + "[ControlNet] Resize Mode": "[ControlNet] 縮放模式", + "[ControlNet] Preprocessor": "[ControlNet] 預處理器", + "[ControlNet] Pre Resolution": "[ControlNet] 解析度", + "[ControlNet] Pre Threshold A": "[ControlNet] 閾值 A", + "[ControlNet] Pre Threshold B": "[ControlNet] 閾值 B", + "ControlNet-0": "ControlNet-0", + "ControlNet-1": "ControlNet-1", + "ControlNet-2": "ControlNet-2", + "Movie Input": "Movie Input", + "Image Input": "Image Input", + "Video": "視訊", + "Drop Video Here": "拖曳影片到此", + "Save preprocessed": "儲存預處理", + "Controlnet input directory": "Controlnet輸入目錄", + "Upload independent control image": "Upload independent control image", + "ControlNet": "ControlNet", + "Directory for detected maps auto saving": "檢測圖的自動儲存目錄", + "Extra path to scan for ControlNet models (e.g. 
training output directory)": "掃描 ControlNet 模型的額外路徑(例如訓練輸出目錄)", + "Path to directory containing annotator model directories (requires restart, overrides corresponding command line flag)": "包含預處理器模型的路徑(需要重新啟動,取代命令行設置)", + "Multi-ControlNet: ControlNet unit number (requires restart)": "Multi-ControlNet: ControlNet unit number (requires restart)", + "Model cache size (requires restart)": "模型緩存大小(需要儲存設定並重新啟動)", + "ControlNet inpainting Gaussian blur sigma": "ControlNet inpainting Gaussian blur sigma", + "Do not apply ControlNet during highres fix": "Do not apply ControlNet during highres fix", + "Do not append detectmap to output": "不要將檢測圖附加到輸出目錄", + "Allow detectmap auto saving": "允許檢測圖自動儲存", + "Allow other script to control this extension": "允許其他指令碼控制此擴充功能", + "Paste ControlNet parameters in infotext": "Paste ControlNet parameters in infotext", + "Show batch images in gradio gallery output": "Show batch images in gradio gallery output", + "Increment seed after each controlnet batch iteration": "在每次 controlnet 批處理迭代後增大種子", + "Disable control type selection": "Disable control type selection", + "Disable openpose edit": "Disable openpose edit", + "Ignore mask on ControlNet input image if control type is not inpaint": "Ignore mask on ControlNet input image if control type is not inpaint", + "https://github.com/Mikubill/sd-webui-controlnet.git": "https://github.com/Mikubill/sd-webui-controlnet.git", + "Leave empty to use img2img batch controlnet input directory": "留空使用img2img批處理controlnet輸入目錄", + "[IP-Adapter]": "[IP-Adapter]", + "Preprocessor Resolution": "Preprocessor Resolution", + "Noise Augmentation": "Noise Augmentation", + "MLSD Distance Threshold": "MLSD Distance Threshold", + "sd-webui-regional-prompter": "sd-webui-regional-prompter", + "https://github.com/hako-mikan/sd-webui-regional-prompter.git": "https://github.com/hako-mikan/sd-webui-regional-prompter.git", + "Active": "Active", + "Divide mode": "Divide mode", + "Horizontal": "水平", + "Vertical": "垂直", + 
"Generation mode": "Generation mode", + "Attention": "Attention", + "Divide Ratio": "Divide Ratio", + "Base Ratio": "Base Ratio", + "Use base prompt": "Use base prompt", + "Use common prompt": "Use common prompt", + "Use common negative prompt": "Use common negative prompt", + "visualize and make template": "visualize and make template", + "template": "template", + "disable convert 'AND' to 'BREAK'": "disable convert 'AND' to 'BREAK'", + "debug": "debug", + "Apply Presets": "Apply Presets", + "Preset Name": "Preset Name", + "Save to Presets": "Save to Presets", "Parameters": "Parameters", "Directions": "Directions", "Moods": "Moods", @@ -2097,6 +2346,7 @@ "image type, then almost all results will look like Paintings. Selecting": "image type, then almost all results will look like Paintings. Selecting", "will have a certain influence on the overall look in some way (if it’s something humanoid it may show emotion, but also colors and overall feel may change). Setting": "will have a certain influence on the overall look in some way (if it’s something humanoid it may show emotion, but also colors and overall feel may change). Setting", "will change the general tonality of the result. And setting": "will change the general tonality of the result. And setting", + "View": "View", "will attempt to change how the subject is viewed. Attempt, because view appears to be the least reliable keyword. These elements are placed in order of influence and supported by certain strength values. These basic settings produce very quick results close to the general look you want.": "will attempt to change how the subject is viewed. Attempt, because view appears to be the least reliable keyword. These elements are placed in order of influence and supported by certain strength values. 
These basic settings produce very quick results close to the general look you want.", "![]({path.join(ResourceDir,“Artists.jpg”) ‘’})": "![]({path.join(ResourceDir,“Artists.jpg”) ‘’})", "Moving on, adding a": "Moving on, adding a", @@ -2132,7 +2382,6 @@ "Amateur": "Amateur", "Artificial": "Artificial", "Award Winning": "Award Winning", - "Basic": "基本設定", "Beginner": "Beginner", "Bipolar": "Bipolar", "Boyish": "Boyish", @@ -2730,786 +2979,5307 @@ "Victorian Gothic Art": "Victorian Gothic Art", "Vorticism Art": "Vorticism Art", "Yuccie Art": "Yuccie Art", - "DreamArtist Create embedding": "夢想家(DreamArtist)創建嵌入", - "DreamArtist Train": "夢想家(DreamArtist)訓練", - "Process Att-Map": "處理注意力圖(Att-Map)", - "Initialization text (negative)": "初始化文本(反向)", - "Number of negative vectors per token": "每個標記的反向向量數", - "Unet Learning rate": "Unet 模型的學習率", - "Train with DreamArtist": "使用夢想家(DreamArtist)訓練", - "Train with reconstruction": "訓練時開啟重建", - "Attention Map": "注意力圖(Att-Map)", - "Train U-Net": "訓練 U-Net", - "CFG scale (dynamic cfg: low,high:type e.g. 1.0-3.5:cos)": "提示詞相關性(動態 CFG:low,high:type,例如 1.0-3.5:cos)", - "Reconstruction loss weight": "重建損失權重", - "Negative lr weight": "反向的學習率權重", - "Classifier path": "分類器(Classifier)的路徑", - "Accumulation steps": "累加步數", - "Prompt template file": "提示詞模版檔案", - "Positive \"filewords\" only": "僅使用正向「詞彙」", - "Experimental features (May be solve the problem of erratic training and difficult to reproduce [set EMA to 0.97])": "實驗性功能(可能解決訓練不穩定和難以重現的問題 [將 EMA 設定為 0.97])", - "EMA (positive)": "EMA (正)", - "EMA replace steps (positive)": "EMA 替換步數 (正)", - "EMA (nagetive)": "EMA (負)", - "EMA replace steps (nagative)": "EMA 替換步數 (負)", - "beta1": "β1", - "beta2": "β2", - "Since there is a self-attention operation in VAE, it may change the distribution of features. 
This processing will superimpose the attention map of self-attention on the original Att-Map.": "由於 VAE 中使用了自我注意力機制,這可能會改變特徵的分佈。這種處理會將自我注意力產生的注意力圖與原始的注意力圖疊加在一起。", - "Data directory": "資料目錄", - "DreamArtist-sd-webui-extension": "DreamArtist-sd-webui-extension", - "Path to classifier ckpt, can be empty": "分類器的路徑,可以是空白", - "https://github.com/7eu7d7/DreamArtist-sd-webui-extension.git": "https://github.com/7eu7d7/DreamArtist-sd-webui-extension.git", - "Multiplication (2^N)": "倍率 (2^N)", - "Weight": "權重", - "Force convert half to float on interpolation (for some platforms)": "在插值時強制將一半賺換為浮點(對於某些平台)", - "I know what I am doing.": "我知道我在做什麼。", - "Layers": "圖層", - "Apply to": "Apply to", - "Resblock": "Resblock", - "Transformer": "Transformer", - "S. Attn.": "S. Attn.", - "X. Attn.": "X. Attn.", - "OUT": "OUT", - "Start steps": "Start steps", - "Bilinear": "Bilinear", - "Bicubic": "Bicubic", - "Enable AA for Upscaling.": "Enable AA for Upscaling.", - "Downscaling": "Downscaling", - "Area": "Area", - "Pooling Max": "Pooling Max", - "Pooling Avg": "Pooling Avg", - "Enable AA for Downscaling.": "Enable AA for Downscaling.", - "interpolation method": "interpolation method", - "Lerp": "Lerp", - "SLerp": "SLerp", - "LLuL Enabled": "LLuL Enabled", - "LLuL Multiply": "LLuL Multiply", - "LLuL Weight": "LLuL Weight", - "LLuL Layers": "LLuL Layers", - "LLuL Apply to": "LLuL Apply to", - "LLuL Start steps": "LLuL Start steps", - "LLuL Max steps": "LLuL Max steps", - "LLuL Upscaler": "LLuL Upscaler", - "LLuL Upscaler AA": "LLuL Upscaler AA", - "LLuL Downscaler": "LLuL Downscaler", - "LLuL Downscaler AA": "LLuL Downscaler AA", - "LLuL Interpolation method": "LLuL Interpolation method", - "sd-webui-llul": "sd-webui-llul", - "https://github.com/hnmr293/sd-webui-llul.git": "https://github.com/hnmr293/sd-webui-llul.git", - "State": "State", - "Saved main elements": "Saved main elements", - "tabs": "tabs", - "Saved elements from txt2img": "Saved elements from txt2img", - "prompt": "提示詞", - 
"negative_prompt": "反相提示詞", - "sampling": "sampling", - "sampling_steps": "sampling_steps", - "width": "寬度", - "height": "高度", - "batch_count": "生成批次", - "batch_size": "每批數量", - "cfg_scale": "提示詞相關性(CFG)", - "restore_faces": "面部修復", - "tiling": "可平鋪", - "hires_upscaler": "高解析度放大工具", - "hires_steps": "高解析步驟", - "hires_scale": "hires_scale", - "hires_resize_x": "hires_resize_x", - "hires_resize_y": "hires_resize_y", - "hires_denoising_strength": "高解析度修正重繪幅度", - "Saved elements from img2img": "Saved elements from img2img", - "resize_mode": "縮放模式", - "denoising_strength": "重繪幅度", - "https://github.com/ilian6806/stable-diffusion-webui-state.git": "https://github.com/ilian6806/stable-diffusion-webui-state.git", - "stable-diffusion-webui-depthmap-script": "stable-diffusion-webui-depthmap-script", - "Compute on": "計算於", - "Match input size (size is ignored when using boost)": "符合輸入大小(當使用加速器時,大小將被忽略)", - "BOOST (multi-resolution merging)": "提升(多分辨率合併)", - "Invert DepthMap (black=near, white=far)": "Invert DepthMap (black=near, white=far)", - "Clip and renormalize": "Clip and renormalize", - "Far clip": "Far clip", - "Near clip": "Near clip", - "Combine into one image.": "Combine into one image.", - "Combine axis": "Combine axis", - "Vertical": "垂直", - "Horizontal": "水平", - "Save DepthMap": "儲存深度圖", - "Show DepthMap": "顯示深度圖", - "Show HeatMap": "顯示熱度圖", - "Generate Stereo side-by-side image": "Generate Stereo side-by-side image", - "Generate Stereo anaglyph image (red/cyan)": "Generate Stereo anaglyph image (red/cyan)", - "Divergence (3D effect)": "Divergence (3D effect)", - "Gap fill technique": "Gap fill technique", - "Balance between eyes": "Balance between eyes", - "Generate 3D inpainted mesh. (Sloooow)": "Generate 3D inpainted mesh. 
(Sloooow)", - "Generate 4 demo videos with 3D inpainted mesh.": "Generate 4 demo videos with 3D inpainted mesh.", - "Remove background": "移除背景", - "Save the foreground masks": "儲存前景遮罩", - "pre-depth background removal": "pre-depth background removal", - "Rembg Model": "Rembg Model", - "Information, comment and share @": "Information, comment and share @", - "Input Mesh (.ply)": "Input Mesh (.ply)", - "Generate video from inpainted mesh.": "Generate video from inpainted mesh.", - "Video": "視訊", - "A file on the same machine where the server is running.": "A file on the same machine where the server is running.", - "Number of frames": "Number of frames", - "Framerate": "偵率", - "Format": "格式", - "Trajectory": "Trajectory", - "Translate: x, y, z": "Translate: x, y, z", - "Crop: top, left, bottom, right": "Crop: top, left, bottom, right", - "Dolly": "Dolly", - "Generate Video": "產生影片", - "https://github.com/thygate/stable-diffusion-webui-depthmap-script.git": "https://github.com/thygate/stable-diffusion-webui-depthmap-script.git", - "Model Pre​views": "模型預覽器", - "Embeddings": "嵌入", - "Model": "模型", - "Filter": "過濾器", - "Model Preview XD": "模型預覽器", - "Name matching rule for preview files": "預覽檔的名稱符合規則", - "Loose": "寬鬆", - "Strict": "嚴格", - "Folder": "檔案夾", - "Index": "索引", - "Limit the height of preivews to the height of the browser window (.html preview files are always limited regardless of this setting)": "將預覽的高度限制為瀏覽器視窗高度(.html 預覽檔始終受此設定的限制)", - "No Preview Found": "查無預覽檔", - "https://github.com/CurtisDS/sd-model-preview-xd.git": "https://github.com/CurtisDS/sd-model-preview-xd.git", - "Text2Prompt": "文生提示詞", - "Input Theme": "輸入情境", - "Input Negative Theme": "輸入反向情境", - "Negative strength": "反向情境強度", - "Replace underscore in tag with whitespace": "將標記內下橫線替換成空格", - "Escape brackets in tag": "轉義標記內括號", - "Output": "輸出", - "Generation Settings": "生成設定", - "Database": "資料庫", - "Tag count filter": "Tag count filter", - "Tag range:": "Tag range:", - "≥ 0 tagged": "≥ 0 
tagged", - "(14589 tags total)": "(14589 tags total)", - "Method to convert similarity into probability": "Method to convert similarity into probability", - "Cutoff and Power": "Cutoff and Power", - "Softmax": "Softmax", - "Power": "Power", - "NONE": "NONE", - "Top-k": "Top-k", - "Top-p (Nucleus)": "Top-p (Nucleus)", - "Max number of tags": "標記最大數量", - "k value": "k 值", - "p value": "p 值", - "Use weighted choice": "Use weighted choice", - "stable-diffusion-webui-text2prompt": "stable-diffusion-webui-text2prompt", - "https://github.com/toshiaki1729/stable-diffusion-webui-text2prompt.git": "https://github.com/toshiaki1729/stable-diffusion-webui-text2prompt.git", - "3D Openpose": "3D Openpose", - "Edit Openpose": "Edit Openpose", - "Send to ControlNet": ">> ControlNet", - "Original:": "Original:", - "Online 3D Openpose Editor": "Online 3D Openpose Editor", - "Pose": "Pose", - "Control Model number": "Control Model number", - "Depth": "Depth", - "Canny": "Canny", - "Use online version": "Use online version", - "sd-webui-3d-open-pose-editor": "sd-webui-3d-open-pose-editor", - "https://github.com/nonnonstop/sd-webui-3d-open-pose-editor.git": "https://github.com/nonnonstop/sd-webui-3d-open-pose-editor.git", - "Main": "Main", - "Workflow assist": "Workflow assist", - "Advanced": "進階的", - "One Button Run and Upscale": "One Button Run and Upscale", - "Higher levels increases complexity and randomness of generated prompt": "Higher levels increases complexity and randomness of generated prompt", - "Subject Types": "Subject Types", - "type of image": "type of image", - "Place this in front of generated prompt (prefix)": "Place this in front of generated prompt (prefix)", - "Place this at back of generated prompt (suffix)": "Place this at back of generated prompt (suffix)", - "Use this negative prompt": "Use this negative prompt", - "Filter out following properties (comma seperated). Example film grain, purple, cat": "Filter out following properties (comma seperated). 
Example film grain, purple, cat", - "This generator will generate a complete full prompt for you, based on randomness. You can increase the slider, to include more things to put into the prompt. \nRecommended is keeping it around 3-7. Use 10 at your own risk.": "This generator will generate a complete full prompt for you, based on randomness. You can increase the slider, to include more things to put into the prompt. \nRecommended is keeping it around 3-7. Use 10 at your own risk.", - "There are a lot of special things build in, based on various research papers. Just try it, and let it surprise you.": "There are a lot of special things build in, based on various research papers. Just try it, and let it surprise you.", - "Suggestion is to leave the prompt field empty, anything here will be added at the end of the generated prompt.": "Suggestion is to leave the prompt field empty, anything here will be added at the end of the generated prompt.", - "It doesn’t add anything to the negative prompt field, so feel free to add your favorite negative prompts here.": "It doesn’t add anything to the negative prompt field, so feel free to add your favorite negative prompts here.", - "You can choose a certain subject type, if you want to generate something more specific. It has the following types:": "You can choose a certain subject type, if you want to generate something more specific. It has the following types:", - "object - Can be a random object, a building or a vehicle.": "object - Can be a random object, a building or a vehicle.", - "animal - A random (fictional) animal. Has a chance to have human characteristics, such as clothing added.": "animal - A random (fictional) animal. Has a chance to have human characteristics, such as clothing added.", - "humanoid - A random humanoid, males, females, fantasy types, fictional and non-fictional characters. 
Can add clothing, features and a bunch of other things.": "humanoid - A random humanoid, males, females, fantasy types, fictional and non-fictional characters. Can add clothing, features and a bunch of other things.", - "landscape - A landscape or a landscape with a building.": "landscape - A landscape or a landscape with a building.", - "concept - Can be a concept, such as “a X of Y”, or an historical event such as “The Trojan War”.": "concept - Can be a concept, such as “a X of Y”, or an historical event such as “The Trojan War”.", - "Artists have a major impact on the result. Automatically, it will select between 0-3 artists out of 3483 artists for your prompt.": "Artists have a major impact on the result. Automatically, it will select between 0-3 artists out of 3483 artists for your prompt.", - "You can turn it off. Add your own artists to the prompt, and they will be added to the end of the prompt.": "You can turn it off. Add your own artists to the prompt, and they will be added to the end of the prompt.", - "There are an immense number of image types, not only paintings and photo's, but also isometric renders and funko pops.\nYou can however, overwrite it with the most popular ones.": "There are an immense number of image types, not only paintings and photo's, but also isometric renders and funko pops.\nYou can however, overwrite it with the most popular ones.", - "all --> normally picks a image type as random. Can choose a ‘other’ more unique type.": "all --> normally picks a image type as random. 
Can choose a ‘other’ more unique type.", - "all - force multiple --> idea by redditor WestWordHoeDown, it forces to choose between 2 and 3 image types": "all - force multiple --> idea by redditor WestWordHoeDown, it forces to choose between 2 and 3 image types", - "photograph": "photograph", - "octane render": "octane render", - "digital art": "digital art", - "concept art": "concept art", - "painting": "painting", - "portrait": "portrait", - "anime key visual": "anime key visual", - "only other types --> Will pick only from the more unique types, such as stained glass window or a funko pop": "only other types --> Will pick only from the more unique types, such as stained glass window or a funko pop", - "Other prompt fields": "Other prompt fields", - "The existing prompt and negative prompt fields are ignored.": "The existing prompt and negative prompt fields are ignored.", + "Tag Autocomplete": "標記自動補齊", + "Tag filename": "標記檔檔名", + "Enable Tag Autocompletion": "啟用標記自動補齊", + "Active in txt2img (Requires restart)": "在文生圖中啟用(需要儲存設定並重新啟動)", + "Active in img2img (Requires restart)": "在圖生圖中啟用(需要儲存設定並重新啟動)", + "Active in negative prompts (Requires restart)": "在反向提示詞中啟用(需要儲存設定並重新啟動)", + "Active in third party textboxes [Dataset Tag Editor] (Requires restart)": "在第三方擴充功能「數據集標記編輯器」的文字方塊中啟用(需要儲存設定並重新啟動)", + "List of model names (with file extension) or their hashes to use as black/whitelist, separated by commas.": "要用作黑名單/白名單的模型名稱清單(包括檔案副檔名)或其雜湊值,用逗號分隔。", + "Mode to use for model list": "模型名稱清單的使用模式", + "Blacklist": "黑名單", + "Whitelist": "白名單", + "Move completion popup together with text cursor": "移動彈出視窗至文字游標處", + "Maximum results": "最大結果", + "Show all results": "顯示所有結果", + "How many results to load at once": "一次載入多少個結果", + "Time in ms to wait before triggering completion again (Requires restart)": "在再次觸發完成之前等待的毫秒數(需要儲存設定並重新啟動)", + "Search for wildcards": "搜尋萬用字元", + "Search for embeddings": "搜尋嵌入", + "Search for hypernetworks": "搜尋超網絡", + "Search for Loras": "搜尋 LoRA", + 
"Show '?' next to tags, linking to its Danbooru or e621 wiki page (Warning: This is an external site and very likely contains NSFW examples!)": "在標記旁顯示「?」,連結到其 Danbooru 或 e621 wiki 頁面(警告:這是外部網站,很可能包含 NSFW 內容!)", + "Replace underscores with spaces on insertion": "插入時將下橫線替換成空格", + "Escape parentheses on insertion": "插入時轉義括號", + "Append comma on tag autocompletion": "自動完成標記時加入逗號", + "Search by alias": "以別名搜尋", + "Only show alias": "僅顯示別名", + "Translation filename": "翻譯檔檔名", + "Translation file uses old 3-column translation format instead of the new 2-column one": "翻譯檔使用舊的三欄位翻譯格式,而非新的兩欄位格式", + "Search by translation": "以翻譯搜尋", + "Extra filename (for small sets of custom tags)": "追加標記檔檔名(用於小型的自定義標記集)", + "Mode to add the extra tags to the main tag list": "將追加標記加入主標記清單的模式", + "Insert before": "前綴插入", + "Insert after": "後綴插入", + "a1111-sd-webui-tagcomplete": "a1111-sd-webui-tagcomplete", + "https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git": "https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git", + "Enabled": "啟用", + "prompt-fusion-extension": "prompt-fusion-extension", + "https://github.com/ljleb/prompt-fusion-extension.git": "https://github.com/ljleb/prompt-fusion-extension.git", + "Create aesthetic embedding": "建立美學嵌入", + "Open for Clip Aesthetic!": "打開以調整 CLIP 美學!", + "Aesthetic weight": "美學權重", + "Aesthetic steps": "美學疊代步數", + "Aesthetic learning rate": "美學學習率", + "Slerp interpolation": "球面線性插值", + "Aesthetic imgs embedding": "美學圖集嵌入", + "Aesthetic text for imgs": "該圖集的美學描述", + "Slerp angle": "球面線性插值角度", + "Is negative text": "是反向提示詞", + "Create an aesthetic embedding out of any number of images": "從任意數量的圖像中建立美學嵌入", + "Create images embedding": "建立圖集嵌入", + "stable-diffusion-webui-aesthetic-gradients": "stable-diffusion-webui-aesthetic-gradients", + "This text is used to rotate the feature space of the imgs embs": "此文本用於旋轉圖集嵌入的特徵空間", + "https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients.git": 
"https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients.git", + "Seed travel": "種子變遷", + "Destination seed(s) (Comma separated)": "目標種子(逗號分割)", + "Only use Random seeds (Unless comparing paths)": "只用隨機種子(除非需要對比變遷軌跡)", + "Number of random seed(s)": "隨機種子數量", + "Compare paths (Separate travels from 1st seed to each destination)": "對比變遷軌跡(從第一個種子分別變遷到每一個目標種子)", + "Steps (Number of images between each seed)": "步數(每個種子之間的圖像數量)", + "Loop back to initial seed": "再變遷回初始種子", + "Save results as video": "儲存結果為影片", + "Frames per second": "每秒多少幀", + "Number of frames for lead in/out": "導入/導出幀數", + "Bump seed (If > 0 do a Compare Paths but only one image. No video will be generated.)": "提高種子值(如果大於 0,則進行對比變遷軌跡,但僅產生一張圖像而非影片。)", + "Use cache": "使用快取", + "Show generated images in ui": "在用戶介面上顯示產生了的圖像", + "Interpolation rate": "插值速率", + "Hug-the-middle": "保留核心(Hug-the-middle)", + "Slow start": "緩慢開始(Slow start)", + "Quick start": "快速開始(Quick start)", + "Rate strength": "速率強度", + "Allow the default Euler a Sampling method. 
(Does not produce good results)": "允許使用默認的 Euler a 採樣方法.(通常不會產生好的結果)", + "seed_travel": "seed_travel", + "https://github.com/yownas/seed_travel.git": "https://github.com/yownas/seed_travel.git", + "Latent Mirror mode": "鏡像潛在變數模式", + "Alternate Steps": "交替疊代", + "Blend Average": "平均混合", + "Latent Mirror style": "潛在變數鏡像樣式", + "Vertical Mirroring": "垂直鏡像", + "Horizontal Mirroring": "水平鏡像", + "Horizontal+Vertical Mirroring": "垂直+水平鏡像", + "90 Degree Rotation": "90 度旋轉", + "180 Degree Rotation": "180 度旋轉", + "Roll Channels": "三原色頻道輪替", + "X panning": "沿 X 軸滾動", + "Y panning": "沿 Y 軸滾動", + "Maximum steps fraction to mirror at": "鏡像干涉止步於總疊代步數的", + "SD-latent-mirroring": "SD-latent-mirroring", + "https://github.com/dfaker/SD-latent-mirroring.git": "https://github.com/dfaker/SD-latent-mirroring.git", + "Shift attention": "轉移注意力", + "https://github.com/yownas/shift-attention.git": "https://github.com/yownas/shift-attention.git", + "Composable Lora": "Composable Lora", + "stable-diffusion-webui-composable-lora": "stable-diffusion-webui-composable-lora", + "https://github.com/a2569875/stable-diffusion-webui-composable-lora": "https://github.com/a2569875/stable-diffusion-webui-composable-lora", + "caeae16a (Fri Jun 23 04:35:21 2023)": "caeae16a (Fri Jun 23 04:35:21 2023)", + "Composable LoRA with step": "啟用在迭代步數上使用LoRA或動態權重的功能", + "Use Lora in uc text model encoder": "在反向提示詞的text model encoder使用LoRA", + "Use Lora in uc diffusion model": "在反向提示詞的diffusion model使用LoRA", + "Plot the LoRA weight in all steps": "產生LoRA在每一個迭代步數的權重的圖表", + "Don't use LoRA in uc if there're no subprompts": "如果沒有使用「AND」語法不要在反向提示詞中使用LoRA", + "Error! Composable Lora install failed! Please reinstall composable_lora and restart the WebUI.": "錯誤! 
Composable Lora安裝發生問題。請嘗試重新安裝Composable Lora並重新啟動WebUI和終端機。", + "Tiled Diffusion": "Tiled Diffusion", + "Tiled VAE": "分塊 VAE", + "multidiffusion-upscaler-for-automatic1111": "multidiffusion-upscaler-for-automatic1111", + "https://github.com/pkuliyi2015/multidiffusion-upscaler-for-automatic1111.git": "https://github.com/pkuliyi2015/multidiffusion-upscaler-for-automatic1111.git", + "Overwrite image size": "覆寫圖像尺寸", + "Keep input image size": "維持輸入圖像尺寸", + "Image width": "圖像寬度", + "Image height": "圖像高度", + "Method": "Method", + "MultiDiffusion": "MultiDiffusion", + "Move ControlNet images to CPU (if applicable)": "將 ControlNet 圖像移至 CPU(如果可使用)", + "Free GPU": "Free GPU", + "Latent tile width": "潛在變數分塊寬度", + "Latent tile height": "潛在變數分塊高度", + "Latent tile overlap": "潛在變數分塊重疊", + "Latent tile batch size": "潛在變數分塊批次數量", + "Noise Inversion": "Noise Inversion", + "Enable Noise Inversion": "Enable Noise Inversion", + "Inversion steps": "Inversion steps", + "Please test on small images before actual upscale. Default params require denoise <= 0.6": "Please test on small images before actual upscale. 
Default params require denoise <= 0.6", + "Retouch": "Retouch", + "Renoise strength": "Renoise strength", + "Renoise kernel size": "Renoise kernel size", + "Region Prompt Control": "Region Prompt Control", + "Enable Control": "Enable Control", + "Draw full canvas background": "Draw full canvas background", + "Causalize layers": "Causalize layers", + "Create txt2img canvas": "Create txt2img canvas", + "Ref image (for conviently locate regions)": "Ref image (for conviently locate regions)", + "Custom Config File": "Custom Config File", + "Region 1": "Region 1", + "Region 2": "Region 2", + "Region 3": "Region 3", + "Region 4": "Region 4", + "Region 5": "Region 5", + "Region 6": "Region 6", + "Region 7": "Region 7", + "Region 8": "Region 8", + "Move VAE to GPU": "將 VAE 移至 GPU", + "Please use smaller tile size when see CUDA error: out of memory.": "如果看到 CUDA 錯誤:out of memory ,請降低分塊尺寸。", + "Encoder Tile Size": "編碼器分塊尺寸", + "Decoder Tile Size": "解碼器分塊尺寸", + "↻ Reset": "↻ Reset", + "Fast Encoder": "快速編碼器", + "Fast Decoder": "快速解碼器", + "Fast Encoder may change colors; Can fix it with more RAM and lower speed.": "快速編碼器會導致顏色變更;可以使用更多記憶體與時間來修復。", + "Encoder Color Fix": "編碼器顏色修復", + "stable-diffusion-webui-two-shot": "stable-diffusion-webui-two-shot", + "Divisions": "分割", + "Positions": "位置", + "Weights": "權重", + "end at this step": "在此疊代步數停止", + "Visualize": "視覺化", + "Regions": "區域", + "Extra generation params": "附加生成參數", + "https://github.com/opparco/stable-diffusion-webui-two-shot.git": "https://github.com/opparco/stable-diffusion-webui-two-shot.git", + "✕": "✕", + "[NPW] Weight": "[NPW] 權重", + "stable-diffusion-NPW": "stable-diffusion-NPW", + "https://github.com/muerrilla/stable-diffusion-NPW": "https://github.com/muerrilla/stable-diffusion-NPW", + "Wildcards Manager": "萬用字元管理器", + "Dynamic Prompts enabled": "啟用動態提示詞", + "Combinatorial generation": "組合生成", + "Max generations (0 = all combinations - the batch count value is ignored)": "最大產生數(0 = 所有組合 - 忽略批次數值)", + 
"Combinatorial batches": "組合批次", + "Prompt Magic": "提示詞魔法", + "Magic prompt": "魔法提示詞", + "Max magic prompt length": "魔法提示詞最大長度", + "Magic prompt creativity": "魔法提示詞創意", + "Magic prompt model": "魔法提示詞模型", + "Gustavosta/MagicPrompt-Stable-Diffusion": "Gustavosta/MagicPrompt-Stable-Diffusion", + "daspartho/prompt-extend": "daspartho/prompt-extend", + "succinctly/text2image-prompt-generator": "succinctly/text2image-prompt-generator", + "microsoft/Promptist": "microsoft/Promptist", + "AUTOMATIC/promptgen-lexart": "AUTOMATIC/promptgen-lexart", + "AUTOMATIC/promptgen-majinai-safe": "AUTOMATIC/promptgen-majinai-safe", + "AUTOMATIC/promptgen-majinai-unsafe": "AUTOMATIC/promptgen-majinai-unsafe", + "kmewhort/stable-diffusion-prompt-bolster": "kmewhort/stable-diffusion-prompt-bolster", + "Gustavosta/MagicPrompt-Dalle": "Gustavosta/MagicPrompt-Dalle", + "Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator": "Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator", + "Ar4ikov/gpt2-medium-650k-stable-diffusion-prompt-generator": "Ar4ikov/gpt2-medium-650k-stable-diffusion-prompt-generator", + "crumb/bloom-560m-RLHF-SD2-prompter-aesthetic": "crumb/bloom-560m-RLHF-SD2-prompter-aesthetic", + "Meli/GPT2-Prompt": "Meli/GPT2-Prompt", + "DrishtiSharma/StableDiffusion-Prompt-Generator-GPT-Neo-125M": "DrishtiSharma/StableDiffusion-Prompt-Generator-GPT-Neo-125M", + "Magic prompt blocklist regex": "魔法提示詞黑名單。", + "Magic Prompt batch size": "魔法提示詞批次大小", + "I'm feeling lucky": "手氣不錯", + "Attention grabber": "隨機關鍵詞吸引註意力", + "Minimum attention": "最小注意力", + "Maximum attention": "最大注意力", + "Don't apply to negative prompts": "不用於反向提示詞。", + "Need help?": "需要幫助?", + "Syntax cheatsheet": "語法速查表", + "Tutorial": "教學", + "Discussions": "討論串", + "Report a bug": "回報錯誤", + "Combinations": "組合", + "Choose a number of terms from a list, in this case we choose two artists:": "從列表中選幾項,這裡選了兩個藝術家", + "{2$$artist1|artist2|artist3}": "{2$$artist1|artist2|artist3}", + "If $$ is not provided, then 1$$ is assumed.": 
"若沒提供 $$,默認為 1$$", + "If the chosen number of terms is greater than the available terms, then some terms will be duplicated, otherwise chosen terms will be unique. This is useful in the case of wildcards, e.g.": "選的項數多於提供的項數時,有些項會重複,其餘情況各選項會保持唯一;\n重複對於萬用字元很有用,例如:", + "{2$$__artist__}": "{2$$__artist__}", + "is equivalent to": "等同於", + "{2$$__artist__|__artist__}": "{2$$__artist__|__artist__}", + "A range can be provided:": "項數可以有範圍", + "{1-3$$artist1|artist2|artist3}": "{1-3$$artist1|artist2|artist3}", + "In this case, a random number of artists between 1 and 3 is chosen.": "此例中,會從中隨機選 1 至 3 個藝術家", + "Options can be given weights:": "可以給選項權重:", + "{2::artist1|artist2}": "{2::artist1|artist2}", + "In this case, artist1 will be chosen twice as often as artist2.": "此例中,藝術家 1 將會比藝術家 2 高兩倍的機會被選中", + "Wildcards can be used and the joiner can also be specified:": "可以用萬用字元,也可以指定拼接符", + "{{1-$$and$$__adjective__}}": "{{1-3$$and$$__adjective__}}", + "Here, a random number between 1 and 3 words from adjective.txt will be chosen and joined together with the word 'and' instead of the default comma.": "此處,會從 adjective.txt 中選取隨機 1 至 3 行,以 'and'(而不是默認的逗號)拼接", + "Find and manage wildcards in the Wildcards Manager tab.": "在萬用字元管理器分頁中尋找並管理萬用字元", + "__/mywildcards__": "__/mywildcards__", + "will then become available.": "目錄內的文字檔案將可以被讀取。", + "Find more settings on the": "尋找更多設定請前往", + "Jinja2 templates": "Jinja2 模板", + "Enable Jinja2 templates": "啟用 Jinja2 模板", + "Help for Jinja2 templates": "Jinja2 模板幫助", + "Jinja2 templates is an experimental feature for advanced template generation. 
It is not recommended for general use unless you are comfortable with writing scripts.": "Jinja2 範本是一個用於進階範本產生的實驗性特性。如果不熟悉編寫指令碼,正常使用時不建議啟用", + "Literals": "字面值", + "I love red roses": "I love red roses", + "Random choices": "隨機選擇", + "I love {{ choice('red', 'blue', 'green') }} roses": "I love {{ choice('red', 'blue', 'green') }} roses", + "This will randomly choose one of the three colors.": "會隨機從三種顏色中選一個", + "Iterations": "迭代次數", + "{% for colour in ['red', 'blue', 'green'] %}\n {% prompt %}I love {{ colour }} roses{% endprompt %}\n {% endfor %}": "{% for colour in ['red', 'blue', 'green'] %}\n {% prompt %}I love {{ colour }} roses{% endprompt %}\n {% endfor %}", + "This will produce three prompts, one for each color. The prompt tag is used to mark the text that will be used as the prompt. If no prompt tag is present then only one prompt is assumed": "會產生三條提示詞,每個顏色各一條;\n 提示詞標籤用於標記作為提示詞的文字;\n 如果沒有提示詞標籤則默認為僅一條提示詞", + "{% for colour in wildcard(\"__colours__\") %}\n {% prompt %}I love {{ colour }} roses{% endprompt %}\n {% endfor %}": "{% for colour in wildcard(\"__colours__\") %}\n {% prompt %}I love {{ colour }} roses{% endprompt %}\n {% endfor %}", + "This will produce one prompt for each colour in the wildcard.txt file.": "會為 colours.txt 中的每個顏色產生一條提示詞", + "Conditionals": "條件", + "{% for colour in [\"red\", \"blue\", \"green\"] %}\n {% if colour == \"red\"}\n {% prompt %}I love {{ colour }} roses{% endprompt %}\n {% else %}\n {% prompt %}I hate {{ colour }} roses{% endprompt %}\n {% endif %}\n {% endfor %}": "{% for colour in [\"red\", \"blue\", \"green\"] %}\n {% if colour == \"red\"}\n {% prompt %}I love {{ colour }} roses{% endprompt %}\n {% else %}\n {% prompt %}I hate {{ colour }} roses{% endprompt %}\n {% endif %}\n {% endfor %}", + "This will produce the following prompts:": "會產生下列提示詞", + "I hate blue roses": "I hate blue roses", + "I hate green roses": "I hate green roses", + "Jinja2 templates are based on the Jinja2 template engine. 
For more information see the": "Jinja2 模板基於 Jinja2 模板引擎,更多訊息參考", + "Jinja2 documentation.": "Jinja2 文件資料。", + "If you are using these templates, please let me know if they are useful.": "如果你在用這些模板,請告訴我它們是否有用", + "Advanced options": "進階選項", + "Some settings have been moved to the settings tab. Find them in the Dynamic Prompts section.": "一些選項已移至設定。 位於動態提示詞部分。", + "Unlink seed from prompt": "將隨機種子與提示詞解綁", + "Fixed seed": "固定隨機種子", + "Write raw prompt to image": "將提示詞寫入圖像", + "Don't generate images": "不產生圖像", + "Write prompts to file": "將提示詞寫入檔案", + "Manage wildcards for Dynamic Prompts": "管理動態提示詞擴充功能的萬用字元", + "1. Create your wildcard library by copying a collection using the dropdown below.": "1. 使用下方的下拉選單選擇一個選集,並按下複製選集來創建您的萬用字元庫。", + "2. Click on any of the files that appear in the tree to edit them.": "2. 點擊上方選項中出現的文件進行編輯。", + "3. Use the wildcard in your script by typing the name of the file or copying the text from the Wildcards file text box": "3. 在提示詞中使用萬用字元,輸入檔案名稱或從萬用字元檔案檔案編輯器中複製文字。", + "Select a collection": "選擇選集", + "artists": "藝術家", + "devilkkw": "devilkkw", + "jumbo": "jumbo", + "nai": "nai", + "nsp": "nsp", + "parrotzone": "parrotzone", + "Copy collection": "複製選集", + "Overwrite existing": "覆寫既有", + "Refresh wildcards": "重新整理萬用字元", + "Delete all wildcards": "刪除全部萬用字元", + "Wildcards file": "萬用字元檔案", + "File editor": "檔案編輯器", + "Save wildcards": "儲存萬用字元", + "Ignore whitespace in prompts: All newlines, tabs, and multiple spaces are replaced by a single space": "忽略提示中的空格:所有換行符號、定位點和多個空格都會被替換為一個空格。", + "Save template to metadata: Write prompt template into the PNG metadata": "將範本保存至元數據:將提示詞範本寫入PNG數據中", + "Write prompts to file: Create a new .txt file for every batch containing the prompt template as well as the generated prompts.": "將提示寫入文件:為每個批次創建一個新的 .txt 文件,其中包含提示範本和產生的提示詞。", + "String to use as left bracket for parser variants, .e.g {variant1|variant2|variant3}": "作為解析器變體左括號的字符串,例如 {variant1|variant2|variant3}", + "String to use as right bracket for parser 
variants, .e.g {variant1|variant2|variant3}": "作為解析器變體右括號的字符串,例如 {variant1|variant2|variant3}", + "String to use as wrap for parser wildcard, .e.g __wildcard__": "用作解析萬用字元的字串命令,例如 __wildcard__", + "Limit Jinja prompts: Limit the number of prompts to batch_count * batch_size. The default is to generate batch_count * barch_size * number of prompts generated by Jinja": "將提示的數量限制為 batch_count * batch_size。預設是產生 batch_count * batch_size * Jinja 產生的提示數量。", + "sd-dynamic-prompts": "sd-dynamic-prompts", + "https://github.com/adieyal/sd-dynamic-prompts.git": "https://github.com/adieyal/sd-dynamic-prompts.git", + "Disable dynamic prompts by unchecking this box.": "取消勾選以停用動態提示詞", + "Instead of generating random prompts from a template, combinatorial generation produces every possible prompt from the given string.\nThe prompt 'I {love|hate} {New York|Chicago} in {June|July|August}' will produce 12 variants in total.\n\nThe value of the 'Seed' field is only used for the first image. To change this, look for 'Fixed seed' in the 'Advanced options' section.": "使用組合產生而非從範本產生隨機提示,會從給定的字串中產生所有可能的提示。\n例如提示字串'I {love|hate} {New York|Chicago} in {June|July|August}',會總共產生12種不同的提示。\n\n「Seed」欄位的值僅會用於生成第一張圖片。若要更改此值,請尋找「Advanced options」區塊中的「Fixed seed」選項。", + "Limit the maximum number of prompts generated. 0 (default) will generate all images. 
Useful to prevent an unexpected combinatorial explosion.": "限制產生的提示詞數量的上限,預設為0,表示產生所有圖像。這個選項可以防止過多的組合。", + "Re-run your combinatorial batch this many times with a different seed each time.": "這個選項表示用不同的種子重新運行組合產生的批次數據。在每次運行中,種子值都會改變,這樣可以產生不同的隨機順序或排列組合。", + "Magic Prompt adds interesting modifiers to your prompt for a little bit of extra spice.\nThe first time you use it, the MagicPrompt model is downloaded so be patient.\nIf you're running low on VRAM, you might get a CUDA error.": "魔法提示詞在提示詞中加入有趣的修飾,額外增添趣味。\n首次使用時會下載 MagicPrompt 模型,請耐心等待。\n在低 VRAM 情況下可能會導致 CUDA 報錯。", + "Controls the maximum length in tokens of the generated prompt.": "按標記數控制已產生的提示詞最大長度", + "Adjusts the generated prompt. You will need to experiment with this setting.": "調整已產生的提示詞,使用時要嘗試調整此設定", + "Regular expression pattern for blocking terms out of the generated prompt. Applied case-insensitively. For instance, to block both \"purple\" and \"interdimensional\", you could use the pattern \"purple|interdimensional\".": "用於排除提示詞語的表達式。忽略大小寫。例如,要封鎖 'purple' 和 'interdimensional',可以使用 'purple | interdimensional'", + "The number of prompts to generate per batch. Increasing this can speed up prompt generation at the expense of slightly increased VRAM usage.": "每個批次要產生的提示詞數量。增加此數量可以加快提示詞產生速度,但會略微增加 VRAM 的使用。", + "Uses the lexica.art API to create random prompts.\nThe prompt in the main prompt box is used as a search string.\nLeaving the prompt box blank returns a list of completely randomly chosen prompts.\nTry it out, it can be quite fun.": "用 lexica.art API 生成隨機提示詞\n提示詞框中的內容會作為搜索字串\n留空提示詞框會得到一組完全隨機選擇的提示詞\n用用看,它會很有趣", + "Randomly selects a keyword from the prompt and adds emphasis to it. Try this with Fixed Seed enabled.": "隨機強調提示詞中的一個關鍵詞,嘗試前要啟用固定隨機種子", + "Don't use prompt magic on negative prompts.": "不要對反向提示詞使用提示詞魔法。", + "Jinja2 templates are an expressive alternative to the standard syntax. 
See the Help section below for instructions.": "Jinja2 模板是標準語法富有表現力的一種替代品,相關說明參見下方幫助欄", + "Check this if you want to generate random prompts, even if your seed is fixed": "勾選此選項以在固定隨機種子的情況下依然產生隨機提示詞", + "Select this if you want to use the same seed for every generated image.\nThis is useful if you want to test prompt variations while using the same seed.\nIf there are no wildcards then all the images will be identical.": "勾選此選項以對每張產生的圖像用同樣的隨機種子。\n這在想用同樣的隨機種子測試提示詞變化時會有用。\n沒有萬用字元則所有圖像會相同。", + "Write the prompt template into the image metadata": "將提示詞範本寫入圖片數據。", + "Be sure to check the 'Write prompts to file' checkbox if you don't want to lose the generated prompts. Note, one image is still generated.": "不想失去產生的提示詞的話,需確保勾選 「將提示詞寫入檔案」。注意,依然會生成一張圖像。", + "The generated file is a slugified version of the prompt and can be found in the same directory as the generated images.\nE.g. in ./outputs/txt2img-images/.": "產生的檔案包含處理過的提示詞,和產生的圖像在同一目錄。\n例如 ./outputs/txt2img-images/", + "Complete documentation is available at https://github.com/adieyal/sd-dynamic-prompts. Please report any issues on GitHub.": "完整說明請在 https://github.com/adieyal/sd-dynamic-prompts 上取得。 任何問提請在 GitHub 上報告。", + "Generate all possible prompt combinations.": "產生所有可能的提示詞組合。", + "Automatically update your prompt with interesting modifiers. (Runs slowly the first time)": "使用有趣的修飾符自動更新你的提示詞。(第一次運行會比較慢)", + "Generate random prompts from lexica.art (your prompt is used as a search query).": "從 lexica.art 產生隨機提示詞(你的提示詞會被用作搜尋查詢)", + "Use the same seed for all prompts in this batch": "對這批次中的所有提示詞使用相同的種子", + "Write all generated prompts to a file": "將所有產生的提示詞寫入檔案", + "If this is set, then random prompts are generated, even if the seed is the same.": "如果設定了此項,則會產生隨機提示詞,即使種子相同。", + "Disable image generation. Useful if you only want to generate text prompts. 
(1 image will still be generated to keep Auto1111 happy.).": "停用圖像產生。這很有用,如果你只想產生文字提示。(仍將生成 1 張圖像以使 Auto1111 保持運行)", + "Add emphasis to a randomly selected keyword in the prompt.": "在提示詞中隨機選擇一個關鍵字加上強調符", + "Write template into image metadata.": "將範本寫入圖像中繼資料。", + "Note: Each model will download between 300mb and 1.4gb of data on first use.": "註記:每個模型第一次使用時會下載 300MB 到 1.4GB 的檔案。", + "= '": "= '", + "(0 = default (~14.6); maximum noise strength for k-diffusion noise schedule)": "(0 = default (~14.6); maximum noise strength for k-diffusion noise schedule)", + "(0 = default (7 for karras, 1 for polyexponential); higher values result in a more steep noise schedule (decreases faster))": "(0 = default (7 for karras, 1 for polyexponential); higher values result in a more steep noise schedule (decreases faster))", + "(0: disable, -1: show all images. Too many images can cause lag)": "(0: disable, -1: show all images. Too many images can cause lag)", + "10. Generate": "10. Generate", + "10th": "10th", + "1. Add URL and retrieve Model Info": "1. Add URL and retrieve Model Info", + "(1) Face Detection": "(1) Face Detection", + "1. Get Model Info by Civitai Url": "1. Get Model Info by Civitai Url", + "1. Get your key at": "1. Get your key at", + "1. Go to img2img tab": "1. Go to img2img tab", + "-1 means that it is calculated automatically. If both are -1, the size will be the same as the source size.": "-1 means that it is calculated automatically. If both are -1, the size will be the same as the source size.", + "1st": "1st", + "1. The tags common to all displayed images are shown in comma separated style.": "1. 
The tags common to all displayed images are shown in comma separated style.", + "(2) Crop and Resize the Faces": "(2) Crop and Resize the Faces", + "2D & 3D operator to move canvas left/right in pixels per frame": "2D & 3D operator to move canvas left/right in pixels per frame", + "2D & 3D operator to move canvas up/down in pixels per frame": "2D & 3D operator to move canvas up/down in pixels per frame", + "2D and 3D settings": "2D and 3D settings", + "2D operator that scales the canvas size, multiplicatively. [static = 1.0]": "2D operator that scales the canvas size, multiplicatively. [static = 1.0]", + "2D operator to rotate canvas clockwise/anticlockwise in degrees per frame": "2D operator to rotate canvas clockwise/anticlockwise in degrees per frame", + "2D or 3D animation_mode": "2D or 3D animation_mode", + "2nd": "2nd", + "2. Pick Subfolder and Model Version": "2. Pick Subfolder and Model Version", + "2. Refresh checkpoint list after authentication to get available checkpoints": "2. Refresh checkpoint list after authentication to get available checkpoints", + "2. Select [ebsynth utility] in the script combo box": "2. Select [ebsynth utility] in the script combo box", + "2. When changes are applied, all tags in each displayed images are replaced.": "2. 
When changes are applied, all tags in each displayed images are replaced.", + "360 Panorama to 3D": "360 Panorama to 3D", + "3D Fov settings:": "3D Fov settings:", + "3d glb": "3d glb", + "3D Mesh": "3D Mesh", + "3D Model": "3D 模型", + "3D operator to move canvas towards/away from view [speed set by FOV]": "3D operator to move canvas towards/away from view [speed set by FOV]", + "3D operator to pan canvas left/right in degrees per frame": "3D operator to pan canvas left/right in degrees per frame", + "3D operator to roll canvas clockwise/anticlockwise": "3D operator to roll canvas clockwise/anticlockwise", + "3D operator to tilt canvas up/down in degrees per frame": "3D operator to tilt canvas up/down in degrees per frame", + "3. Download Model": "3. Download Model", + "3D settings": "3D settings", + "3. Fill in the \"Project directory\" field with ": "3. Fill in the \"Project directory\" field with ", + "3. Generate without making your GPU go brrrr!": "3. Generate without making your GPU go brrrr!", + "3. If you change some tags into blank, they will be erased.": "3. If you change some tags into blank, they will be erased.", + "3rd": "3rd", + "(3) Recreate the Faces": "(3) Recreate the Faces", + "4. If you add some tags to the end, they will be added to the end/beginning of the text file.": "4. If you add some tags to the end, they will be added to the end/beginning of the text file.", + "(4) Paste the Faces": "(4) Paste the Faces", + "4. Select in the \"Mask Mode(Override img2img Mask mode)\" field with ": "4. Select in the \"Mask Mode(Override img2img Mask mode)\" field with ", + "4th": "4th", + "(5) Blend the entire image": "(5) Blend the entire image", + "5. Changes are not applied to the text files until the \"Save all changes\" button is pressed.": "5. Changes are not applied to the text files until the \"Save all changes\" button is pressed.", + "5. I recommend to fill in the \"Width\" field with ": "5. 
I recommend to fill in the \"Width\" field with ", + "5th": "5th", + "6. I recommend to fill in the \"Height\" field with ": "6. I recommend to fill in the \"Height\" field with ", + "6th": "6th", + "7. I recommend to fill in the \"Denoising strength\" field with lower than 0.35": "7. I recommend to fill in the \"Denoising strength\" field with lower than 0.35", + "7th": "7th", + "8. Fill in the remaining configuration fields of img2img. No image and mask settings are required.": "8. Fill in the remaining configuration fields of img2img. No image and mask settings are required.", + "8th": "8th", + "9. Drop any image onto the img2img main screen. This is necessary to avoid errors, but does not affect the results of img2img.": "9. Drop any image onto the img2img main screen. This is necessary to avoid errors, but does not affect the results of img2img.", + "9th": "9th", + "(A10) Primary": "(A10) Primary", + "(A1) Primary": "(A1) Primary", + "(A2) Primary": "(A2) Primary", + "(A3) Primary": "(A3) Primary", + "(A4) Primary": "(A4) Primary", + "(A5) Primary": "(A5) Primary", + "(A6) Primary": "(A6) Primary", + "(A7) Primary": "(A7) Primary", + "(A8) Primary": "(A8) Primary", + "(A9) Primary": "(A9) Primary", + "💡 About": "💡 About", + "About": "About", + "Abs": "Abs", + "Absolute": "Absolute", + "abs. path or url to audio file": "abs. 
path or url to audio file", + "abstract": "abstract", + "Abstract": "Abstract", + "Accelerate with OpenVINO": "Accelerate with OpenVINO", + "Accent": "Accent", + "Accent color": "Accent color", + "Accent Generate Button": "Accent Generate Button", + "Access results in ‘Open results’.": "Access results in ‘Open results’.", + "According to Live preview subject setting": "According to Live preview subject setting", + "Action for existing captions": "Action for existing captions", + "Action on existing caption": "Action on existing caption", + "Activate Selected Script": "Activate Selected Script", + "Activation keywords, comma-separated": "觸發提示詞,以逗號分隔", + "Active in img2img": "Active in img2img", + "Active in negative prompts": "Active in negative prompts", + "Active in third party textboxes": "Active in third party textboxes", + "Active in third party textboxes [Dataset Tag Editor] [Image Browser] [Tagger] [Multidiffusion Upscaler] (Requires restart)": "Active in third party textboxes [Dataset Tag Editor] [Image Browser] [Tagger] [Multidiffusion Upscaler] (Requires restart)", + "Active in txt2img": "Active in txt2img", + "Active Layer Only": "Active Layer Only", + "Active: peak amount of video memory used during generation (excluding cached data)": "Active: peak amount of video memory used during generation (excluding cached data)", + "A custom name to use when saving .ckpt and .pt files. Subdirectories will also be named this.": "A custom name to use when saving .ckpt and .pt files. Subdirectories will also be named this.", + "Adam Weight Decay": "Adam Weight Decay", + "AdamW Weight Decay": "AdamW Weight Decay", + "Adaptive (Gaussian)": "Adaptive (Gaussian)", + "Adaptive (Mean)": "Adaptive (Mean)", + "add": "add", + "➕ Add": "➕ Add", + "Add": "加入", + "Add additional prompting to the prefix, suffix and negative prompt in this screen. The actual prompt fields are ignored.": "Add additional prompting to the prefix, suffix and negative prompt in this screen. 
The actual prompt fields are ignored.", + "Add additional prompts to the head": "Add additional prompts to the head", + "Add ALL Displayed": "Add ALL Displayed", + "add APIs": "add APIs", "Add a prompt prefix, suffix and the negative prompt in the respective fields. They will be automatically added during processing.": "Add a prompt prefix, suffix and the negative prompt in the respective fields. They will be automatically added during processing.", - "Filter values": "Filter values", - "You can put comma seperated values here, those will be ignored from any list processing. For example, adding \"\"film grain, sepia\"\", will make these values not appear during generation.": "You can put comma seperated values here, those will be ignored from any list processing. For example, adding \"\"film grain, sepia\"\", will make these values not appear during generation.", - "For advanced users, you can create a permanent file in \\OneButtonPrompt\\userfiles\\ called antilist.csv": "For advanced users, you can create a permanent file in \\OneButtonPrompt\\userfiles\\ called antilist.csv", - "This way, you don’t ever have to add it manually again. This file won’t be overwritten during upgrades.": "This way, you don’t ever have to add it manually again. This file won’t be overwritten during upgrades.", - "Idea by redditor jonesaid.": "Idea by redditor jonesaid.", - "Workflow mode, turns off prompt generation and uses below Workflow prompt instead.": "Workflow mode, turns off prompt generation and uses below Workflow prompt instead.", - "Workflow prompt": "Workflow prompt", - "Workflow assist, suggestions by redditor Woisek.": "Workflow assist, suggestions by redditor Woisek.", - "With Workflow mode, you turn off the automatic generation of new prompts on ‘generate’, and it will use the Workflow prompt field instead. 
So you can work and finetune any fun prompts without turning of the script.": "With Workflow mode, you turn off the automatic generation of new prompts on ‘generate’, and it will use the Workflow prompt field instead. So you can work and finetune any fun prompts without turning of the script.", - "Below here, you can generate a set of random prompts, and send them to the Workflow prompt field. The generation of the prompt uses the settings in the Main tab.": "Below here, you can generate a set of random prompts, and send them to the Workflow prompt field. The generation of the prompt uses the settings in the Main tab.", - "Generate me some prompts!": "Generate me some prompts!", - "prompt 1": "prompt 1", - "Send prompt up": "Send prompt up", - "prompt 2": "prompt 2", - "prompt 3": "prompt 3", - "prompt 4": "prompt 4", - "prompt 5": "prompt 5", - "Prompt compounder": "Prompt compounder", - "Prompt seperator": "Prompt seperator", - "Prompt seperator mode": "Prompt seperator mode", - "Normally, it creates a single random prompt. With prompt compounder, it will generate multiple prompts and compound them together.": "Normally, it creates a single random prompt. With prompt compounder, it will generate multiple prompts and compound them together.", - "Keep at 1 for normal behavior.": "Keep at 1 for normal behavior.", - "Set to different values to compound that many prompts together. My suggestion is to try 2 first.": "Set to different values to compound that many prompts together. My suggestion is to try 2 first.", - "This was originally a bug in the first release when using multiple batches, now brought back as a feature.": "This was originally a bug in the first release when using multiple batches, now brought back as a feature.", - "Raised by redditor drone2222, to bring this back as a toggle, since it did create interesting results. So here it is.": "Raised by redditor drone2222, to bring this back as a toggle, since it did create interesting results. 
So here it is.", - "You can toggle the separator mode. Standardly this is a comma, but you can choose an AND or a BREAK.": "You can toggle the separator mode. Standardly this is a comma, but you can choose an AND or a BREAK.", - "You can also choose the prompt seperator mode for use with Latent Couple extension": "You can also choose the prompt seperator mode for use with Latent Couple extension", - "Example flow:": "Example flow:", - "Set the Latent Couple extension to 2 area’s (standard setting)": "Set the Latent Couple extension to 2 area’s (standard setting)", - "In the main tab, set the subject to humanoids": "In the main tab, set the subject to humanoids", - "In the prefix prompt field then add for example: Art by artistname, 2 people": "In the prefix prompt field then add for example: Art by artistname, 2 people", - "Set the prompt compounder to: 2": "Set the prompt compounder to: 2", - "Set the Prompt seperator to: AND": "Set the Prompt seperator to: AND", - "Set the Prompt Seperator mode to: prefix AND prompt + suffix": "Set the Prompt Seperator mode to: prefix AND prompt + suffix", - "“automatic” is entirely build around Latent Couple. It will pass artists and the amount of people/animals/objects to generate in the prompt automatically. Set the prompt compounder equal to the amount of areas defined in Laten Couple.": "“automatic” is entirely build around Latent Couple. It will pass artists and the amount of people/animals/objects to generate in the prompt automatically. 
Set the prompt compounder equal to the amount of areas defined in Laten Couple.", - "Leave the prompt field empty": "Leave the prompt field empty", - "Set the Prompt Seperator mode to: automatic": "Set the Prompt Seperator mode to: automatic", - "TXT2IMG": "TXT2IMG", - "Start WebUi with option --api for this to work.": "Start WebUi with option --api for this to work.", - "Start generating and upscaling!": "Start generating and upscaling!", - "Don't generate, only upscale": "Don't generate, only upscale", - "Only upscale will not use txt2img to generate an image.": "Only upscale will not use txt2img to generate an image.", - "Instead it will pick up all files in the \\upscale_me\\ folder and upscale them with below settings.": "Instead it will pick up all files in the \\upscale_me\\ folder and upscale them with below settings.", + "Add a spaces after comma": "Add a spaces after comma", + "Add a '/' to the beginning of directory buttons": "Add a '/' to the beginning of directory buttons", + "add audio to video from file/url or init video": "add audio to video from file/url or init video", + "Add background image": "Add background image", + "Add Background image": "加入背景圖像", + "Add Background Image": "Add Background Image", + "Add Blur": "Add Blur", + "Add Custom Mappings": "Add Custom Mappings", + "Add Detail": "Add Detail", + "Add difference:A+(B-C)*alpha": "Add difference:A+(B-C)*alpha", + "Added": "Added", + "Additional cli arguments to pass to ComfyUI (requires reload UI. Do NOT prepend --comfyui-, these are directly forwarded to comfyui)": "Additional cli arguments to pass to ComfyUI (requires reload UI. 
Do NOT prepend --comfyui-, these are directly forwarded to comfyui)", + "Additional components": "Additional components", + "Additional description": "Additional description", + "Additional Generation Info": "Additional Generation Info", + "Additional Networks": "附加網路(LoRA擴充功能)", + "Additional options": "Additional options", + "Additional tags (comma split)": "Additional tags (comma split)", + "Additional tags (split by comma)": "Additional tags (split by comma)", + "Add last frame to keyframes": "Add last frame to keyframes", + "Add LyCORIS to prompt": "Add LyCORIS to prompt", + "Add mask by sketch": "Add mask by sketch", + "Add model hash to infotext": "Add model hash to infotext", + "Add model name to infotext": "Add model name to infotext", + "AddNet Model 1": "[附加網絡] 模型 1️⃣", + "AddNet Model 2": "[附加網路] 模型 2️⃣", + "AddNet Model 3": "[附加網路] 模型 3️⃣", + "AddNet Model 4": "[附加網路] 模型 4️⃣", + "AddNet Model 5": "[附加網路] 模型 5️⃣", + "AddNet TEnc Weight 1": "[附加網路] 文字編碼器權重 1️⃣", + "AddNet TEnc Weight 2": "[附加網路] 文字編碼器權重 2️⃣", + "AddNet TEnc Weight 3": "[附加網路] 文字編碼器權重 3️⃣", + "AddNet TEnc Weight 4": "[附加網路] 文字編碼器權重 4️⃣", + "AddNet TEnc Weight 5": "[附加網路] 文字編碼器權重 5️⃣", + "AddNet UNet Weight 1": "[附加網路] UNet 權重 1️⃣", + "AddNet UNet Weight 2": "[附加網路] UNet 權重 2️⃣", + "AddNet UNet Weight 3": "[附加網路] UNet 權重 3️⃣", + "AddNet UNet Weight 4": "[附加網路] UNet 權重 4️⃣", + "AddNet UNet Weight 5": "[附加網路] UNet 權重 5️⃣", + "AddNet Weight 1": "[附加網路] 權重 1️⃣", + "AddNet Weight 2": "[附加網路] 權重 2️⃣", + "AddNet Weight 3": "[附加網路] 權重 3️⃣", + "AddNet Weight 4": "[附加網路] 權重 4️⃣", + "AddNet Weight 5": "[附加網路] 權重 5️⃣", + "Add new prompts": "Add new prompts", + "Add N to seed when repeating": "Add N to seed when repeating", + "Add number suffix": "Add number suffix", + "add_preview_prompt_button": "add_preview_prompt_button", + "Add program version to infotext": "Add program version to infotext", + "add prompt by image": "add prompt by image", + "Add/Remove...": "Add/Remove...", + "Address": "Address", 
+ "Address of the ComfyUI server as seen from the webui. Only used by the extension to load the ComfyUI iframe (requires reload UI)": "Address of the ComfyUI server as seen from the webui. Only used by the extension to load the ComfyUI iframe (requires reload UI)", + "Add Reverse Frame": "Add Reverse Frame", + "Add selection [Enter]": "Add selection [Enter]", + "Add Smart-Steps minimum step and ToMe merging ratio value to generation information.": "Add Smart-Steps minimum step and ToMe merging ratio value to generation information.", + "Add soundtrack": "Add soundtrack", + "'add_soundtrack' and 'soundtrack_path' aren't being honoured in \"Interpolate an existing video\" mode. Original vid audio will be used instead with the same slow-mo rules above.": "'add_soundtrack' and 'soundtrack_path' aren't being honoured in \"Interpolate an existing video\" mode. Original vid audio will be used instead with the same slow-mo rules above.", + "Add to / replace in saved directories": "加入至或取代已儲存的目錄", + "Add to Sequence X": "Add to Sequence X", + "Add to Sequence Y": "Add to Sequence Y", + "add_trigger_words_button": "add_trigger_words_button", + "Add trigger words to prompt": "Add trigger words to prompt", + "Add user name to infotext when authenticated": "Add user name to infotext when authenticated", + "Add VAE hash to infotext": "Add VAE hash to infotext", + "Add VAE name to infotext": "Add VAE name to infotext", + "Add Vectorscope CC parameters to generation information": "Add Vectorscope CC parameters to generation information", + "Add weights to Sequence X": "Add weights to Sequence X", + "ADetailer CFG scale": "ADetailer CFG scale", + "ADetailer CFG scale 10th": "ADetailer CFG scale 10th", + "ADetailer CFG scale 2nd": "ADetailer CFG scale 2nd", + "ADetailer CFG scale 3rd": "ADetailer CFG scale 3rd", + "ADetailer CFG scale 4th": "ADetailer CFG scale 4th", + "ADetailer CFG scale 5th": "ADetailer CFG scale 5th", + "ADetailer CFG scale 6th": "ADetailer CFG scale 6th", + 
"ADetailer CFG scale 7th": "ADetailer CFG scale 7th", + "ADetailer CFG scale 8th": "ADetailer CFG scale 8th", + "ADetailer CFG scale 9th": "ADetailer CFG scale 9th", + "ADetailer checkpoint": "ADetailer checkpoint", + "ADetailer checkpoint 10th": "ADetailer checkpoint 10th", + "ADetailer checkpoint 2nd": "ADetailer checkpoint 2nd", + "ADetailer checkpoint 3rd": "ADetailer checkpoint 3rd", + "ADetailer checkpoint 4th": "ADetailer checkpoint 4th", + "ADetailer checkpoint 5th": "ADetailer checkpoint 5th", + "ADetailer checkpoint 6th": "ADetailer checkpoint 6th", + "ADetailer checkpoint 7th": "ADetailer checkpoint 7th", + "ADetailer checkpoint 8th": "ADetailer checkpoint 8th", + "ADetailer checkpoint 9th": "ADetailer checkpoint 9th", + "ADetailer CLIP skip": "ADetailer CLIP skip", + "ADetailer CLIP skip 10th": "ADetailer CLIP skip 10th", + "ADetailer CLIP skip 2nd": "ADetailer CLIP skip 2nd", + "ADetailer CLIP skip 3rd": "ADetailer CLIP skip 3rd", + "ADetailer CLIP skip 4th": "ADetailer CLIP skip 4th", + "ADetailer CLIP skip 5th": "ADetailer CLIP skip 5th", + "ADetailer CLIP skip 6th": "ADetailer CLIP skip 6th", + "ADetailer CLIP skip 7th": "ADetailer CLIP skip 7th", + "ADetailer CLIP skip 8th": "ADetailer CLIP skip 8th", + "ADetailer CLIP skip 9th": "ADetailer CLIP skip 9th", + "ADetailer confidence threshold %": "ADetailer confidence threshold %", + "ADetailer denoising strength": "ADetailer denoising strength", + "ADetailer erosion (-) / dilation (+)": "ADetailer erosion (-) / dilation (+)", + "ADetailer mask blur": "ADetailer mask blur", + "ADetailer model": "ADetailer model", + "ADetailer model 10th": "ADetailer model 10th", + "ADetailer model 2nd": "ADetailer model 2nd", + "ADetailer model 3rd": "ADetailer model 3rd", + "ADetailer model 4th": "ADetailer model 4th", + "ADetailer model 5th": "ADetailer model 5th", + "ADetailer model 6th": "ADetailer model 6th", + "ADetailer model 7th": "ADetailer model 7th", + "ADetailer model 8th": "ADetailer model 8th", + 
"ADetailer model 9th": "ADetailer model 9th", + "ADetailer negative prompt": "ADetailer negative prompt", + "ADetailer negative prompt 10th\nIf blank, the main negative prompt is used.": "ADetailer negative prompt 10th\nIf blank, the main negative prompt is used.", + "ADetailer negative prompt 2nd": "ADetailer negative prompt 2nd", + "ADetailer negative prompt 2nd\nIf blank, the main negative prompt is used.": "ADetailer negative prompt 2nd\nIf blank, the main negative prompt is used.", + "ADetailer negative prompt 3rd": "ADetailer negative prompt 3rd", + "ADetailer negative prompt 3rd\nIf blank, the main negative prompt is used.": "ADetailer negative prompt 3rd\nIf blank, the main negative prompt is used.", + "ADetailer negative prompt 4th": "ADetailer negative prompt 4th", + "ADetailer negative prompt 4th\nIf blank, the main negative prompt is used.": "ADetailer negative prompt 4th\nIf blank, the main negative prompt is used.", + "ADetailer negative prompt 5th": "ADetailer negative prompt 5th", + "ADetailer negative prompt 5th\nIf blank, the main negative prompt is used.": "ADetailer negative prompt 5th\nIf blank, the main negative prompt is used.", + "ADetailer negative prompt 6th\nIf blank, the main negative prompt is used.": "ADetailer negative prompt 6th\nIf blank, the main negative prompt is used.", + "ADetailer negative prompt 7th\nIf blank, the main negative prompt is used.": "ADetailer negative prompt 7th\nIf blank, the main negative prompt is used.", + "ADetailer negative prompt 8th\nIf blank, the main negative prompt is used.": "ADetailer negative prompt 8th\nIf blank, the main negative prompt is used.", + "ADetailer negative prompt 9th\nIf blank, the main negative prompt is used.": "ADetailer negative prompt 9th\nIf blank, the main negative prompt is used.", + "ADetailer negative prompt\nIf blank, the main negative prompt is used.": "ADetailer negative prompt\nIf blank, the main negative prompt is used.", + "ADetailer prompt": "ADetailer prompt", + 
"ADetailer prompt 10th\nIf blank, the main prompt is used.": "ADetailer prompt 10th\nIf blank, the main prompt is used.", + "ADetailer prompt 2nd": "ADetailer prompt 2nd", + "ADetailer prompt 2nd\nIf blank, the main prompt is used.": "ADetailer prompt 2nd\nIf blank, the main prompt is used.", + "ADetailer prompt 3rd": "ADetailer prompt 3rd", + "ADetailer prompt 3rd\nIf blank, the main prompt is used.": "ADetailer prompt 3rd\nIf blank, the main prompt is used.", + "ADetailer prompt 4th": "ADetailer prompt 4th", + "ADetailer prompt 4th\nIf blank, the main prompt is used.": "ADetailer prompt 4th\nIf blank, the main prompt is used.", + "ADetailer prompt 5th": "ADetailer prompt 5th", + "ADetailer prompt 5th\nIf blank, the main prompt is used.": "ADetailer prompt 5th\nIf blank, the main prompt is used.", + "ADetailer prompt 6th\nIf blank, the main prompt is used.": "ADetailer prompt 6th\nIf blank, the main prompt is used.", + "ADetailer prompt 7th\nIf blank, the main prompt is used.": "ADetailer prompt 7th\nIf blank, the main prompt is used.", + "ADetailer prompt 8th\nIf blank, the main prompt is used.": "ADetailer prompt 8th\nIf blank, the main prompt is used.", + "ADetailer prompt 9th\nIf blank, the main prompt is used.": "ADetailer prompt 9th\nIf blank, the main prompt is used.", + "ADetailer prompt\nIf blank, the main prompt is used.": "ADetailer prompt\nIf blank, the main prompt is used.", + "ADetailer sampler": "ADetailer sampler", + "ADetailer sampler 10th": "ADetailer sampler 10th", + "ADetailer sampler 2nd": "ADetailer sampler 2nd", + "ADetailer sampler 3rd": "ADetailer sampler 3rd", + "ADetailer sampler 4th": "ADetailer sampler 4th", + "ADetailer sampler 5th": "ADetailer sampler 5th", + "ADetailer sampler 6th": "ADetailer sampler 6th", + "ADetailer sampler 7th": "ADetailer sampler 7th", + "ADetailer sampler 8th": "ADetailer sampler 8th", + "ADetailer sampler 9th": "ADetailer sampler 9th", + "ADetailer steps": "ADetailer steps", + "ADetailer steps 10th": 
"ADetailer steps 10th", + "ADetailer steps 2nd": "ADetailer steps 2nd", + "ADetailer steps 3rd": "ADetailer steps 3rd", + "ADetailer steps 4th": "ADetailer steps 4th", + "ADetailer steps 5th": "ADetailer steps 5th", + "ADetailer steps 6th": "ADetailer steps 6th", + "ADetailer steps 7th": "ADetailer steps 7th", + "ADetailer steps 8th": "ADetailer steps 8th", + "ADetailer steps 9th": "ADetailer steps 9th", + "ADetailer VAE": "ADetailer VAE", + "ADetailer VAE 10th": "ADetailer VAE 10th", + "ADetailer VAE 2nd": "ADetailer VAE 2nd", + "ADetailer VAE 3rd": "ADetailer VAE 3rd", + "ADetailer VAE 4th": "ADetailer VAE 4th", + "ADetailer VAE 5th": "ADetailer VAE 5th", + "ADetailer VAE 6th": "ADetailer VAE 6th", + "ADetailer VAE 7th": "ADetailer VAE 7th", + "ADetailer VAE 8th": "ADetailer VAE 8th", + "ADetailer VAE 9th": "ADetailer VAE 9th", + "ADetailer x(→) offset": "ADetailer x(→) offset", + "ADetailer y(↑) offset": "ADetailer y(↑) offset", + "A directory or a file": "A directory or a file", + "adjust denoise each img2img batch": "adjust denoise each img2img batch", + "Adjust settings": "Adjust settings", + "adjusts the aspect ratio for the depth calculation (normally 1)": "adjusts the aspect ratio for the depth calculation (normally 1)", + "adjusts the aspect ratio for the depth calculations": "adjusts the aspect ratio for the depth calculations", + "adjusts the overall contrast per frame [default neutral at 1.0]": "adjusts the overall contrast per frame [default neutral at 1.0]", + "adjusts the overall contrast per frame [neutral at 1.0, recommended to *not* play with this param]": "adjusts the overall contrast per frame [neutral at 1.0, recommended to *not* play with this param]", + "adjusts the scale at which the canvas is moved in 3D by the translation_z value. [maximum range -180 to +180, with 0 being undefined. 
Values closer to 180 will make the image have less depth, while values closer to 0 will allow more depth]": "adjusts the scale at which the canvas is moved in 3D by the translation_z value. [maximum range -180 to +180, with 0 being undefined. Values closer to 180 will make the image have less depth, while values closer to 0 will allow more depth]", + "adjust the brightness of the mask. Should be a positive number, with 1.0 meaning no adjustment.": "adjust the brightness of the mask. Should be a positive number, with 1.0 meaning no adjustment.", + "Adjust tilt for detected faces": "Adjust tilt for detected faces", + "Admirable": "Admirable", + "Advanced editing": "Advanced editing", + "Advanced Options": "Advanced Options", + "Advanced Settings": "Advanced Settings", + "Advertising": "Advertising", + "Affected areas": "Affected areas", + "Affects optical flow hybrid motion. 1 is normal flow. -1 is negative flow. 0.5 is half flow, etc...": "Affects optical flow hybrid motion. 1 is normal flow. -1 is negative flow. 
0.5 is half flow, etc...", + "Affine": "Affine", + "A file on the same machine where the server is running.": "A file on the same machine where the server is running.", + "After": "After", + "After Generation": "After Generation", + "After recompiling, later inferences will reuse the newly compiled model and achieve faster running times.": "After recompiling, later inferences will reuse the newly compiled model and achieve faster running times.", + "A higher temperature will produce more diverse results, but with a higher risk of less coherent text": "A higher temperature will produce more diverse results, but with a higher risk of less coherent text", + "Alien": "Alien", + "All Displayed Ones": "All Displayed Ones", + "all filters": "all filters", + "all - force multiple --> idea by redditor WestWordHoeDown, it forces to choose between 2 and 3 image types": "all - force multiple --> idea by redditor WestWordHoeDown, it forces to choose between 2 and 3 image types", + "All models in this directory will receive the selected model's metadata": "此目錄下的所有模型將被寫入所選模型的中繼資料", + "all --> normally picks a image type as random. Can choose a ‘other’ more unique type.": "all --> normally picks a image type as random. Can choose a ‘other’ more unique type.", + "Allow All": "Allow All", + "Allow img2img": "Allow img2img", + "Allow manually uploading pnginfo from the uploaded plugin image to txt2img or img2img.": "Allow manually uploading pnginfo from the uploaded plugin image to txt2img or img2img.", + "Allow NSFW": "Allow NSFW", + "allow overwrite": "allow overwrite", + "Allow overwrite output-model": "Allow overwrite output-model", + "Allow Painting": "Allow Painting", + "Allow Post Processing": "Allow Post Processing", + "Allows for random parameters during txt2img generation. 
This script is processed for all generations, regardless of the script selected, meaning this script will function with others as well, such as AUTOMATIC1111/stable-diffusion-webui-wildcards": "Allows for random parameters during txt2img generation. This script is processed for all generations, regardless of the script selected, meaning this script will function with others as well, such as AUTOMATIC1111/stable-diffusion-webui-wildcards", + "allows keyframing different samplers. Use names as they appear in ui dropdown in 'run' tab": "allows keyframing different samplers. Use names as they appear in ui dropdown in 'run' tab", + "allows keyframing different sd models. use *full* name as appears in ui dropdown": "allows keyframing different sd models. use *full* name as appears in ui dropdown", + "allows keyframing different sd models. Use *full* name as appears in ui dropdown": "allows keyframing different sd models. Use *full* name as appears in ui dropdown", + "allows keyframing of samplers. Use names as they appear in ui dropdown in 'run' tab": "allows keyframing of samplers. Use names as they appear in ui dropdown in 'run' tab", + "Allows the model to learn brightness and contrast with greater detail during training. Value controls the strength of the effect, 0 disables it.": "Allows the model to learn brightness and contrast with greater detail during training. 
Value controls the strength of the effect, 0 disables it.", + "Allow TF16 reduced precision math ops": "Allow TF16 reduced precision math ops", + "Allow Unsafe IP Address": "Allow Unsafe IP Address", + "🔄 All Reset": "🔄 All Reset", + "All string sanitization logic has been moved into the": "All string sanitization logic has been moved into the", + "alpha": "alpha", + "Alphabetical Order": "Alphabetical Order", + "Alpha channel image": "Alpha channel image", + "Alpha (default is half of Net dim)": "Alpha (default is half of Net dim)", + "Alpha matting": "Alpha matting", + "alpha threshold": "alpha threshold", + "Alpha threshold": "Alpha threshold", + "Also add data for WebUI metadata editor": "Also add data for WebUI metadata editor", + "also delete off-screen images": "also delete off-screen images", + "also enable wierd blocky upscale mode": "also enable wierd blocky upscale mode", + "Also output single joined audio file (will be named _joined.wav)": "Also output single joined audio file (will be named _joined.wav)", + "alternate": "alternate", + "Alternatively, if one wants to keep faraway details, one may instead make them smaller and closer than they physically would be (in order to fit nicely into the limited depth space).": "Alternatively, if one wants to keep faraway details, one may instead make them smaller and closer than they physically would be (in order to fit nicely into the limited depth space).", + "Alternatively, use": "或者,您也可以使用", + "Alternatively, you can enable": "Alternatively, you can enable", + "Always append space if inserting at the end of the textbox": "Always append space if inserting at the end of the textbox", + "Always bodysnatch responsibly.": "Always bodysnatch responsibly.", + "Always Display Buttons": "Always Display Buttons", + "Always Display Buttons on model cards": "Always Display Buttons on model cards", + "Always fully": "Always fully", + "A merger of the two checkpoints will be generated in your": "A merger of the two 
checkpoints will be generated in your", + "(amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2)": "(amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2)", + "amount of graininess to add per frame for diffusion diversity": "amount of graininess to add per frame for diffusion diversity", "Amount of images to generate": "Amount of images to generate", - "Size to generate": "Size to generate", - "CFG": "提示詞相關性", - "hires. fix": "hires. fix", - "Denoise strength": "Denoise strength", - "model to use": "model to use", - "hires upscaler": "hires upscaler", - "Quality Gate": "Quality Gate", - "Uses aesthetic image scorer extension to check the quality of the image.": "Uses aesthetic image scorer extension to check the quality of the image.", - "Once turned on, it will retry for n amount of times to get an image with the quality score. If not, it will take the best image so far and continue.": "Once turned on, it will retry for n amount of times to get an image with the quality score. 
If not, it will take the best image so far and continue.", - "Idea and inspiration by xKean.": "Idea and inspiration by xKean.", - "Quality": "Quality", + "amount of presence of previous frame to influence next frame, also controls steps in the following formula [steps - (strength_schedule * steps)]": "amount of presence of previous frame to influence next frame, also controls steps in the following formula [steps - (strength_schedule * steps)]", "Amount of tries": "Amount of tries", - "IMG2IMG upscale": "IMG2IMG upscale", - "Upscale image with IMG2IMG": "Upscale image with IMG2IMG", + "Amount schedule": "Amount schedule", "Amount times to repeat upscaling with IMG2IMG (loopback)": "Amount times to repeat upscaling with IMG2IMG (loopback)", - "img2img Sampling steps": "img2img Sampling steps", + "Amplification of distortion of circles": "Amplification of distortion of circles", + "Analog Film": "Analog Film", + "Analyse": "Analyse", + "Analyse Face": "Analyse Face", + "Analysis": "Analysis", + "Analysis Mode": "Analysis Mode", + "Analyze": "Analyze", + "Analyzes video frames for camera motion and applies movement to render.": "Analyzes video frames for camera motion and applies movement to render.", + "Ancestral ETA Schedule": "Ancestral ETA Schedule", + "AND": "AND", + "and drop a star if you like it!": "and drop a star if you like it!", + "and generate images according to random segmentation which preserve image layout.": "and generate images according to random segmentation which preserve image layout.", + "and report your problem.": "and report your problem.", + "and restart webui, and enjoy the joy of creation!": "and restart webui, and enjoy the joy of creation!", + ", and the underlying": ", and the underlying", + "and upscale to the size of the original video.": "and upscale to the size of the original video.", + "A negative prompt to use when generating class images. Can be empty.": "A negative prompt to use when generating class images. 
Can be empty.", + "A negative prompt to use when generating preview images.": "A negative prompt to use when generating preview images.", + "Angle": "Angle", + "angular": "angular", + "animal": "animal", + "animal - A random (fictional) animal. Has a chance to have human characteristics, such as clothing added.": "animal - A random (fictional) animal. Has a chance to have human characteristics, such as clothing added.", + "Animation mode": "動畫模式", + "anime": "anime", + "Anime": "Anime", + "Anime-inclined great guide (by FizzleDorf) with lots of examples:": "充滿動漫風格的詳細指南(由 FizzleDorf 製作),內含許多範例:", + "anime key visual": "anime key visual", + "Anime Remove Background": "Anime Remove Background", + "Anime Style (Up Detection, Down mask Quality)": "Anime Style (Up Detection, Down mask Quality)", + "Annotator resolution": "Annotator resolution", + "📣 Announcements": "📣 Announcements", + "Answer": "Answer", + "Anti Blur": "Anti Blur", + "API endpoint": "API endpoint", + "API Endpoint": "API Endpoint", + "API info may not be necessary for some boorus, but certain information or posts may fail to load without it. For example, Danbooru doesn't show certain posts in search results unless you auth as a Gold tier member.": "API info may not be necessary for some boorus, but certain information or posts may fail to load without it. For example, Danbooru doesn't show certain posts in search results unless you auth as a Gold tier member.", + "API key": "API key", + "API key for authenticating with Civitai. This is required to download some models. See Wiki for more details.": "API key for authenticating with Civitai. This is required to download some models. 
See Wiki for more details.", + "API Keys": "API Keys", + "Append": "Append", + "Append Artist tags from CLIP": "Append Artist tags from CLIP", + "Append Caption to File Name": "Append Caption to File Name", + "Append commas": "Append commas", + "Append DeepDanbooru to Caption": "Append DeepDanbooru to Caption", + "Append Flavor tags from CLIP": "Append Flavor tags from CLIP", + "Append Hires prompts to the end of the original prompts instead of replacing it.": "Append Hires prompts to the end of the original prompts instead of replacing it.", + "Append Medium tags from CLIP": "Append Medium tags from CLIP", + "Append prompts, not replace": "Append prompts, not replace", + "Append space on tag autocompletion": "Append space on tag autocompletion", + "Apply all selected styles to prompts.": "Apply all selected styles to prompts.", + "Apply and quit": "Apply and quit", + "Apply block weight from text": "Apply block weight from text", + "Apply button loads settings\nWrite custom name to enable save\nDelete automatically will save to file": "Apply button loads settings\nWrite custom name to enable save\nDelete automatically will save to file", + "Apply changes & restart server": "Apply changes & restart server", + "Apply changes to ALL displayed images": "Apply changes to ALL displayed images", + "Apply changes to filtered images": "Apply changes to filtered images", + "Apply changes to selected image": "Apply changes to selected image", + "Apply color correction to img2img results to match original colors": "Apply color correction to img2img results to match original colors", + "apply colormatch before adding noise (use with CN's Tile)": "apply colormatch before adding noise (use with CN's Tile)", + "Apply data": "Apply data", + "Apply horizontal Flip": "Apply horizontal Flip", + "Apply inside mask only": "Apply inside mask only", + "Apply LoRA checkpoint to TensorRT model": "Apply LoRA checkpoint to TensorRT model", + "Apply mask to original image": "Apply mask to 
original image", + "Apply mask to the Ref Image": "Apply mask to the Ref Image", + "Apply mask to the result": "Apply mask to the result", + "Apply only selected scripts to ADetailer": "Apply only selected scripts to ADetailer", + "Apply (Reload UI)": "Apply (Reload UI)", + "Apply scripts to faces": "Apply scripts to faces", + "Apply selection filter": "Apply selection filter", + "💾 Apply settings": "💾 Apply settings", + "Apply Settings": "Apply Settings", + "Apply Style": "Apply Style", + "Apply to": "Apply to", + "Apply transfer control when loading models": "Apply transfer control when loading models", + "app_started_callback": "app_started_callback", + "A prompt describing the subject. Use [Filewords] to parse image filename/.txt to insert existing prompt here.": "A prompt describing the subject. Use [Filewords] to parse image filename/.txt to insert existing prompt here.", + "A prompt for generating classification/regularization images. See the readme for more info.": "A prompt for generating classification/regularization images. See the readme for more info.", + "A prompt used to generate a 'baseline' image that will be created with other samples to verify model fidelity.": "A prompt used to generate a 'baseline' image that will be created with other samples to verify model fidelity.", + "a random seed will be used on each frame of the animation": "a random seed will be used on each frame of the animation", + "Architectural": "Architectural", + "architecture": "architecture", + "Architecture": "Architecture", + "Area": "Area", + "Area (large to small)": "Area (large to small)", + "Arguments are case-sensitive.": "Arguments are case-sensitive.", + "Arm Length": "Arm Length", + "art deco": "art deco", + "Artistic": "Artistic", + "Artists have a major impact on the result. Automatically, it will select between 0-3 artists out of 3483 artists for your prompt.": "Artists have a major impact on the result. 
Automatically, it will select between 0-3 artists out of 3483 artists for your prompt.", + "art nouveau": "art nouveau", + "Arxiv": "Arxiv", + "as a UI to define your animation schedules (see the Parseq section in the Init tab).": "as a UI to define your animation schedules (see the Parseq section in the Init tab).", + "Ascending": "Ascending", + "A setting of 1 will cause every frame to receive diffusion in the sequence of image outputs. A setting of 2 will only diffuse on every other frame, yet motion will still be in effect. The output of images during the cadence sequence will be automatically blended, additively and saved to the specified drive. This may improve the illusion of coherence in some workflows as the content and context of an image will not change or diffuse during frames that were skipped. Higher values of 4-8 cadence will skip over a larger amount of frames and only diffuse the “Nth” frame as set by the diffusion_cadence value. This may produce more continuity in an animation, at the cost of little opportunity to add more diffused content. In extreme examples, motion within a frame will fail to produce diverse prompt context, and the space will be filled with lines or approximations of content - resulting in unexpected animation patterns and artifacts. Video Input & Interpolation modes are not affected by diffusion_cadence.": "A setting of 1 will cause every frame to receive diffusion in the sequence of image outputs. A setting of 2 will only diffuse on every other frame, yet motion will still be in effect. The output of images during the cadence sequence will be automatically blended, additively and saved to the specified drive. This may improve the illusion of coherence in some workflows as the content and context of an image will not change or diffuse during frames that were skipped. Higher values of 4-8 cadence will skip over a larger amount of frames and only diffuse the “Nth” frame as set by the diffusion_cadence value. 
This may produce more continuity in an animation, at the cost of little opportunity to add more diffused content. In extreme examples, motion within a frame will fail to produce diverse prompt context, and the space will be filled with lines or approximations of content - resulting in unexpected animation patterns and artifacts. Video Input & Interpolation modes are not affected by diffusion_cadence.", + "Aspect Ratio schedule": "Aspect Ratio schedule", + "Aspect Ratios Dropdown": "Aspect Ratios Dropdown", + "Attempt to automatically set training parameters based on total VRAM. Still under development.": "Attempt to automatically set training parameters based on total VRAM. Still under development.", + "Audio (if provided) will *not* be transferred to the interpolated video if Slow-Mo is enabled.": "Audio (if provided) will *not* be transferred to the interpolated video if Slow-Mo is enabled.", + "Audio Player 0": "Audio Player 0", + "Audio Player 1": "Audio Player 1", + "Audio Player 2": "Audio Player 2", + "Audio Player 3": "Audio Player 3", + "Audio Player 4": "Audio Player 4", + "Audio Player 5": "Audio Player 5", + "Audio Player 6": "Audio Player 6", + "Audio Player 7": "Audio Player 7", + "Author": "作者", + "Author of this model": "此模型的作者", + "auto": "auto", + "AUTO": "AUTO", + "autocast": "autocast", + "Autocomplete options": "Autocomplete options", + "Autocontrast low/high cutoff schedules 0-100. Low 0 High 100 is full range.": "Autocontrast low/high cutoff schedules 0-100. Low 0 High 100 is full range.", + "Auto-contrasts the mask for the composite. If enabled, uses the low/high autocontrast cutoff schedules.": "Auto-contrasts the mask for the composite. If enabled, uses the low/high autocontrast cutoff schedules.", + "auto-delete imgs when video is ready": "auto-delete imgs when video is ready", + "auto-delete imgs when video is ready. Will break Resume from timestring!": "auto-delete imgs when video is ready. 
Will break Resume from timestring!", + "auto-delete inputframes (incl CN ones) when video is ready": "auto-delete inputframes (incl CN ones) when video is ready", + "Autodetect": "Autodetect", + "Auto detect size from img2img": "Auto detect size from img2img", + "Auto det_size : Will load model twice and test faces on each if needed (old behaviour). Takes more VRAM. Precedence over fixed det_size": "Auto det_size : Will load model twice and test faces on each if needed (old behaviour). Takes more VRAM. Precedence over fixed det_size", + "Auto enable when loading the PNG Info of a generation that used FreeU": "Auto enable when loading the PNG Info of a generation that used FreeU", + "Auto face size adjustment by model": "Auto face size adjustment by model", + "Auto load and save JSON database": "Auto load and save JSON database", + "automatic": "automatic", + "Automatic": "Automatic", + "Automatically purge wildcard cache on every generation.": "Automatically purge wildcard cache on every generation.", + "Automatically unpack .zip after downloading": "Automatically unpack .zip after downloading", + "Automatically unpack .zip files after downloading": "Automatically unpack .zip files after downloading", + "“automatic” is entirely build around Latent Couple. It will pass artists and the amount of people/animals/objects to generate in the prompt automatically. Set the prompt compounder equal to the amount of areas defined in Laten Couple.": "“automatic” is entirely build around Latent Couple. It will pass artists and the amount of people/animals/objects to generate in the prompt automatically. Set the prompt compounder equal to the amount of areas defined in Laten Couple.", + "Automaticlly revert VAE to 32-bit floats": "Automaticlly revert VAE to 32-bit floats", + "Auto SAM": "Auto SAM", + "Auto SAM Config": "Auto SAM Config", + "Auto SAM is mainly for semantic segmentation and image layout generation, which is supported based on ControlNet. 
You must have ControlNet extension installed, and you should not change its directory name (sd-webui-controlnet).": "Auto SAM is mainly for semantic segmentation and image layout generation, which is supported based on ControlNet. You must have ControlNet extension installed, and you should not change its directory name (sd-webui-controlnet).", + "Auto save merged model": "Auto save merged model", + "Auto search port": "Auto search port", + "Auto segmentation output": "Auto segmentation output", + "Auto Tagging": "Auto Tagging", + "Auto Tagging option": "Auto Tagging option", + "Available LORA": "Available LORA", + "Available LORAs": "Available LORAs", + "Available LyCORIS": "Available LyCORIS", + "Available TensorRT Engine Profiles": "Available TensorRT Engine Profiles", + "Axis": "Axis", + "Backend": "Backend", + "- Background": "- Background", + "Background": "Background", + "background color": "背景顏色", + "Background Color": "背景顏色", + "Background gradiant color": "Background gradiant color", + "Background source(mp4 or directory containing images)": "Background source(mp4 or directory containing images)", + "Background type": "Background type", + "Back in the main UI, select the TRT model from the sd_unet dropdown menu at the top of the page.": "Back in the main UI, select the TRT model from the sd_unet dropdown menu at the top of the page.", + "Back to top": "Back to top", + "Backup original text file (original file will be renamed like filename.000, .001, .002, ...)": "Backup original text file (original file will be renamed like filename.000, .001, .002, ...)", + "Balance between eyes": "Balance between eyes", + "baroque": "baroque", + "base": "base", + "Base": "Base", + "Base alpha": "Base alpha", + "Base Depth": "Base Depth", + "Base model": "Base model", + "Base model:": "Base model:", + "Base Model:": "Base Model:", + "Base Model used for Add-Difference mode": "Base Model used for Add-Difference mode", + "Base on Stable Diffusion V2": "Base on Stable 
Diffusion V2", + "Base Sampler": "Base Sampler", + "Basic info": "Basic info", + "⚙️ Basic Settings": "⚙️ Basic Settings", + "Basis": "Basis", + "Batch Edit Captions": "Batch Edit Captions", + "Batch from Dir": "Batch from Dir", + "Batch from directory": "Batch from directory", + "Batch Images": "Batch Images", + "Batch img2img": "Batch img2img", + "Batch import prompts": "Batch import prompts", + "Batch Mode, Resume and more": "Batch Mode, Resume and more", + "Batch Mode/ run from setting files": "Batch Mode/ run from setting files", + "Batch name": "批次名稱", + "batch process": "batch process", + "Batch process": "Batch process", + "Batch process images. Will apply enhancement in the tools enhancement tab.": "Batch process images. Will apply enhancement in the tools enhancement tab.", + "Batch Run": "Batch Run", + "Batch Settings": "Batch Settings", + "batch size": "batch size", + "batch size for large queries": "batch size for large queries", + "batch size in generation": "batch size in generation", + "batch size, restore face, hires fix settigns must be set here": "batch size, restore face, hires fix settigns must be set here", + "Batch Sources Images": "Batch Sources Images", + "Batch-Warp": "Batch-Warp", + "bauhaus": "bauhaus", + "Beam Search": "Beam Search", + "Before": "Before", + "Before Motion": "Before Motion", + "Before sending image, ensure img2img Inpaint tab is selected.": "Before sending image, ensure img2img Inpaint tab is selected.", + "Begin train": "Begin train", + "behind HEAD": "behind HEAD", + "Below here, you can generate a set of random prompts, and send them to the Workflow prompt field. The generation of the prompt uses the settings in the Main tab.": "Below here, you can generate a set of random prompts, and send them to the Workflow prompt field. 
The generation of the prompt uses the settings in the Main tab.", + "Benchmark": "Benchmark", + "Benchmark Data": "Benchmark Data", + "Benchmark level": "Benchmark level", + "Benchmarks...": "Benchmarks...", + "best": "best", + "best practice page": "best practice page", + "beta": "beta", + "Better": "Better", + "Bicubic": "Bicubic", + "Bilinear": "Bilinear", + "Birthday": "Birthday", + "Bitwise operation": "Bitwise operation", + "Black/Whitelist models": "Black/Whitelist models", + "blend": "blend", + "Blend": "Blend", + "Blend Faces ((Source|Checkpoint)+References = 1)": "Blend Faces ((Source|Checkpoint)+References = 1)", + "Blend factor max": "Blend factor max", + "Blend factor slope": "Blend factor slope", + "Blend mode": "混合模式", + "Blend multiple outputs (batch, ControlNet maps)": "Blend multiple outputs (batch, ControlNet maps)", + "Block ID": "Block ID", + "Block method": "Block method", + "Block NSFW images of a certain threshold and higher.\n Civitai marks all images for NSFW models as also being NSFW.\n These ratings do not seem to be explicitly defined on Civitai's\n end, but \"Soft\" seems to be suggestive, with NSFW elements but\n not explicit nudity, \"Mature\" seems to include nudity but not\n always, and \"X\" seems to be explicitly adult content.": "Block NSFW images of a certain threshold and higher.\n Civitai marks all images for NSFW models as also being NSFW.\n These ratings do not seem to be explicitly defined on Civitai's\n end, but \"Soft\" seems to be suggestive, with NSFW elements but\n not explicit nudity, \"Mature\" seems to include nudity but not\n always, and \"X\" seems to be explicitly adult content.", + "Block NSFW images of a certain threshold and higher.\nCivitai marks all images for NSFW models as also being NSFW.\nThese ratings do not seem to be explicitly defined on Civitai's\nend, but \"Soft\" seems to be suggestive, with NSFW elements but\nnot explicit nudity, \"Mature\" seems to include nudity but not\nalways, and \"X\" 
seems to be explicitly adult content.": "Block NSFW images of a certain threshold and higher.\nCivitai marks all images for NSFW models as also being NSFW.\nThese ratings do not seem to be explicitly defined on Civitai's\nend, but \"Soft\" seems to be suggestive, with NSFW elements but\nnot explicit nudity, \"Mature\" seems to include nudity but not\nalways, and \"X\" seems to be explicitly adult content.", + "Block NSFW images of a certain threshold and higher. Civitai marks all images for NSFW models as also being NSFW. These ratings do not seem to be explicitly defined on Civitai's end, but \"Soft\" seems to be suggestive, with NSFW elements but not explicit nudity, \"Mature\" seems to include nudity but not always, and \"X\" seems to be explicitly adult content.": "Block NSFW images of a certain threshold and higher. Civitai marks all images for NSFW models as also being NSFW. These ratings do not seem to be explicitly defined on Civitai's end, but \"Soft\" seems to be suggestive, with NSFW elements but not explicit nudity, \"Mature\" seems to include nudity but not always, and \"X\" seems to be explicitly adult content.", + "Block NSFW Level Above": "Block NSFW Level Above", + "Block size": "Block size", + "blur": "模糊", + "Blur": "Blur", + "Blur amount:": "模糊量:", + "Blur edges of final overlay mask, if used. Minimum = 0 (no blur)": "Blur edges of final overlay mask, if used. 
Minimum = 0 (no blur)", + "blur_gaussian": "blur_gaussian", + "- Body": "- Body", + "Body Parameters": "Body Parameters", + "Booru Score Threshold": "Booru Score Threshold", + "BOOST (multi-resolution merging)": "提升(多分辨率合併)", + "border": "border", + "Border": "邊界", + "border frames": "border frames", + "Border Frames": "Border Frames", + "Border Key Frames": "Border Key Frames", + "Border mode": "Border mode", + "Both": "Both", + "both directions": "both directions", + "bottom-top": "bottom-top", + "box_nms_thresh": "box_nms_thresh", + "branch": "branch", + "brightness": "brightness", + "Brightness": "Brightness", + "Brightness:": "Brightness:", + "Bring prompts and setting into one column left side": "Bring prompts and setting into one column left side", + "Browser": "Browser", + "Browser default": "Browser default", + "Build": "Build", + "Build a face based on a batch list of images. Will blend the resulting face and store the checkpoint in the faceswaplab/faces directory.": "Build a face based on a batch list of images. Will blend the resulting face and store the checkpoint in the faceswaplab/faces directory.", + "built with gradio": "built with gradio", + "(Buttons will display the contents of the selected directory without acting as a search filter.)": "(Buttons will display the contents of the selected directory without acting as a search filter.)", + "button to free up memory.": "button to free up memory.", + "By comma": "By comma", + "By default and where supported, SVG-Edit can store your editor preferences and SVG content locally on your machine so you do not need to add these back each time you load SVG-Edit. If, for privacy reasons, you do not wish to store this information on your machine, you can change away from the default option below.": "By default and where supported, SVG-Edit can store your editor preferences and SVG content locally on your machine so you do not need to add these back each time you load SVG-Edit. 
If, for privacy reasons, you do not wish to store this information on your machine, you can change away from the default option below.", + "By default, the algorithm tends to like dark images too much, if you think the output is too dark or not dark enough, you can adjust this ratio. 1 = ‘Do not darken at all’, 0 = ‘A totally black image is ok’, default = 0.9.": "By default, the algorithm tends to like dark images too much, if you think the output is too dark or not dark enough, you can adjust this ratio. 1 = ‘Do not darken at all’, 0 = ‘A totally black image is ok’, default = 0.9.", + "By none": "By none", + "By tokens": "By tokens", + "By vectors": "By vectors", + "By words": "By words", + "(C10) Thertiary": "(C10) Thertiary", + "(C1) Thertiary": "(C1) Thertiary", + "(C2) Thertiary": "(C2) Thertiary", + "(C3) Thertiary": "(C3) Thertiary", + "(C4) Thertiary": "(C4) Thertiary", + "(C5) Thertiary": "(C5) Thertiary", + "(C6) Thertiary": "(C6) Thertiary", + "(C7) Thertiary": "(C7) Thertiary", + "(C8) Thertiary": "(C8) Thertiary", + "(C9) Thertiary": "(C9) Thertiary", + "Cache FP16 weight for LoRA": "Cache FP16 weight for LoRA", + "(Cache fp16 weight when enabling FP8, will increase the quality of LoRA. Use more system ram.)": "(Cache fp16 weight when enabling FP8, will increase the quality of LoRA. 
Use more system ram.)", + "Cache the compiled models on disk for faster model load in subsequent launches (Recommended)": "Cache the compiled models on disk for faster model load in subsequent launches (Recommended)", + "Cadence": "節奏", + "Cafe Aesthetic": "Cafe Aesthetic", + "calculate dimension of LoRAs(It may take a few minutes if there are many LoRAs)": "calculate dimension of LoRAs(It may take a few minutes if there are many LoRAs)", + "Calculate hash": "Calculate hash", + "Calculate hash for all checkpoint": "Calculate hash for all checkpoint", + "Calculate the optimal GIF palette, improves quality significantly, removes banding": "Calculate the optimal GIF palette, improves quality significantly, removes banding", + "- Camera": "- Camera", + "Camera Far": "Camera Far", + "Camera Focal Length": "Camera Focal Length", + "Camera Near": "Camera Near", + "Camera Parameters": "Camera Parameters", + "(can be any valid CSS value, for example 768px or 20em)": "(can be any valid CSS value, for example 768px or 20em)", + "Can be empty, indicating no translation": "Can be empty, indicating no translation", + "Can be empty,indicating no translation": "Can be empty,indicating no translation", + "Cancel Download": "Cancel Download", + "Cancel training.": "Cancel training.", + "canny": "canny", + "Canvas Height": "畫布高度", + "- Canvas Size": "- Canvas Size", + "Canvas Width": "畫布寬度", + "Caption Backup File": "Caption Backup File", + "Caption File Ext": "Caption File Ext", + "Caption of Selected Image": "Caption of Selected Image", + "Caption: Returns a string describing the image": "Caption: Returns a string describing the image", + "Captions": "描述", + "Caption Text File": "Caption Text File", + "Caption type": "Caption type", + "Card height for Extra Networks (px)": "Card height for Extra Networks (px)", + "cards": "cards", + "Card width for Extra Networks (px)": "Card width for Extra Networks (px)", + "Carriage Return": "Carriage Return", + "cartoon": "cartoon", + 
"Cartoon": "Cartoon", + "cascadePSP": "cascadePSP", + "case sensitive": "case sensitive", + "Categorical mask status": "Categorical mask status", + "Categorys of prompt": "Categorys of prompt", + "Catppuccin Flavor": "Catppuccin Flavor", + "⚠ Caution: You should only use these options if you know what you are doing. ⚠": "⚠ 警告:只有您知道在做什麼時才使用這些選項。⚠", + "Censor NSFW when NSFW is disabled": "Censor NSFW when NSFW is disabled", + "Center Crop": "Center Crop", + "centered": "centered", + "(CF10) Checkpoint format": "(CF10) Checkpoint format", + "(CF1) Checkpoint format": "(CF1) Checkpoint format", + "(CF2) Checkpoint format": "(CF2) Checkpoint format", + "(CF3) Checkpoint format": "(CF3) Checkpoint format", + "(CF4) Checkpoint format": "(CF4) Checkpoint format", + "(CF5) Checkpoint format": "(CF5) Checkpoint format", + "(CF6) Checkpoint format": "(CF6) Checkpoint format", + "(CF7) Checkpoint format": "(CF7) Checkpoint format", + "(CF8) Checkpoint format": "(CF8) Checkpoint format", + "(CF9) Checkpoint format": "(CF9) Checkpoint format", + "cfg scale": "提示詞相關性", + "CFG scale schedule": "CFG scale schedule", + "changeable blocks : BASE,IN00,IN01,IN02,IN03,IN04,IN05,IN06,IN07,IN08,IN09,IN10,IN11,M00,OUT00,OUT01,OUT02,OUT03,OUT04,OUT05,OUT06,OUT07,OUT08,OUT09,OUT10,OUT11": "changeable blocks : BASE,IN00,IN01,IN02,IN03,IN04,IN05,IN06,IN07,IN08,IN09,IN10,IN11,M00,OUT00,OUT01,OUT02,OUT03,OUT04,OUT05,OUT06,OUT07,OUT08,OUT09,OUT10,OUT11", + "Change brightness": "Change brightness", + "Change contrast": "Change contrast", + "Change CTRL keybindings to SHIFT": "將 Ctrl 改成 Shift 按鍵", + "Change gain": "Change gain", + "Change gamma": "Change gamma", + "⏱ Changelog": "⏱ Changelog", + "Changes are not applied to the text files until the \"Save all changes\" button is pressed.": "Changes are not applied to the text files until the \"Save all changes\" button is pressed.", + "Change saturation": "Change saturation", + "(changes seeds drastically; use CPU to produce the same picture across 
different videocard vendors)": "(changes seeds drastically; use CPU to produce the same picture across different videocard vendors)", + "Change your brush width to make it thinner if you want to draw something.": "Change your brush width to make it thinner if you want to draw something.", + "Chant filename": "Chant filename", + "Chant filename (Chants are longer prompt presets)": "Chant filename (Chants are longer prompt presets)", + "(Chants are longer prompt presets)": "(Chants are longer prompt presets)", + "character": "character", + "Chat": "Chat", + "ChatGPT Model Name": "ChatGPT Model Name", + "Check Console log for Downloading Status": "Check Console log for Downloading Status", + "Check models' new version": "Check models' new version", + "Check models’ new version": "Check models’ new version", + "Check New Version from Civitai": "Check New Version from Civitai", + "checkpoint": "checkpoint", + "Checkpoint A": "Checkpoint A", + "Checkpoint B": "Checkpoint B", + "Checkpoint C": "Checkpoint C", + "Checkpoint Dropdown": "Checkpoint Dropdown", + "Checkpoint dropdown: use filenames without paths": "Checkpoint dropdown: use filenames without paths", + "Checkpoint Format": "Checkpoint Format", + "Checkpoint schedule": "Checkpoint schedule", + "Check progress": "Check progress", + "checks": "checks", + "Check similarity": "Check similarity", + "Check tensors": "Check tensors", + "check this box to enable guided images mode": "check this box to enable guided images mode", + "* check your CLI for outputs": "* check your CLI for outputs", + "* check your CLI for outputs *": "* check your CLI for outputs *", + "Chess": "棋盤狀", + "children's illustration": "children's illustration", + "choose an algorithm/ method for keeping color coherence across the animation": "choose an algorithm/ method for keeping color coherence across the animation", + "Choose Min-max to activate these controls": "Choose Min-max to activate these controls", + "Choose mode:": "Choose mode:", + 
"Choose preprocessor for semantic segmentation:": "Choose preprocessor for semantic segmentation:", + "choose the frame interpolation engine and version": "choose the frame interpolation engine and version", + "Choose which animation params are to be saved to the .srt file (Frame # and Seed will always be saved):": "Choose which animation params are to be saved to the .srt file (Frame # and Seed will always be saved):", + "Choose your favorite mask:": "Choose your favorite mask:", + "chromatic": "chromatic", + "Chromatic": "Chromatic", + "circle": "circle", + "City": "City", + "cityscape": "cityscape", + "CivitAI account settings": "CivitAI account settings", + "Civitai Browser+": "Civitai Browser+", + "Civitai Helper": "Civitai Helper", + "CivitAI Tags": "CivitAI Tags", + "Civitai URL": "Civitai URL", + "Civitai URL or Model ID": "Civitai URL or Model ID", + "Class": "Class", + "classic": "classic", + "Classify type": "Classify type", + "Class prompt": "Class prompt", + "clean": "clean", + "Cleaner": "Cleaner", + "Cleaner Model ID": "Cleaner Model ID", + "Clear all": "Clear all", + "Clear ALL filters": "Clear ALL filters", + "Clear Cache": "Clear Cache", + "Clear selection": "Clear selection", + "Clear tag filters": "Clear tag filters", + "Clear values": "Clear values", + "Click Enhance.": "Click Enhance.", + "Click Here": "Click Here", + "Click here after the generation to show the video": "Click here after the generation to show the video", + "click here to gather relevant info": "click here to gather relevant info", + "Click on the files that appear in the tree to edit them.": "Click on the files that appear in the tree to edit them.", + "Click on the \"Generate Default Engines\" button. This step can take 2-10 min depending on your GPU. You can generate engines for other combinations.": "Click on the \"Generate Default Engines\" button. This step can take 2-10 min depending on your GPU. 
You can generate engines for other combinations.", + "Click the search icon to load models.": "Click the search icon to load models.", + "click to download": "click to download", + "Clip and renormalize": "Clip and renormalize", + "🕵️‍♂️ CLIP Interrogator 🕵️‍♂️": "🕵️‍♂️ CLIP Interrogator 🕵️‍♂️", + "CLIP Interrogator on Github": "CLIP Interrogator on Github", + "CLIP Maximum length": "最大 CLIP 長度", + "CLIP Minimum length": "最小 CLIP 長度", + "CLiP model": "CLiP model", + "CLIP Model": "CLIP Model", + "CLIP models:": "CLIP models:", + "clipseg": "clipseg", + "clipseg options": "clipseg options", + "Clip skip:": "Clip skip:", + "CLIP Skip": "CLIP Skip", + "Clip skip (2 if training anime model)": "Clip skip (2 if training anime model)", + "CLIP skip schedule": "CLIP skip schedule", + "CLIP tensors checker": "CLIP tensors checker", + "clip_threshold": "clip_threshold", + "clone repositores": "clone repositores", + "Closed": "Closed", + "Closed loop": "Closed loop", + "Close Preview": "Close Preview", + "Closer is brighter": "Closer is brighter", + "Close the video": "Close the video", + "cloudscape": "cloudscape", + "cluster num": "cluster num", + "Codec": "Codec", + "CodeFormers (Face restoration)": "CodeFormers (Face restoration)", + "CodeFormer vis.": "CodeFormer vis.", + "codeformer weight": "codeformer weight", + "CodeFormer Weight": "CodeFormer Weight", + "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", + "Coherence": "Coherence", + "🦒 Colab Run Command": "🦒 Colab Run Command", + "collage": "collage", + "Collage": "Collage", + "Collection actions": "Collection actions", + "Color": "色彩", + "color_burn": "加深顏色", + "color_coherence": "color_coherence", + "Color coherence": "Color coherence", + "Color coherence may be used with hybrid composite off, to just use video color.": "Color coherence may be used with hybrid composite off, to just use video color.", + 
"color_coherence_video_every_N_frames": "color_coherence_video_every_N_frames", + "Color correction factor": "Color correction factor", + "🎨 Color Correction Settings": "🎨 Color Correction Settings", + "color_dodge": "加亮顏色", + "Color Fix": "Color Fix", + "Color force Grayscale": "Color force Grayscale", + "colorful": "colorful", + "[color-matcher]": "[color-matcher]", + "Color Matcher Ref Image": "Color Matcher Ref Image", + "Color Matcher Ref Image Type": "Color Matcher Ref Image Type", + "color mode": "color mode", + "Color reduce algo": "Color reduce algo", + "colors": "colors", + "Color Transfer Method": "Color Transfer Method", + "Combine axis": "Combine axis", + "Combine input and depthmap into one image": "Combine input and depthmap into one image", + "Combine interrogations": "Combine interrogations", + "Combine into one image": "Combine into one image", + "ComfyUI install location": "ComfyUI install location", + "ComfyUI server graceful termination timeout (in seconds) when reloading the gradio UI (-1 to block until the ComfyUI server exits normally)": "ComfyUI server graceful termination timeout (in seconds) when reloading the gradio UI (-1 to block until the ComfyUI server exits normally)", + "Comic Book": "Comic Book", + "comics": "comics", + "Comma separated face number(s)": "Comma separated face number(s)", + "Comma-separated list of tags (\"artist, style, character, 2d, 3d...\")": "此模型的標記列表(\"artist, style, character, 2d, 3d...\")", + "Common Tags": "Common Tags", + "Compact prompt layout": "Compact prompt layout", + "Comp alpha schedule": "Comp alpha schedule", + "Compare": "Compare", + "Compatibility with ControlNet": "Compatibility with ControlNet", + "Compatibility with other extensions": "Compatibility with other extensions", + "Compatible with Python v3.10.6 and WebUI v1.5.1.": "Compatible with Python v3.10.6 and WebUI v1.5.1.", + "Compatible with Python v3.10.6 and WebUI v1.6.0.": "Compatible with Python v3.10.6 and WebUI v1.6.0.", + "Comp 
mask auto contrast": "Comp mask auto contrast", + "Comp mask auto contrast cutoff high schedule": "Comp mask auto contrast cutoff high schedule", + "Comp mask auto contrast cutoff low schedule": "Comp mask auto contrast cutoff low schedule", + "Comp mask blend alpha schedule": "Comp mask blend alpha schedule", + "Comp mask contrast schedule": "Comp mask contrast schedule", + "Comp mask equalize": "Comp mask equalize", + "Comp mask inverse": "Comp mask inverse", + "Comp mask type": "Comp mask type", + "Component": "Component", + "Composable Mask scheduling": "Composable Mask scheduling", + "Composite audio files extracted from the original video onto the concatenated video.": "Composite audio files extracted from the original video onto the concatenated video.", + "Composite video with previous frame init image in": "Composite video with previous frame init image in", + "Comp save extra frames": "Comp save extra frames", + "Compute Settings": "Compute Settings", + "Compute similarity": "Compute similarity", + "Concatenate each frame while crossfading.": "Concatenate each frame while crossfading.", + "Concavity of distortion of circles": "Concavity of distortion of circles", + "concept": "concept", + "concept art": "concept art", + "concept - Can be a concept, such as “a X of Y”, or an historical event such as “The Trojan War”.": "concept - Can be a concept, such as “a X of Y”, or an historical event such as “The Trojan War”.", + "Config file for Adapter models": "Config file for Adapter models", + "Config file for Control Net models": "Config file for Control Net models", + "Config Presets": "Config Presets", + "Config state file to restore from, under 'config-states/' folder": "Config state file to restore from, under 'config-states/' folder", + "configuration": "configuration", + "configuration for": "configuration for", + "Configure colors. See the Settings section in the README for more info. Must be valid JSON.": "Configure colors. 
See the Settings section in the README for more info. Must be valid JSON.", + "Configure Hotkeys. For possible values, see https://www.w3.org/TR/uievents-key, or leave empty / set to 'None' to disable. Must be valid JSON.": "Configure Hotkeys. For possible values, see https://www.w3.org/TR/uievents-key, or leave empty / set to 'None' to disable. Must be valid JSON.", + "Connect": "Connect", + "Connecting": "Connecting", + "Connection errored out.": "Connection errored out.", + "Consider a donation on ko-fi! :3": "Consider a donation on ko-fi! :3", + "Consistency mask blur": "Consistency mask blur", + "Console logging": "Console logging", + "Console log tag counts per file, no progress bar": "Console log tag counts per file, no progress bar", + "Containing directory": "目標模型目錄", + "Content type:": "Content type:", + "Content Type:": "Content Type:", + "Context batch size": "Context batch size", + "contrast": "contrast", + "Contrast": "Contrast", + "Contrast:": "Contrast:", + "Contrast schedule": "Contrast schedule", + "control animation mode, will hide non relevant params upon change": "control animation mode, will hide non relevant params upon change", + "Control Mode (Guess Mode)": "Control Mode (Guess Mode)", + "controlnet: A neural network structure to control diffusion models by adding extra conditions. Check manual for setup info.": "controlnet: A neural network structure to control diffusion models by adding extra conditions. 
Check manual for setup info.", + "ControlNet Fastload": "ControlNet Fastload", + "Controlnet Fastload Filter": "Controlnet Fastload Filter", + "Controlnet Fastload First": "Controlnet Fastload First", + "ControlNet Fastload Plugin First": "ControlNet Fastload Plugin First", + "Controlnet First": "Controlnet First", + "ControlNet guidance end": "ControlNet guidance end", + "ControlNet guidance end 10th": "ControlNet guidance end 10th", + "ControlNet guidance end 2nd": "ControlNet guidance end 2nd", + "ControlNet guidance end 3rd": "ControlNet guidance end 3rd", + "ControlNet guidance end 4th": "ControlNet guidance end 4th", + "ControlNet guidance end 5th": "ControlNet guidance end 5th", + "ControlNet guidance end 6th": "ControlNet guidance end 6th", + "ControlNet guidance end 7th": "ControlNet guidance end 7th", + "ControlNet guidance end 8th": "ControlNet guidance end 8th", + "ControlNet guidance end 9th": "ControlNet guidance end 9th", + "ControlNet guidance start": "ControlNet guidance start", + "ControlNet guidance start 10th": "ControlNet guidance start 10th", + "ControlNet guidance start 2nd": "ControlNet guidance start 2nd", + "ControlNet guidance start 3rd": "ControlNet guidance start 3rd", + "ControlNet guidance start 4th": "ControlNet guidance start 4th", + "ControlNet guidance start 5th": "ControlNet guidance start 5th", + "ControlNet guidance start 6th": "ControlNet guidance start 6th", + "ControlNet guidance start 7th": "ControlNet guidance start 7th", + "ControlNet guidance start 8th": "ControlNet guidance start 8th", + "ControlNet guidance start 9th": "ControlNet guidance start 9th", + "ControlNet Info": "ControlNet Info", + "ControlNet Inpaint": "ControlNet Inpaint", + "ControlNet Inpaint Index": "ControlNet Inpaint Index", + "ControlNet inpaint model index": "ControlNet inpaint model index", + "ControlNet inpaint not masked": "ControlNet inpaint not masked", + "ControlNet Inpaint Number": "ControlNet Inpaint Number", + "ControlNet input directory": 
"ControlNet input directory", + "ControlNet Input Video/ Image Path": "ControlNet Input Video/ Image Path", + "ControlNet Input Video Path": "ControlNet Input Video Path", + "ControlNet Mask Video/ Image Path (*NOT WORKING, kept in UI for CN's devs testing!*)": "ControlNet Mask Video/ Image Path (*NOT WORKING, kept in UI for CN's devs testing!*)", + "ControlNet Mask Video Path": "ControlNet Mask Video Path", + "ControlNet model": "ControlNet model", + "ControlNet model 10th": "ControlNet model 10th", + "ControlNet model 2nd": "ControlNet model 2nd", + "ControlNet model 3rd": "ControlNet model 3rd", + "ControlNet model 4th": "ControlNet model 4th", + "ControlNet model 5th": "ControlNet model 5th", + "ControlNet model 6th": "ControlNet model 6th", + "ControlNet model 7th": "ControlNet model 7th", + "ControlNet model 8th": "ControlNet model 8th", + "ControlNet model 9th": "ControlNet model 9th", + "ControlNet Model ID": "ControlNet Model ID", + "ControlNet model index": "ControlNet model index", + "ControlNet number": "ControlNet number", + "ControlNet option": "ControlNet option", + "ControlNet options": "ControlNet options", + "ControlNet Plugin First": "ControlNet Plugin First", + "ControlNet Preprocessor": "ControlNet Preprocessor", + "ControlNet Segmentation Index": "ControlNet Segmentation Index", + "Controlnet tile model name": "Controlnet tile model name", + "ControlNet Video Input": "ControlNet Video Input", + "ControlNet Video Mask Input": "ControlNet Video Mask Input", + "ControlNet weight": "ControlNet weight", + "Control Net Weight": "Control Net Weight", + "ControlNet weight 10th": "ControlNet weight 10th", + "ControlNet weight 2nd": "ControlNet weight 2nd", + "ControlNet weight 3rd": "ControlNet weight 3rd", + "ControlNet weight 4th": "ControlNet weight 4th", + "ControlNet weight 5th": "ControlNet weight 5th", + "ControlNet weight 6th": "ControlNet weight 6th", + "ControlNet weight 7th": "ControlNet weight 7th", + "ControlNet weight 8th": "ControlNet 
weight 8th", + "ControlNet weight 9th": "ControlNet weight 9th", + "Control Net Weight For Face": "Control Net Weight For Face", + "controls for how many frames the same seed should stick before iterating to the next one": "controls for how many frames the same seed should stick before iterating to the next one", + "controls pixel generation method for images smaller than the frame. hover on the options to see more info": "controls pixel generation method for images smaller than the frame. hover on the options to see more info", + "Controls the contrast of the composite mask. 0.5 if half, 1 is normal contrast, 2 is double, etc.": "Controls the contrast of the composite mask. 0.5 if half, 1 is normal contrast, 2 is double, etc.", + "controls the handling of pixels outside the field of view as they come into the scene": "controls the handling of pixels outside the field of view as they come into the scene", + "controls the seed behavior that is used for animation. hover on the options to see more info": "controls the seed behavior that is used for animation. hover on the options to see more info", + "controls the seed behavior that is used for animation. Hover on the options to see more info": "controls the seed behavior that is used for animation. Hover on the options to see more info", + "Controls the strength of the diffusion on the init image. 0 = disabled": "Controls the strength of the diffusion on the init image. 0 = disabled", + "controls which sampler to use at a specific scheduled frame": "controls which sampler to use at a specific scheduled frame", + "convert": "convert", + "Convert": "Convert", + "Convert a 360 spherical panorama to a 3D mesh": "Convert a 360 spherical panorama to a 3D mesh", + "Convert a single 2D image to a 3D mesh": "Convert a single 2D image to a 3D mesh", + "Convert currently loaded checkpoint into ONNX. 
The conversion will fail catastrophically if TensorRT was used at any point prior to conversion, so you might have to restart webui before doing the conversion.": "Convert currently loaded checkpoint into ONNX. The conversion will fail catastrophically if TensorRT was used at any point prior to conversion, so you might have to restart webui before doing the conversion.", + "Converted checkpoints will be saved in your": "Converted checkpoints will be saved in your", + "Convert excessive brackets to weights": "Convert excessive brackets to weights", + "Convert Folder": "Convert Folder", + "Convert ONNX to TensorRT": "Convert ONNX to TensorRT", + "Convert spaces to underscores (default: underscore to spaces)": "Convert spaces to underscores (default: underscore to spaces)", + "Convert to ONNX": "Convert to ONNX", + "Convert to TensorRT": "Convert to TensorRT", + "Convert Unet to ONNX": "Convert Unet to ONNX", + "Copy": "複製", + "Copy and Overwrite": "Copy and Overwrite", + "Copy caption from selected images automatically": "Copy caption from selected images automatically", + "Copy main UI prompt to style.": "Copy main UI prompt to style.", + "Copy Metadata": "複製中繼資料", + "Copy metadata to other models in directory": "複製中繼資料至其他目錄中的模型", + "Copy or move captions together": "Copy or move captions together", + "copy to clipboard": "複製到剪貼簿", + "📋 Copy to clipboard": "📋 Copy to clipboard", + "Copy to ControlNet Inpainting": "Copy to ControlNet Inpainting", + "Copy to ControlNet Segmentation": "Copy to ControlNet Segmentation", + "Copy to favorites": "Copy to favorites", + "Copy to Inpaint Upload & ControlNet Inpainting": "Copy to Inpaint Upload & ControlNet Inpainting", + "Copy to txt2img ControlNet Inpainting": "Copy to txt2img ControlNet Inpainting", + "Country": "Country", + "Cover image": "封面圖像", + "Cozy Image Browser": "Cozy Image Browser", + "Cozy Nest Image Browser": "Cozy Nest Image Browser", + "Craft Clay": "Craft Clay", + "create a gif in addition to .mp4 file. 
supports up to 30 fps, will self-disable at higher fps values": "create a gif in addition to .mp4 file. supports up to 30 fps, will self-disable at higher fps values", + "Create a new model.": "Create a new model.", + "Create animation": "Create animation", + "Create a text file with infotext next to every generated image": "Create a text file with infotext next to every generated image", + "Create blank canvas": "Create blank canvas", + "Create classification images using training settings without training.": "Create classification images using training settings without training.", + "Create mask": "Create mask", + "Create Mask": "Create Mask", + "Create mask area": "Create mask area", + "": "", + "Create Project": "Create Project", + "Creates animation sequence from denoised intermediate steps with video frame interpolation to achieve desired animation duration": "Creates animation sequence from denoised intermediate steps with video frame interpolation to achieve desired animation duration", + "create ui": "create ui", + "Create Version": "Create Version", + "Create your wildcard library by copying a collection using the dropdown below.": "Create your wildcard library by copying a collection using the dropdown below.", + "Create your wildcard library by copying a collection using the dropdown in the Collection actions.": "Create your wildcard library by copying a collection using the dropdown in the Collection actions.", + "Create zip archive when downloading multiple images": "Create zip archive when downloading multiple images", + "CRF": "CRF", + "crop": "crop", + "Crop Images": "裁切圖像", + "crop_n_layers": "crop_n_layers", + "crop_nms_thresh": "crop_nms_thresh", + "crop_n_points_downscale_factor": "crop_n_points_downscale_factor", + "crop_overlap_ratio": "crop_overlap_ratio", + "Cropping": "裁切", + "crops_n_layers": "crops_n_layers", + "Crop: top, left, bottom, right": "Crop: top, left, bottom, right", + "Cross-attention": "Cross-attention", + "Cross-Attention 
Visualizer": "Cross-Attention Visualizer", + "Crossfade blend rate": "Crossfade blend rate", + "csv file with columns for filenames and prompts": "csv file with columns for filenames and prompts", + "Ctrl+up/down whitespace delimiters": "Ctrl+up/down whitespace delimiters", + "cubism": "cubism", + "Cubist": "Cubist", + "CUDA Settings": "CUDA Settings", + "Current batch": "Current batch", + "Current Cache": "Current Cache", + "Current Dataset to be trained": "Current Dataset to be trained", + "Currently only supports PNG from webui. Param from NovelAI soon.": "Currently only supports PNG from webui. Param from NovelAI soon.", + "currently selected model": "currently selected model", + "Current Model": "Current Model", + "Current page": "Current page", + "Current ranking": "Current ranking", + "Current version": "Current version", + "Custom Aria2 command line flags": "Custom Aria2 command line flags", + "Custom EXIF": "Custom EXIF", + "Custom parameters": "Custom parameters", + "Custom settings file": "自訂設定檔", + "Custom size": "自訂尺寸", + "Custom VAE": "Custom VAE", + "custum name": "custum name", + "Cutoff strongly.": "Cutoff strongly.", + "Cut video in to segments": "Cut video in to segments", + "Cut white margin from input": "Cut white margin from input", + "Cyan | Red": "Cyan | Red", + "Cycle BG": "Cycle BG", + "Daam script": "Daam script", + "dark": "dark", + "darken": "變暗", + "Dataset": "Dataset", + "Dataset folder structure": "資料集資料夾結構", + "Dataset Images": "Dataset Images", + "Dataset Load Settings": "Dataset Load Settings", + "Dataset size:": "Dataset size:", + "date": "日期", + "Date:": "Date:", + "Date Created": "Date Created", + "Date Modified": "Date Modified", + "Date Modified (newest first)": "Date Modified (newest first)", + "Date Modified (oldest first)": "Date Modified (oldest first)", + "Date trained:": "Date trained:", + "Debug": "Debug", + "Debug info": "Debug info", + "Debug Infos": "Debug Infos", + "Debug level": "Debug level", + "Debug log": 
"Debug log", + "Debug Mode 🡢 debug": "Debug Mode 🡢 debug", + "Dedicated negative prompt": "Dedicated negative prompt", + "Default": "Default", + "Default Image CFG": "Default Image CFG", + "Default Input Batch Output Directory": "Default Input Batch Output Directory", + "default LoRA multiplier": "default LoRA multiplier", + "Default magic prompt model": "Default magic prompt model", + "Default Options Button": "Default Options Button", + "Default order field for Extra Networks cards": "Default order field for Extra Networks cards", + "Default order for Extra Networks cards": "Default order for Extra Networks cards", + "Default Out Batches": "Default Out Batches", + "Default Sampler": "Default Sampler", + "Default scoring type": "Default scoring type", + "Default Sort": "Default Sort", + "Default Steps": "Default Steps", + "Default sub folders": "Default sub folders", + "Default Text CFG": "Default Text CFG", + "Default Upscaled swapper color corrections (requires restart)": "Default Upscaled swapper color corrections (requires restart)", + "Default Upscaled swapper face restorer (requires restart)": "Default Upscaled swapper face restorer (requires restart)", + "Default Upscaled swapper face restorer visibility (requires restart)": "Default Upscaled swapper face restorer visibility (requires restart)", + "Default Upscaled swapper face restorer weight (codeformer) (requires restart)": "Default Upscaled swapper face restorer weight (codeformer) (requires restart)", + "Default Upscaled swapper mask erosion factor, 1 = default behaviour. The larger it is, the more blur is applied around the face. Too large and the facial change is no longer visible. (requires restart)": "Default Upscaled swapper mask erosion factor, 1 = default behaviour. The larger it is, the more blur is applied around the face. Too large and the facial change is no longer visible. 
(requires restart)", + "Default Upscaled swapper sharpen": "Default Upscaled swapper sharpen", + "Default Upscaled swapper upscaler (Recommanded : LDSR but slow) (requires restart)": "Default Upscaled swapper upscaler (Recommanded : LDSR but slow) (requires restart)", + "Default upscaler for image resize operations": "Default upscaler for image resize operations", + "Default Use improved segmented mask (use pastenet to mask only the face) (requires restart)": "Default Use improved segmented mask (use pastenet to mask only the face) (requires restart)", + "default variables: in \\{\\}, like \\{init_mask\\}, \\{video_mask\\}, \\{everywhere\\}": "default variables: in \\{\\}, like \\{init_mask\\}, \\{video_mask\\}, \\{everywhere\\}", + "defines the seed behavior that is used for animations": "defines the seed behavior that is used for animations", + "❌ Del": "❌ Del", + "delete": "delete", + "❌Delete": "❌Delete", + "Delete 0-entries from exif cache": "Delete 0-entries from exif cache", + "Delete All Inputframes": "Delete All Inputframes", + "Delete all preview image": "Delete all preview image", + "DELETE cannot be undone. The files will be deleted completely.": "DELETE cannot be undone. 
The files will be deleted completely.", + "DELETE File(s)": "DELETE File(s)", + "Delete Imgs": "Delete Imgs", + "Delete intermediate": "Delete intermediate", + "Delete intermediate frames after GIF generation": "Delete intermediate frames after GIF generation", + "(Delete keyframe, or Add keyframe from ": "(Delete keyframe, or Add keyframe from ", + "Delete known junk data": "Delete known junk data", + "Delete Model": "Delete Model", + "delete next": "刪除後 N 張", + "Delete old version after download": "Delete old version after download", + "Delete old version(s) after download": "Delete old version(s) after download", + "delete or keep raw affected (interpolated/ upscaled depending on the UI section) png imgs": "delete or keep raw affected (interpolated/ upscaled depending on the UI section) png imgs", + "Delete Selected Skeleton (D key)": "Delete Selected Skeleton (D key)", + "Delete Style": "Delete Style", + "DemonCrawl Avatar Generator": "DemonCrawl Avatar Generator", + "Denoise": "重繪幅度", + "Denoising strenght": "Denoising strenght", + "Denoising strength for face images": "Denoising strength for face images", + "Denoising strength for the entire image": "Denoising strength for the entire image", + "Denoising strength (Inpaint)": "Denoising strength (Inpaint)", + "Denoising strength webui": "Denoising strength webui", + "Depth Algorithm": "Depth Algorithm", + "depth_leres": "depth_leres", + "depth_leres++": "depth_leres++", + "Depth Map": "Depth Map", + "depth_midas": "depth_midas", + "Depth (Midas/Adabins)": "Depth (Midas/Adabins)", + "Depth Output": "Depth Output", + "Depth Prediction": "Depth Prediction", + "Depth Prediction demo": "Depth Prediction demo", + "Depth Warping & FOV": "Depth Warping & FOV", + "depth_zoe": "depth_zoe", + "Descending": "Descending", + "description-based:": "description-based:", + "deselect all": "deselect all", + "Deselect visible tags": "Deselect visible tags", + "Destination Directory": "Destination Directory", + "detailed": 
"detailed", + "Detailed": "Detailed", + "Detailed Save As": "Detailed Save As", + "Details": "Details", + "Detect from image": "從圖像偵測", + "Detect from Image": "Detect from Image", + "Detection": "Detection", + "Detection model confidence threshold": "Detection model confidence threshold", + "Detection model confidence threshold %": "Detection model confidence threshold %", + "Detection model confidence threshold 10th": "Detection model confidence threshold 10th", + "Detection model confidence threshold % 2nd": "Detection model confidence threshold % 2nd", + "Detection model confidence threshold 2nd": "Detection model confidence threshold 2nd", + "Detection model confidence threshold % 3rd": "Detection model confidence threshold % 3rd", + "Detection model confidence threshold 3rd": "Detection model confidence threshold 3rd", + "Detection model confidence threshold % 4th": "Detection model confidence threshold % 4th", + "Detection model confidence threshold 4th": "Detection model confidence threshold 4th", + "Detection model confidence threshold % 5th": "Detection model confidence threshold % 5th", + "Detection model confidence threshold 5th": "Detection model confidence threshold 5th", + "Detection model confidence threshold 6th": "Detection model confidence threshold 6th", + "Detection model confidence threshold 7th": "Detection model confidence threshold 7th", + "Detection model confidence threshold 8th": "Detection model confidence threshold 8th", + "Detection model confidence threshold 9th": "Detection model confidence threshold 9th", + "Detection threshold": "Detection threshold", + "det_size : Size of the detection area for face analysis. Higher values may improve quality but reduce speed. Low value may improve detection of very large face.": "det_size : Size of the detection area for face analysis. Higher values may improve quality but reduce speed. 
Low value may improve detection of very large face.", + "det_thresh : Face Detection threshold": "det_thresh : Face Detection threshold", + "Device Info": "Device Info", + "Device precision type": "Device precision type", + "difference": "差異化", + "Difference": "Difference", + "diffusers default": "diffusers default", + "Diffusers LoRA loading variant": "Diffusers LoRA loading variant", + "Diffusers model loading variant": "Diffusers model loading variant", + "Diffusers pipeline": "Diffusers pipeline", + "Diffusers Settings": "Diffusers Settings", + "Diffusers VAE loading variant": "Diffusers VAE loading variant", + "Diffuse the first frame based on an image, similar to img2img.": "Diffuse the first frame based on an image, similar to img2img.", + "digital": "digital", + "digital art": "digital art", + "Dilation factor (B)": "Dilation factor (B)", + "Directly Draw Scribbles": "Directly Draw Scribbles", + "directory.": "directory.", + "Directory path": "目錄路徑", + "Disable all extensions (preserves the list of disabled extensions)": "Disable all extensions (preserves the list of disabled extensions)", + "Disable Async DNS for Aria2": "Disable Async DNS for Aria2", + "Disable at the last loopback time": "Disable at the last loopback time", + "Disable built-in Lora handler": "Disable built-in Lora handler", + "Disable Class Matching": "Disable Class Matching", + "Disabled": "Disabled", + "Disable de-duplication of wildcards before processing.": "Disable de-duplication of wildcards before processing.", + "Disable during hires pass": "Disable during hires pass", + "Disable feedback": "Disable feedback", + "Disable for Negative prompt.": "Disable for Negative prompt.", + "Disable image browser (Reload UI required)": "Disable image browser (Reload UI required)", + "Disable image browser (requires reload UI)": "Disable image browser (requires reload UI)", + "Disable Logging": "Disable Logging", + "Disables logging (graphs), may cause minor performance improvements.": 
"Disables logging (graphs), may cause minor performance improvements.", + "Disable sorting of wildcards before processing.": "Disable sorting of wildcards before processing.", + "Disable these extensions": "Disable these extensions", + "(Disable this option if you're experiencing any issues with downloads.)": "(Disable this option if you're experiencing any issues with downloads.)", + "(Disable to use the old download method)": "(Disable to use the old download method)", + "Disable waves and gradiant background": "Disable waves and gradiant background", + "Disable waves and gradiant background animations": "Disable waves and gradiant background animations", + "Disallow usage of checkpoints in ckpt format": "Disallow usage of checkpoints in ckpt format", + "Disallow usage of models in ckpt format": "Disallow usage of models in ckpt format", + "discard": "discard", + "Discard images with low similarity or no faces :": "Discard images with low similarity or no faces :", + "Disco": "Disco", + "discord server": "discord server", + "Discussion Board": "Discussion Board", + "👎 Dislike": "👎 Dislike", + "Disliked images": "Disliked images", + "👎 Dislikes": "👎 Dislikes", + "display both english and target language": "同時顯示原文與翻譯", + "Display information dialog on Cozy Nest error": "Display information dialog on Cozy Nest error", + "Display loop number": "Display loop number", + "Display loop number (0 = infinite loop)": "Display loop number (0 = infinite loop)", + "Display mode:": "Display mode:", + "Display Name": "Display Name", + "Display name for this model": "此模型的顯示名稱", + "Disregard fields from pasted infotext": "Disregard fields from pasted infotext", + "dithering": "dithering", + "Divergence (3D effect)": "Divergence (3D effect)", + "Divide cards by date": "Divide cards by date", + "doc": "doc", + "Dolly": "Dolly", + "Dominant image color": "圖像主色", + "Do not add watermark to images": "Do not add watermark to images", + "Do not optimize attention layers": "Do not 
optimize attention layers", + "(Do not recalculate conds from prompts if prompts have not changed since previous calculation)": "(Do not recalculate conds from prompts if prompts have not changed since previous calculation)", + "Do not show any images in gallery": "Do not show any images in gallery", + "Do not store my preferences or SVG content locally": "Do not store my preferences or SVG content locally", + "Do not upload here until bugfix": "Do not upload here until bugfix", + "] Do not use this option if you have made changes with the metadata editor without backing up your data!!": "] Do not use this option if you have made changes with the metadata editor without backing up your data!!", + "Do not use this option if you have made changes with the metadata editor without backing up your data!!": "Do not use this option if you have made changes with the metadata editor without backing up your data!!", + "Don't cache latents": "Don't cache latents", + "Don't Cache Latents": "Don't Cache Latents", + "don't delete upscaled imgs": "don't delete upscaled imgs", + "Don't generate, only upscale": "Don't generate, only upscale", + "Don't outfill": "不進行填充", + "Don't Override": "Don't Override", + "Don’t use wierd blocky upscale mode. Or maybe do?": "Don’t use wierd blocky upscale mode. 
Or maybe do?", + "dot size": "dot size", + "double-straight-line": "double-straight-line", + "Download All files": "Download All files", + "Download configuration files from CivitAI": "Download configuration files from CivitAI", + "Download folder:": "Download folder:", + "Download Folder:": "Download Folder:", + "💾 Download image": "💾 Download image", + "Download Link": "Download Link", + "Download Max Size Preview": "Download Max Size Preview", + "Download missing activation triggers on startup": "Download missing activation triggers on startup", + "Download missing models upon reading generation parameters from prompt": "Download missing models upon reading generation parameters from prompt", + "Download missing preview images on startup": "Download missing preview images on startup", + "Download model": "Download model", + "Download Model": "Download Model", + "Download models using Aria2": "Download models using Aria2", + "Download NSFW (adult) preview images": "Download NSFW (adult) preview images", + "Download path": "Download path", + "Download the pose as .json file": "Download the pose as .json file", + "Downscaling": "Downscaling", + "Drag Me": "Drag Me", + "Drawing Canvas": "Drawing Canvas", + "Draw Legends": "Draw Legends", + "Draw mask": "Draw mask", + "Draw region + show mask": "Draw region + show mask", + "Dreamscape": "Dreamscape", + "Dropdown": "下拉式清單", + "Drop tabs here to hide them": "Drop tabs here to hide them", + "📝 Dry Run": "📝 Dry Run", + "Due to ControlNet base extension's inner works it needs its models to be located at 'extensions/deforum-for-automatic1111-webui/models'. So copy, symlink or move them there until a more elegant solution is found. And, as of now, it requires use_init checked for the first run. 
The ControlNet extension version used in the dev process is a24089a62e70a7fae44b7bf35b51fd584dd55e25, if even with all the other options above used it still breaks, upgrade/downgrade your CN version to this one.": "Due to ControlNet base extension's inner works it needs its models to be located at 'extensions/deforum-for-automatic1111-webui/models'. So copy, symlink or move them there until a more elegant solution is found. And, as of now, it requires use_init checked for the first run. The ControlNet extension version used in the dev process is a24089a62e70a7fae44b7bf35b51fd584dd55e25, if even with all the other options above used it still breaks, upgrade/downgrade your CN version to this one.", + "Due to the limitation of Segment Anything, when there are point prompts, at most 1 box prompt will be allowed; when there are multiple box prompts, no point prompts are allowed.": "Due to the limitation of Segment Anything, when there are point prompts, at most 1 box prompt will be allowed; when there are multiple box prompts, no point prompts are allowed.", + "duplicate": "duplicate", + "Duplicate File Behavior": "Duplicate File Behavior", + "Duplicate Skeleton (X-axis)": "Duplicate Skeleton (X-axis)", + "Duplicate Skeleton (Z-axis)": "Duplicate Skeleton (Z-axis)", + "during the run sequence, only frames specified by this value will be extracted, saved, and diffused upon. A value of 1 indicates that every frame is to be accounted for. Values of 2 will use every other frame for the sequence. Higher values will skip that number of frames respectively.": "during the run sequence, only frames specified by this value will be extracted, saved, and diffused upon. A value of 1 indicates that every frame is to be accounted for. Values of 2 will use every other frame for the sequence. 
Higher values will skip that number of frames respectively.", + "dw_openpose_full": "dw_openpose_full", + "Dynamic engines support a range of resolutions and batch sizes, at a small cost in performance. Wider ranges will use more VRAM.": "Dynamic engines support a range of resolutions and batch sizes, at a small cost in performance. Wider ranges will use more VRAM.", + "Dynamic Image Normalization": "Dynamic Image Normalization", + "dynamicprompts library is version 0.29.0": "dynamicprompts library is version 0.29.0", + "Dystopian": "Dystopian", + "Each preset can be adjusted with the \"Advanced Settings\" option.": "Each preset can be adjusted with the \"Advanced Settings\" option.", + "Each Tags": "Each Tags", + "(Early access models are only downloadable for supporter tier members, Requires API key)": "(Early access models are only downloadable for supporter tier members, Requires API key)", + "Easy editing": "Easy editing", + "EBSynth Mode": "EBSynth Mode", + "Ebsynth-Process": "Ebsynth-Process", + "EBSynth Settings": "EBSynth Settings", + "[Ebsynth Utility]->[configuration]->[stage 2]->[Threshold of delta frame edge]": "[Ebsynth Utility]->[configuration]->[stage 2]->[Threshold of delta frame edge]", + "edit": "edit", + "Edit": "Edit", + "Edit Caption": "Edit Caption", + "Edit Caption of Selected Image": "Edit Caption of Selected Image", + "Edit common tags.": "Edit common tags.", + "Edit Exif": "Edit Exif", + "Editing Enabled": "啟用中繼資料編輯", + "Edit metadata": "Edit metadata", + "Edit Model Basic Data": "Edit Model Basic Data", + "Edit Model Trigger Words": "Edit Model Trigger Words", + "edit prompt words...": "edit prompt words...", + "edit prompt words... ": "edit prompt words... ", + "Edit SVG": "Edit SVG", + "Edit Tags": "Edit Tags", + "Edit workflow type": "Edit workflow type", + "Effect": "效果器", + "Effective Block Analyzer": "Effective Block Analyzer", + "(e.g. \"hair/colours/light/...\")": "(e.g. 
\"hair/colours/light/...\")", + "Elemental": "Elemental", + "Elemental Merge": "Elemental Merge", + "Elemental Merge, Adjust": "Elemental Merge, Adjust", + "elements": "elements", + "ema-only": "ema-only", + "Embeddings: loaded": "Embeddings: loaded", + "Embeddings: skipped": "Embeddings: skipped", + "Embed photo": "Embed photo", + "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention", + "empty cannot be saved": "empty cannot be saved", + "empty strings cannot be translated": "empty strings cannot be translated", + "Enable AA for Downscaling.": "Enable AA for Downscaling.", + "Enable AA for Upscaling.": "Enable AA for Upscaling.", + "Enable ADetailer": "Enable ADetailer", + "Enable Ancestral ETA scheduling": "Enable Ancestral ETA scheduling", + "Enable AnimateDiff": "Enable AnimateDiff", + "Enable Anti Burn (and everything)": "Enable Anti Burn (and everything)", + "Enable Autocomplete": "Enable Autocomplete", + "Enable Autopruning": "Enable Autopruning", + "Enable batch mode": "Enable batch mode", + "Enable Bilingual Localization": "啟用雙語翻譯對照", + "Enable CFG-Based guidance": "Enable CFG-Based guidance", + "Enable checkpoint scheduling": "Enable checkpoint scheduling", + "Enable clear gallery button in txt2img and img2img tabs": "Enable clear gallery button in txt2img and img2img tabs", + "Enable CLIP skip scheduling": "Enable CLIP skip scheduling", + "Enable color correction": "Enable color correction", + "Enable controlnet tile resample": "Enable controlnet tile resample", + "enabled": "enabled", + "Enable Dev mode - adds extra reporting in console": "Enable Dev mode - adds extra reporting in console", + "Enable Devtools Log": "Enable Devtools Log", + "Enable during hires. fix": "Enable during hires. 
fix", + "Enable Dynamic Thresholding (CFG Scale Fix)": "Enable Dynamic Thresholding (CFG Scale Fix)", + "Enable extension": "Enable extension", + "Enable extra network tweaks": "Enable extra network tweaks", + "Enable extras": "Enable extras", + "Enable Face Prompt": "Enable Face Prompt", + "Enable feedback during hires. fix": "Enable feedback during hires. fix", + "Enable for SDXL": "Enable for SDXL", + "enable for seamless-tiling of each generated image. Experimental": "enable for seamless-tiling of each generated image. Experimental", + "Enable GroundingDINO": "Enable GroundingDINO", + "Enable guided images mode": "Enable guided images mode", + "Enable Hires. fix+": "Enable Hires. fix+", + "Enable Hypertile U-Net": "Enable Hypertile U-Net", + "Enable Hypertile U-Net for hires fix second pass": "Enable Hypertile U-Net for hires fix second pass", + "Enable Hypertile VAE": "Enable Hypertile VAE", + "Enable IPEX Optimize for Intel GPUs": "Enable IPEX Optimize for Intel GPUs", + "Enable JavaScript aspect ratio controls": "Enable JavaScript aspect ratio controls", + "Enable Maintenance tab": "Enable Maintenance tab", + "Enable model compile (experimental)": "Enable model compile (experimental)", + "Enable Model Mixer": "Enable Model Mixer", + "Enable MultiDiffusion": "Enable MultiDiffusion", + "Enable neutral-prompt extension": "Enable neutral-prompt extension", + "Enable noise multiplier scheduling": "Enable noise multiplier scheduling", + "Enable optimized monocular depth estimation": "Enable optimized monocular depth estimation", + "Enable Or Disable Style Selector": "Enable Or Disable Style Selector", + "Enable overwrite": "Enable overwrite", + "Enable perspective flip": "Enable perspective flip", + "Enable Pixel Perfect from lllyasviel. Configure your target width and height on txt2img/img2img default panel before preview if you wish to enable pixel perfect.": "Enable Pixel Perfect from lllyasviel. 
Configure your target width and height on txt2img/img2img default panel before preview if you wish to enable pixel perfect.", + "Enable quantization in K samplers for sharper and cleaner results": "Enable quantization in K samplers for sharper and cleaner results", + "Enable Randomize extension": "Enable Randomize extension", + "Enable Refiner": "Enable Refiner", + "Enable Region 1": "Enable Region 1", + "Enable Region 2": "Enable Region 2", + "Enable Region 3": "Enable Region 3", + "Enable Region 4": "Enable Region 4", + "Enable Region 5": "Enable Region 5", + "Enable Region 6": "Enable Region 6", + "Enable Region 7": "Enable Region 7", + "Enable Region 8": "Enable Region 8", + "enables 2D mode functions to simulate faux 3D movement": "enables 2D mode functions to simulate faux 3D movement", + "Enable sampler scheduling": "啟用取樣器排程", + "Enable sd-webui-comfyui extension": "Enable sd-webui-comfyui extension", + "(enables hypertile for all modes, including hires fix second pass; noticeable change in details of the generated picture)": "(enables hypertile for all modes, including hires fix second pass; noticeable change in details of the generated picture)", + "(enables hypertile just for hires fix second pass - regardless of whether the above setting is enabled)": "(enables hypertile just for hires fix second pass - regardless of whether the above setting is enabled)", + "Enable steps scheduling": "Enable steps scheduling", + "Enable Style Selector": "Enable Style Selector", + "enable Style Selector by default": "enable Style Selector by default", + "Enable Subseed scheduling": "Enable Subseed scheduling", + "Enable tensorboard logging": "Enable tensorboard logging", + "Enable this to generate model with fp16 precision. Results in a smaller checkpoint with minimal loss in quality.": "Enable this to generate model with fp16 precision. 
Results in a smaller checkpoint with minimal loss in quality.", + "Enable this to save VRAM.": "Enable this to save VRAM.", + "Enable thumbnail tooltips": "Enable thumbnail tooltips", + "Enable Tiled Diffusion": "Enable Tiled Diffusion", + "Enable Tiled VAE": "Enable Tiled VAE", + "Enable Token Merging (faster, less VRAM, less accurate)": "Enable Token Merging (faster, less VRAM, less accurate)", + "Enable to Store file in object storage that supports the s3 protocol": "Enable to Store file in object storage that supports the s3 protocol", + "enable to trigger webui's face restoration on each frame during the generation": "enable to trigger webui's face restoration on each frame during the generation", + "Enable uploading manually created mask to SAM.": "Enable uploading manually created mask to SAM.", + "Enable upscale with extras": "Enable upscale with extras", + "Enable Vectorizing": "Enable Vectorizing", + "Enable webcam": "Enable webcam", + "Enable/When": "Enable/When", + "Enabling this will provide better results and editability, but cost more VRAM.": "Enabling this will provide better results and editability, but cost more VRAM.", + "Enabling this will save the EMA unet weights as the 'normal' model weights and ignore the regular unet weights.": "Enabling this will save the EMA unet weights as the 'normal' model weights and ignore the regular unet weights.", + "End blur width": "End blur width", + "Ending Control Step schedule": "Ending Control Step schedule", + "End of prompt": "End of prompt", + "End Page": "尾頁", + "end the animation at this frame number": "end the animation at this frame number", + "Engages hybrid compositing of video into animation in various ways with comp alpha as a master mix control.": "Engages hybrid compositing of video into animation in various ways with comp alpha as a master mix control.", + "Engine": "Engine", + "Enhance": "Enhance", + "Enhanced img2img": "Enhanced img2img", + "Enqueue": "Enqueue", + "enter any additional 
notes": "enter any additional notes", + "Enter categody ids, separated by +. For example, if you want bed+person, your input should be 7+12 for ade20k and 59+0 for coco.": "Enter categody ids, separated by +. For example, if you want bed+person, your input should be 7+12 for ade20k and 59+0 for coco.", + "Enter category IDs": "Enter category IDs", + "Enter file name to save": "Enter file name to save", + "Enter input path": "Enter input path", + "Enter output path": "Enter output path", + "Enter prompts (one line for one trigger words)": "Enter prompts (one line for one trigger words)", + "Enter relative to webui folder or Full-Absolute path, and make sure it ends with something like this: '20230124234916_%09d.png', just replace 20230124234916 with your batch ID. The %05d is important, don't forget it!": "Enter relative to webui folder or Full-Absolute path, and make sure it ends with something like this: '20230124234916_%09d.png', just replace 20230124234916 with your batch ID. The %05d is important, don't forget it!", + "Enter relative to webui folder or Full-Absolute path, and make sure it ends with something like this: '20230124234916_%09d.png', just replace 20230124234916 with your batch ID. The %09d is important, don't forget it!": "Enter relative to webui folder or Full-Absolute path, and make sure it ends with something like this: '20230124234916_%09d.png', just replace 20230124234916 with your batch ID. The %09d is important, don't forget it!", + "enter username for submission": "enter username for submission", + "Enter your custom name.": "Enter your custom name.", + "Enter your prompt word (trigger word/prompt/negative prompt)": "Enter your prompt word (trigger word/prompt/negative prompt)", + "Enter your trigger word. EX: character_name_\\(title of novel\\)": "Enter your trigger word. 
EX: character_name_\\(title of novel\\)", + "Entire Caption": "Entire Caption", + "Equirectangular projection": "Equirectangular projection", + "Erase BG": "Erase BG", + "error": "error", + "Error Correction Level": "Error Correction Level", + "Escape brackets": "Escape brackets", + "eta (noise multiplier) for ancestral samplers": "eta (noise multiplier) for ancestral samplers", + "eta (noise multiplier) for DDIM": "eta (noise multiplier) for DDIM", + "etc": "etc", + "Every prompt in Batch Will Have Random Style": "Every prompt in Batch Will Have Random Style", + "EX: ": "EX: ", + "ex A.": "ex A.", + "Examine the instance and class images and report any instance images without corresponding class images.": "Examine the instance and class images and report any instance images without corresponding class images.", + "Example: Default args should use 221 as the total keyframes.": "Example: Default args should use 221 as the total keyframes.", + "Example: Default args should use 221 as total keyframes.": "Example: Default args should use 221 as total keyframes.", + "Example flow:": "Example flow:", + "Examples": "Examples", + "Example: seed_schedule could use 0:(5), 1:(-1), 219:(-1), 220:(5)": "Example: seed_schedule could use 0:(5), 1:(-1), 219:(-1), 220:(5)", + "Example: seed_schedule could use 0:(s), 1:(-1), \"max_f-2\":(-1), \"max_f-1\":(s)": "Example: seed_schedule could use 0:(s), 1:(-1), \"max_f-2\":(-1), \"max_f-1\":(s)", + "Example: strength_schedule could use 0:(0.25 * cos((72 / 60 * 3.141 * (t + 0) / 30))**13 + 0.7) to make alternating changes each 30 frames": "Example: strength_schedule could use 0:(0.25 * cos((72 / 60 * 3.141 * (t + 0) / 30))**13 + 0.7) to make alternating changes each 30 frames", + "ex B.": "ex B.", + "ex C.": "ex C.", + "Excluded Tag confidences": "Excluded Tag confidences", + "Excluded tags": "Excluded tags", + "Exclude tag, ..": "Exclude tag, ..", + "Exclude tags (split by comma)": "Exclude tags (split by comma)", + "Exclude Target 
(e.g., finger, book)": "Exclude Target (e.g., finger, book)", + "exclusion": "排除", + "Excudes (split by comma)": "Excudes (split by comma)", + "exif_data": "exif_data", + "exif keyword": "exif 關鍵字", + "EXIF keyword search": "EXIF keyword search", + "Exit ⭕": "Exit ⭕", + "Expand accordion by default": "Expand accordion by default", + "Expanded Mask": "Expanded Mask", + "Expand Mask": "Expand Mask", + "Expand Mask Iterations": "Expand Mask Iterations", + "Expand mask region": "Expand mask region", + "(Experimental, keep cond caches across jobs, reduce overhead.)": "(Experimental, keep cond caches across jobs, reduce overhead.)", + "EXPERIMENTAL: LoRA Shared Diffusers Source": "EXPERIMENTAL: LoRA Shared Diffusers Source", + "Experimental Shared Source:": "Experimental Shared Source:", + "Experimental Shared Src": "Experimental Shared Src", + "Explanation of Each Parameter": "Explanation of Each Parameter", + "Export": "Export", + "Export Default Engine": "Export Default Engine", + "Exported Text": "Exported Text", + "Export Engine": "Export Engine", + "Export type": "Export type", + "ExposureOffset": "ExposureOffset", + "expressionism": "expressionism", + "extcrop": "extcrop", + "Extended prompt guide (jp)": "Extended prompt guide (jp)", + "extension to be installed.": "extension to be installed.", + "extension to be installed, but will work with it disabled.)": "extension to be installed, but will work with it disabled.)", + "extensive": "extensive", + "extfull": "extfull", + "Extra args": "額外參數", + "Extra arguments": "Extra arguments", + "Extra arguments for trtexec command in plain text form": "Extra arguments for trtexec command in plain text form", + "Extra .cni file": "Extra .cni file", + "Extract all faces from a batch of images. Will apply enhancement in the tools enhancement tab.": "Extract all faces from a batch of images. 
Will apply enhancement in the tools enhancement tab.", + "Extract frames from the original video.": "Extract frames from the original video.", + "Extract from frame": "Extract from frame", + "Extract nth frame": "Extract nth frame", + "Extract to frame": "Extract to frame", + "Extract U-Net features": "Extract U-Net features", + "Extra filename": "Extra filename", + "Extra network card height": "Extra network card height", + "Extra network card width": "Extra network card width", + "Extra paths to scan for LoRA models, comma-separated. Paths containing commas must be enclosed in double quotes. In the path, \" (one quote) must be replaced by \"\" (two quotes).": "掃描 LoRA模型的額外目錄,以逗號分隔。包含逗號的路徑必須用雙引號括起來。在路徑中,一個引號「\"」必須替換為「\"\"」兩個引號。", + "Extra steps": "Extra steps", + "Extra text to add before <...> when adding extra network to prompt": "Extra text to add before <...> when adding extra network to prompt", + "Extremely slow (OpenCV + FaceMesh)": "Extremely slow (OpenCV + FaceMesh)", + "Face": "Face", + "Face Area Magnification": "Face Area Magnification", + "Face checkpoint built with the checkpoint builder in tools. Will overwrite reference image.": "Face checkpoint built with the checkpoint builder in tools. 
Will overwrite reference image.", + "Face Checkpoint (precedence over reference face)": "Face Checkpoint (precedence over reference face)", + "Face Crop option": "Face Crop option", + "Face Crop Resolution": "Face Crop Resolution", + "Face Denoising Strength": "Face Denoising Strength", + "Face detection confidence": "Face detection confidence", + "Face Detection Method": "Face Detection Method", + "Face Editor": "Face Editor", + "Face margin": "Face margin", + "face model resolution": "face model resolution", + "Face Restore Model": "面部修復模型", + "FaceSwapLab FaceSwap Model": "FaceSwapLab FaceSwap Model", + "Fairy Tale": "Fairy Tale", + "False": "False", + "fantasy": "fantasy", + "Fantasy Art": "Fantasy Art", + "Far clip": "Far clip", + "Far schedule": "Far schedule", + "fashion": "fashion", + "fast": "fast", + "Fast Encoder Color Fix": "Fast Encoder Color Fix", + "faster": "faster", + "Fast PNG Info ⚡": "Fast PNG Info ⚡", + "fauvism": "fauvism", + "favorites": "favorites", + "Favorites": "收藏夾", + "Favorites path from settings: log/images": "Favorites path from settings: log/images", + "Fax": "Fax", + "Feedback end": "Feedback end", + "Feedback start": "Feedback start", + "Feedback Strength": "Feedback Strength", + "FEMALE": "FEMALE", + "Fetch output folder from a1111 settings (Reload needed to enable)": "Fetch output folder from a1111 settings (Reload needed to enable)", + "Fetch previews for existing models": "Fetch previews for existing models", + "ffmpeg binary. Only set this if it fails otherwise.": "ffmpeg binary. Only set this if it fails otherwise.", + "FFmpeg CRF value": "FFmpeg CRF value", + "FFmpeg path/ location": "FFmpeg path/ location", + "FFmpeg Preset": "FFmpeg Preset", + "FFmpeg settings": "FFmpeg settings", + "Fields managed in your Parseq manifest override the values and schedules set in other parts of this UI. 
You can select which values to override by using the \"Managed Fields\" section in Parseq.": "Fields managed in your Parseq manifest override the values and schedules set in other parts of this UI. You can select which values to override by using the \"Managed Fields\" section in Parseq.", + "Fighting Game": "Fighting Game", + "figurativism": "figurativism", + "File:": "File:", + "File extensions": "File extensions", + "file_from": "file_from", + "file_name": "file_name", + "filename": "filename", + "File Name": "檔案名", + "Filename keyword search": "Filename keyword search", + "Filename of auto save model": "Filename of auto save model", + "filename(option)": "filename(option)", + "File name or path": "File name or path", + "Files in the output directory may be overwritten.": "Files in the output directory may be overwritten.", + "File size:": "File size:", + "Files to process": "Files to process", + "File system": "File system", + "file_to": "file_to", + "File type": "File type", + "fill down": "fill down", + "fill left": "fill left", + "fill right": "fill right", + "fill up": "fill up", + "Fill value used when Padding is set to constant": "Fill value used when Padding is set to constant", + "Film Noir": "Film Noir", + "Filter base model:": "Filter base model:", + "Filter Base Model:": "Filter Base Model:", + "Filter by Selection": "Filter by Selection", + "Filter by Tags": "Filter by Tags", + "Filter Images": "Filter Images", + "Filter Images by Tags": "Filter Images by Tags", + "filter item": "filter item", + "Filter Logic": "Filter Logic", + "Filter models by path name": "模型列表將僅顯示此路徑下的模型", + "Filter NSFW content": "Filter NSFW content", + "Filter out following properties (comma seperated). Example film grain, purple, cat": "Filter out following properties (comma seperated). 
Example film grain, purple, cat", + "filter value": "filter value", + "Filter values": "Filter values", + "find but bo translated:": "find but bo translated:", + "Find your cozy spot on Auto1111's webui": "Find your cozy spot on Auto1111's webui", + "First": "First", + "First face of batches sources will be extracted and used as input (or blended if blend is activated).": "First face of batches sources will be extracted and used as input (or blended if blend is activated).", + "First frame as init image": "First frame as init image", + "first frame of img2img result": "first frame of img2img result", + "FirstGen": "FirstGen", + "First inference involves compilation of the model for best performance.": "First inference involves compilation of the model for best performance.", + "First Name": "First Name", + "First Page": "首頁", + "Firstpass height": "Firstpass height", + "Firstpass width": "Firstpass width", + "First upscaler": "First upscaler", + "Fit video length": "Fit video length", + "Fix broken CLIP position IDs": "Fix broken CLIP position IDs", + "Fix clip": "Fix clip", + "Flat Papercut": "Flat Papercut", + "Flip": "Flip", + "Flow consistency mask": "Flow consistency mask", + "Flow factor schedule": "Flow factor schedule", + "Flow method": "Flow method", + "folder_from": "folder_from", + "Folder name where output images will be saved": "Folder name where output images will be saved", + "folder_to": "folder_to", + "Folder to Save Generation Images": "Folder to Save Generation Images", + "Follow installation instructions here": "Follow installation instructions here", + "Font color": "Font color", + "Font size": "Font size", + "Food Photography": "Food Photography", + "Foot Size": "Foot Size", + "For -1 seed, all frames in a GIF have common seed": "For -1 seed, all frames in a GIF have common seed", + "For accurate performance measurements, it is recommended to exclude this slower first inference, as it doesn't reflect normal running time.": "For accurate 
performance measurements, it is recommended to exclude this slower first inference, as it doesn't reflect normal running time.", + "for ade20k and": "for ade20k and", + "For advanced keyframing with Math functions, see": "若欲進階使用數學函數進行關鍵幀動畫,請參考:", + "For advanced users, you can create a permanent file in \\OneButtonPrompt\\userfiles\\ called antilist.csv": "For advanced users, you can create a permanent file in \\OneButtonPrompt\\userfiles\\ called antilist.csv", + "for backward compatibility. uses the formula: `width/height`": "for backward compatibility. uses the formula: `width/height`", + "For best prompts with Stable Diffusion 1.* choose the": "For best prompts with Stable Diffusion 1.* choose the", + "For best prompts with Stable Diffusion 2.* choose the": "For best prompts with Stable Diffusion 2.* choose the", + "force all frames to be in grayscale": "force all frames to be in grayscale", + "Force convert half to float on interpolation (for some platforms)": "在插值時強制將半精度轉換為浮點(對於某些平台)", + "Force image gallery to use temporary files": "Force image gallery to use temporary files", + "Force Rebuild.": "Force Rebuild.", + "Force Reset": "Force Reset", + "Force touch mode": "Force touch mode", + "for coco to get category->id map. Note that coco jumps some numbers, so the actual ID is line_number - 21.": "for coco to get category->id map. Note that coco jumps some numbers, so the actual ID is line_number - 21.", + "for depth map.": "for depth map.", + "Forearm": "Forearm", + "Foreground": "Foreground", + "Foreground Transparency": "Foreground Transparency", + "for explanation of each parameter. If you still cannot understand, use default.": "for explanation of each parameter. 
If you still cannot understand, use default.", + "FOR HELP CLICK HERE": "需要幫助請點擊這裡", + "for how many frames the same seed should stick before iterating to the next one": "for how many frames the same seed should stick before iterating to the next one", + "For image processing do exactly the amount of steps as specified": "For image processing do exactly the amount of steps as specified", + "Format": "格式", + "format: http://127.0.0.1:port": "format: http://127.0.0.1:port", + "For more details, and functionality, see the documentation (coming soon)": "For more details, and functionality, see the documentation (coming soon)", + "for more detials": "for more detials", + "for more info/ a Guide.": "for more info/ a Guide.", + "For more information, please visit the TensorRT Extension GitHub page": "For more information, please visit the TensorRT Extension GitHub page", + "For negative prompts, please write your positive prompt, then --neg ugly, text, assymetric, or any other negative tokens of your choice. OR:": "For negative prompts, please write your positive prompt, then --neg ugly, text, assymetric, or any other negative tokens of your choice. OR:", + "(for small sets of custom tags)": "(for small sets of custom tags)", + "for supported extensions)": "for supported extensions)", + "for your animation (leave blank to ignore).": "for your animation (leave blank to ignore).", + "Found a bug or want to ask for a feature ? Please": "Found a bug or want to ask for a feature ? Please", + "Found a bug or want to ask for a feature ? Please use": "Found a bug or want to ask for a feature ? Please use", + "Found a bug or want to ask for a feature ? Please use ": "Found a bug or want to ask for a feature ? 
Please use ", + "Found tags": "Found tags", + "FOV schedule": "FOV schedule", + "fp32": "fp32", + "FP8 weight": "FP8 weight", + "fps": "fps", + "fps: ": "fps: ", + "FPS": "FPS", + "(fraction of sampling steps when the swtch to refiner model should happen; 1=never, 0.5=switch in the middle of generation)": "(fraction of sampling steps when the swtch to refiner model should happen; 1=never, 0.5=switch in the middle of generation)", + "frame extracted": "frame extracted", + "Frame Height": "Frame Height", + "Frame Interpolation": "Frame Interpolation", + "Frame Interpolation to smooth out, slow-mo (or both) any video.": "Frame Interpolation to smooth out, slow-mo (or both) any video.", + "Frame Interpolation will *not* run if any of the following are enabled: 'Store frames in ram' / 'Skip video for run all'.": "Frame Interpolation will *not* run if any of the following are enabled: 'Store frames in ram' / 'Skip video for run all'.", + "Framerate": "幀率", + "Frames": "Frames", + "frames per keyframe": "frames per keyframe", + "Frames per second (FPS)": "Frames per second (FPS)", + "Frames Per Wave": "Frames Per Wave", + "Frames to Video": "Frames to Video", + "Frame Width": "Frame Width", + "Frequency": "Frequency", + "(from CivitAI)": "(from CivitAI)", + "From (full path)": "From (full path)", + "full": "full", + "Full page image viewer: enable": "Full page image viewer: enable", + "Full page image viewer: gamepad repeat period": "Full page image viewer: gamepad repeat period", + "Full page image viewer: navigate with gamepad": "Full page image viewer: navigate with gamepad", + "Full page image viewer: show images zoomed in by default": "Full page image viewer: show images zoomed in by default", + "full prompts list in a JSON format. value on left side is the frame number": "full prompts list in a JSON format. 
value on left side is the frame number", + "Full quality": "Full quality", + "Full res mask": "Full res mask", + "Full res mask padding": "Full res mask padding", + "Gallery": "Gallery", + "Gallery height in _absolute_ percent of your screen (not remaining height)": "Gallery height in _absolute_ percent of your screen (not remaining height)", + "gamma": "gamma", + "Gamma": "Gamma", + "Gap fill technique": "Gap fill technique", + "Gather": "Gather", + "Gen": "Gen", + "Gender of the character": "Gender of the character", + "⚙️ General Settings": "⚙️ General Settings", + "GENERATE": "GENERATE", + "Generate 3D inpainted mesh": "Generate 3D inpainted mesh", + "Generate 3D inpainted mesh. (Sloooow, required for generating videos)": "Generate 3D inpainted mesh. (Sloooow, required for generating videos)", + "Generate 4 demo videos with 3D inpainted mesh.": "Generate 4 demo videos with 3D inpainted mesh.", + "Generate a checkpoint at the current training level.": "Generate a checkpoint at the current training level.", + "Generate a checkpoint at the current training lvel.": "Generate a checkpoint at the current training lvel.", + "Generate all checkpoint preview": "Generate all checkpoint preview", + "Generate all checkpoint preview after train finished": "Generate all checkpoint preview after train finished", + "Generate All Styles In Order": "Generate All Styles In Order", + "Generate a mask image.": "Generate a mask image.", + "Generate Batch": "Generate Batch", + "Generate Caption": "Generate Caption", + "Generated psd file": "Generated psd file", + "Generated video": "Generated video", + "Generate .ebs file.(ebsynth project file)": "Generate .ebs file.(ebsynth project file)", + "Generate graphs from training logs showing learning rate and loss averages over the course of training.": "Generate graphs from training logs showing learning rate and loss averages over the course of training.", + "Generate HeatMap": "Generate HeatMap", + "Generate human masks": "Generate 
human masks", + "Generate Info": "產生資訊", + "Generate inputframes": "Generate inputframes", + "Generate layout for batch process": "Generate layout for batch process", + "Generate layout for single image": "Generate layout for single image", + "Generate me some prompts!": "Generate me some prompts!", + "Generate Movie Mode": "Generate Movie Mode", + "Generate preview every N frames": "Generate preview every N frames", + "Generate preview images every N epochs.": "Generate preview images every N epochs.", + "Generate preview images every N steps.": "Generate preview images every N steps.", + "Generate preview video during generation? (Preview does not include frame interpolation or upscaling.)": "Generate preview video during generation? (Preview does not include frame interpolation or upscaling.)", + "Generate sample images using the currently saved diffusers model.": "Generate sample images using the currently saved diffusers model.", + "Generate simple 3D mesh": "Generate simple 3D mesh", + "Generate simple 3D mesh. (Fast, accurate only with ZoeDepth models and no boost, no custom maps)": "Generate simple 3D mesh. (Fast, accurate only with ZoeDepth models and no boost, no custom maps)", + "Generate stereoscopic image(s)": "Generate stereoscopic image(s)", + "Generate video": "Generate video", + "Generate Video": "產生影片", + "Generate video from inpainted(!) mesh.": "Generate video from inpainted(!) 
mesh.", + "Generation Info": "Generation Info", + "Generation Parameters": "Generation Parameters", + "Generation settings:": "Generation settings:", + "Generation TEST!!(Ignore Project directory and use the image and mask specified in the main UI)": "Generation TEST!!(Ignore Project directory and use the image and mask specified in the main UI)", + "Generator device": "Generator device", + "Gen Options": "Gen Options", + "Get Civitai Model Info by Model Page URL": "Get Civitai Model Info by Model Page URL", + "*Get depth from uploaded video*": "*Get depth from uploaded video*", + "Get javascript logs": "Get javascript logs", + "Get List": "Get List", + "Get mask": "Get mask", + "Get mask as alpha of image": "Get mask as alpha of image", + "Get Model Info by Civitai Url": "Get Model Info by Civitai Url", + "Get Model Info from Civitai": "Get Model Info from Civitai", + "Get Model Info from Civitai by URL": "Get Model Info from Civitai by URL", + "Get prompt from:": "Get prompt from:", + "Get sub directories": "讀取子目錄", + "Get Tags": "Get Tags", + "Getting Started": "Getting Started", + "GFPGAN as Face enhancer": "GFPGAN as Face enhancer", + "GFPGAN vis.": "GFPGAN vis.", + "github": "github", + "git version info": "git version info", + "Give a similarity score between two images (only first face is compared).": "Give a similarity score between two images (only first face is compared).", + "give it a star on GitHub": "在 GitHub 上給它一顆星星", + "Global-Inpainting (all faces)": "Global-Inpainting (all faces)", + "Global Post-Processing": "Global Post-Processing", + "GLOB patterns (comma separated)": "GLOB patterns (comma separated)", + "Glob recursively with input directory pattern": "Glob recursively with input directory pattern", + "Glow": "Glow", + "Glow mode": "Glow mode", + "Gold Pendant": "Gold Pendant", + "gore": "gore", + "Gothic": "Gothic", + "Go to Settings → User Interface → Quick Settings List, add sd_unet. 
Apply these settings, then reload the UI.": "Go to Settings → User Interface → Quick Settings List, add sd_unet. Apply these settings, then reload the UI.", + "grad": "grad", + "gradio launch": "gradio launch", + "Gradio theme (requires restart)": "Gradio theme (requires restart)", + "graffiti": "graffiti", + "Graffiti": "Graffiti", + "graphic design": "graphic design", + "gray": "gray", + "Great": "Great", + "greg mode": "greg mode", + "GroundingDINO batch progress status": "GroundingDINO batch progress status", + "GroundingDINO Box Threshold": "GroundingDINO Box Threshold", + "GroundingDINO Detection Prompt": "GroundingDINO Detection Prompt", + "GroundingDINO Model (Auto download from huggingface)": "GroundingDINO Model (Auto download from huggingface)", + "GroundingDINO + Segment Anything can achieve [text prompt]->[object detection]->[segmentation]": "GroundingDINO + Segment Anything can achieve [text prompt]->[object detection]->[segmentation]", + "Group/split table by: (when not started with single quote - so only for prompts, not for merge)": "Group/split table by: (when not started with single quote - so only for prompts, not for merge)", + "Grunge": "Grunge", + "GTA": "GTA", + "Guess Mode": "Guess Mode", + "Guidance End (T)": "Guidance End (T)", + "Guidance scale": "Guidance scale", + "Guidance scale webui": "Guidance scale webui", + "Guidance Start (T)": "Guidance Start (T)", + "Guided Images": "Guided Images", + "Guided images schedules": "Guided images schedules", + "🎓 Guides": "🎓 Guides", + "Hair": "Hair", + "HakuImg": "HakuImg", + "haku_output": "haku_output", + "Hands": "Hands", + "Hand Size": "Hand Size", + "Happy prompting!": "Happy prompting!", + "hard_light": "實光", + "has metadata": "有中繼資料", + "has user metadata": "有使用者中繼資料", + "Hat": "Hat", + "Head Size": "Head Size", + "Height Resolution": "Height Resolution", + "Helper": "Helper", + "[here]": "[here]", + "here": "這裡", + "Hidden SSID": "Hidden SSID", + "Hidden UI tabs (requires restart)": 
"Hidden UI tabs (requires restart)", + "Hide": "Hide", + "Hide accordion by default": "Hide accordion by default", + "Hide annotator result": "Hide annotator result", + "Hide checked Civitai Helper buttons on model cards": "Hide checked Civitai Helper buttons on model cards", + "Hide early access models": "Hide early access models", + "Hide Non-API Components (Restarts UI)": "Hide Non-API Components (Restarts UI)", + "Hide samplers in user interface (requires restart)": "Hide samplers in user interface (requires restart)", + "Hide sub-folders that start with a .": "Hide sub-folders that start with a .", + "Hide subprompt masks in prompt mode": "Hide subprompt masks in prompt mode", + "high contrast": "high contrast", + "Higher levels increases complexity and randomness of generated prompt": "Higher levels increases complexity and randomness of generated prompt", + "Highest Rated": "Highest Rated", + "Highres. fix": "Highres. fix", + "Hips": "Hips", + "Hires CFG": "Hires CFG", + "hires. fix": "hires. fix", + "hiresfix": "hiresfix", + "Hires. fix+": "Hires. fix+", + "Hires. fix+ to do steps optimization": "Hires. fix+ to do steps optimization", + "Hires Negative prompt": "Hires Negative prompt", + "Hires Prompt": "Hires Prompt", + "Hires Sampling method": "Hires Sampling method", + "hires upscaler": "hires upscaler", + "Historical": "Historical", + "History": "歷史記錄", + "Homepage": "Homepage", + "horizontal": "horizontal", + "horizontal only": "horizontal only", + "horizontal split num": "horizontal split num", + "horror": "horror", + "Horror": "Horror", + "how closely the image should conform to the prompt. Lower values produce more creative results. (recommended range 5-15)": "how closely the image should conform to the prompt. Lower values produce more creative results. (recommended range 5-15)", + "how closely the image should conform to the prompt. Lower values produce more creative results. 
(recommended range 5-15)`": "how closely the image should conform to the prompt. Lower values produce more creative results. (recommended range 5-15)`", + "how close to get to the colors of the input frame image/ the amount each frame during a tweening step to use the new images colors": "how close to get to the colors of the input frame image/ the amount each frame during a tweening step to use the new images colors", + "How many batches of images to create": "How many batches of images to create", + "How many classification images to use per instance image.": "How many classification images to use per instance image.", + "How many classifier/regularization images to generate at once.": "How many classifier/regularization images to generate at once.", + "How many images to process at once per training step?": "How many images to process at once per training step?", + "How many samples to generate per subject.": "How many samples to generate per subject.", + "How many steps to wait before applying semantic guidance, default 10": "How many steps to wait before applying semantic guidance, default 10", + "How many timesteps to smooth graph data over. A lower value means a more jagged graph with more information, higher value will make things prettier but slightly less accurate.": "How many timesteps to smooth graph data over. A lower value means a more jagged graph with more information, higher value will make things prettier but slightly less accurate.", + "how many times to interpolate the source video. e.g source video fps of 12 and a value of x2 will yield a 24fps interpolated video": "how many times to interpolate the source video. e.g source video fps of 12 and a value of x2 will yield a 24fps interpolated video", + "How much of noise from each octave is added on each iteration. Higher values will make it more straighter and sharper, while lower values will make it rounder and smoother. 
It is limited by 1.0 as the resulting gain fill the frame completely with noise.": "How much of noise from each octave is added on each iteration. Higher values will make it more straighter and sharper, while lower values will make it rounder and smoother. It is limited by 1.0 as the resulting gain fill the frame completely with noise.", + "how much the image should look like the previou one and new image frame init. strength schedule might be better if this is higher, around .75 during the keyfames you want to switch on": "how much the image should look like the previou one and new image frame init. strength schedule might be better if this is higher, around .75 during the keyfames you want to switch on", + "How should Style Names Rendered on UI": "How should Style Names Rendered on UI", + "How to complete nested wildcard paths": "How to complete nested wildcard paths", + "How to complete nested wildcard paths (e.g. \"hair/colours/light/...\")": "How to complete nested wildcard paths (e.g. \"hair/colours/light/...\")", + "How to handle input image?": "How to handle input image?", + "hue": "hue", + "huge batch query (TF 2.10, experimental)": "huge batch query (TF 2.10, experimental)", + "HuggingFace cache directory, see huggingface_hub guides/manage-cache": "HuggingFace cache directory, see huggingface_hub guides/manage-cache", + "Huggingface token": "Huggingface token", + "humanoid": "humanoid", + "humanoid - A random humanoid, males, females, fantasy types, fictional and non-fictional characters. Can add clothing, features and a bunch of other things.": "humanoid - A random humanoid, males, females, fantasy types, fictional and non-fictional characters. 
Can add clothing, features and a bunch of other things.", + "Humans Masking": "Humans Masking", + "hybrid_comp_mask_auto_contrast": "hybrid_comp_mask_auto_contrast", + "hybrid_comp_mask_blend_alpha_schedule": "hybrid_comp_mask_blend_alpha_schedule", + "hybrid_comp_mask_type": "hybrid_comp_mask_type", + "Hybrid composite": "Hybrid composite", + "Hybrid motion": "Hybrid motion", + "Hybrid motion may be used with hybrid composite off, to just use video motion.": "Hybrid motion may be used with hybrid composite off, to just use video motion.", + "Hybrid Schedules": "Hybrid Schedules", + "Hybrid Settings": "Hybrid Settings", + "Hybrid Video": "混合影片", + "Hybrid Video Compositing in 2D/3D Mode": "Hybrid Video Compositing in 2D/3D Mode", + "Hybrid Video Schedules": "Hybrid Video Schedules", + "Hypertile U-Net max depth": "Hypertile U-Net max depth", + "Hypertile U-Net max tile size": "Hypertile U-Net max tile size", + "Hypertile U-Net swap size": "Hypertile U-Net swap size", + "Hypertile VAE max depth": "Hypertile VAE max depth", + "Hypertile VAE max tile size": "Hypertile VAE max tile size", + "Hypertile VAE swap size": "Hypertile VAE swap size", + "Idea and inspiration by xKean.": "Idea and inspiration by xKean.", + "Idea by redditor jonesaid.": "Idea by redditor jonesaid.", + "Identifer:BlockID:Elements:Ratio,...,separated by empty line": "Identifer:BlockID:Elements:Ratio,...,separated by empty line", + "If a model was extracted or trained with EMA weights, these will be appended separately to the model for use in training later.": "If a model was extracted or trained with EMA weights, these will be appended separately to the model for use in training later.", + "If an image is specified below, it will be used with highest priority.": "If an image is specified below, it will be used with highest priority.", + "If an image is too large, crop it from the center.": "If an image is too large, crop it from the center.", + "If blank or set to 0, parameters in the \"txt2img\" 
tab are used.": "If blank or set to 0, parameters in the \"txt2img\" tab are used.", + "If checked, the modified area will appear around the edges of the image. If unchecked, the modified area will appear in the center of the image.": "If checked, the modified area will appear around the edges of the image. If unchecked, the modified area will appear in the center of the image.", + "If Deforum crashes due to CN updates, go": "If Deforum crashes due to CN updates, go", + "If EMA weights are saved in a model, these will be extracted instead of the full Unet. Probably not necessary for training or fine-tuning.": "If EMA weights are saved in a model, these will be extracted instead of the full Unet. Probably not necessary for training or fine-tuning.", + "If Empty": "If Empty", + "If enabled, changes the behavior or hybrid_motion to captures motion by comparing the current video frame to the previous rendered image, instead of the previous video frame.": "If enabled, changes the behavior or hybrid_motion to captures motion by comparing the current video frame to the previous rendered image, instead of the previous video frame.", + "If enabled, only images will be saved": "If enabled, only images will be saved", + "if enabled, raw imgs will be deleted after a successful video/ videos (upsacling, interpolation, gif) creation": "if enabled, raw imgs will be deleted after a successful video/ videos (upsacling, interpolation, gif) creation", + "If loading of the Yolov5_anime model fails, check": "If loading of the Yolov5_anime model fails, check", + "If multiple .ebs files are generated, run them all.": "If multiple .ebs files are generated, run them all.", + "If non-deforum frames, use the correct number of counting digits. For files like 'bunnies-0000.jpg', you'd use 'bunnies-%04d.jpg'": "If non-deforum frames, use the correct number of counting digits. 
For files like 'bunnies-0000.jpg', you'd use 'bunnies-%04d.jpg'", + "If out-* directory already exists in the Project directory, delete it manually before executing.": "If out-* directory already exists in the Project directory, delete it manually before executing.", + "iFrame height": "iFrame height", + "If \"Save a copy of image before doing face restoration.\" is enabled, save every image during rolling generation": "If \"Save a copy of image before doing face restoration.\" is enabled, save every image during rolling generation", + "If setting2prompt width, which width-ratio between both columns (0: minimize setting, 1: 50/50, 6: minimize output gallery column)": "If setting2prompt width, which width-ratio between both columns (0: minimize setting, 1: 50/50, 6: minimize output gallery column)", + "If Smart-Step is enabled, the number of iterations for Hires. fix will never be less than this:": "If Smart-Step is enabled, the number of iterations for Hires. 
fix will never be less than this:", + "If .srt file is saved, soft-embed the subtitles into the rendered video file": "If .srt file is saved, soft-embed the subtitles into the rendered video file", + "If the ControlNet Plugin is enabled, which do you use first?": "If the ControlNet Plugin is enabled, which do you use first?", + "If the saved image file size is above the limit, or its either width or height are above the limit, save a downscaled copy as JPG": "If the saved image file size is above the limit, or its either width or height are above the limit, save a downscaled copy as JPG", + "If this option is selected, many extra frames will be output for the various processes into the hybridframes folder.": "If this option is selected, many extra frames will be output for the various processes into the hybridframes folder.", + "If true, instead of sending the flattened image, will send just the currently selected layer.": "If true, instead of sending the flattened image, will send just the currently selected layer.", + "If using a blend mask, this controls the blend amount of the video and render for the composite mask.": "If using a blend mask, this controls the blend amount of the video and render for the composite mask.", + "If using autocontrast option, this is the high cutoff for the operation.": "If using autocontrast option, this is the high cutoff for the operation.", + "If using autocontrast option, this is the low cutoff for the operation.": "If using autocontrast option, this is the low cutoff for the operation.", + "If you do not know the path, try opening the folder in Explorer and copying the path": "If you do not know the path, try opening the folder in Explorer and copying the path", + "If you do not like the selection, you can modify it manually.": "If you do not like the selection, you can modify it manually.", + "If you have already created a background video in Invert Mask Mode([Ebsynth Utility]->[configuration]->[etc]->[Mask Mode]),": "If you 
have already created a background video in Invert Mask Mode([Ebsynth Utility]->[configuration]->[etc]->[Mask Mode]),", + "If you have any issues please visit": "If you have any issues please visit", + "If you have bad results and don't want to fine-tune here, just enable Codeformer in \"Global Post-Processing\".": "If you have bad results and don't want to fine-tune here, just enable Codeformer in \"Global Post-Processing\".", + "If you have trouble entering the video path manually, you can also use drag and drop.For large videos, please enter the path manually.": "If you have trouble entering the video path manually, you can also use drag and drop.For large videos, please enter the path manually.", + "If you liked this extension, please": "如果您喜歡這個擴展,請", + "(If your wildcard files have a specific custom order, disable this to keep it)": "(If your wildcard files have a specific custom order, disable this to keep it)", + "If you still cannot understand, use default.": "If you still cannot understand, use default.", + "If you want to": "If you want to", + "If you want to use the same tagging results the next time you run img2img, rename the file to prompts.txt": "If you want to use the same tagging results the next time you run img2img, rename the file to prompts.txt", + "If you want to use Width/Height which are not multiples of 64, please change noise_type to 'Uniform', in Keyframes --> Noise.": "如果您想使用非 64 的倍數作為寬度/高度,請在關鍵幀 --> 噪音中將 噪音類型(noise_type) 設置為 '均勻分佈(Uniform)'。", + "Ignore black area": "Ignore black area", + "Ignore emphasis": "Ignore emphasis", + "Ignore faces larger than specified size": "Ignore faces larger than specified size", + "Ignore matching rules when collecting the class dataset. (# of class images must still be configured on the concepts tab).": "Ignore matching rules when collecting the class dataset. 
(# of class images must still be configured on the concepts tab).", + "Ignore sd-webui grammar": "Ignore sd-webui grammar", + "I know what I am doing.": "我知道我在做什麼。", + "image": "image", + "Image Browser": "圖庫瀏覽器", + "Image Browser Settings": "Image Browser Settings", + "Image Count": "圖像數", + "Image creation progress preview mode": "Image creation progress preview mode", + "Image data view": "Image data view", + "Image Directory": "圖像目錄", + "Image File": "Image File", + "Image for Auto Segmentation": "Image for Auto Segmentation", + "Image for Image Layout": "Image for Image Layout", + "Image for Recognition": "Image for Recognition", + "Image for Segment Anything": "Image for Segment Anything", + "Image Init": "Image Init", + "Image Layout": "Image Layout", + "Image layout status": "Image layout status", + "Image Parameters": "圖像參數", + "Image path": "Image path", + "Image preview height": "圖像預覽高度", + "(Images are output to ": "(Images are output to ", + "Images directory": "圖像目錄", + "Images folder": "Images folder", + "Images path": "Images path", + "Images to use for keyframe guidance": "Images to use for keyframe guidance", + "Image strength schedule": "Image strength schedule", + "Image to 3D": "Image to 3D", + "Image to 3D mesh": "Image to 3D mesh", + "Image to be masked": "Image to be masked", "img2img CFG": "img2img CFG", "img2img denoise strength": "img2img denoise strength", - "adjust denoise each img2img batch": "adjust denoise each img2img batch", + "img2img-grids": "img2img-網格", + "img2img history": "img2img history", + "Img2Img input will be used if no ControlNet input is specified.": "Img2Img input will be used if no ControlNet input is specified.", + "img2img keyframes.": "img2img keyframes.", "img2img model to use": "img2img model to use", - "img2img sampler": "img2img sampler", - "img2img upscaler": "img2img upscaler", - "img2img scale": "img2img scale", "img2img padding": "img2img padding", - "Use Ultimate SD Upscale script instead": "Use Ultimate 
SD Upscale script instead", - "This requires the Ultimate SD Upscale extension, install this if you haven't": "This requires the Ultimate SD Upscale extension, install this if you haven't", - "tile width": "tile width", - "tile height": "tile height", - "Type": "類型", - "Seams fix": "儲存接縫修復圖像", + "Img2Img Repeat Count (Loop Back)": "Img2Img Repeat Count (Loop Back)", + "img2img sampler": "img2img sampler", + "img2img Sampling steps": "img2img Sampling steps", + "img2img scale": "img2img scale", + "Img2Img Settings:": "Img2Img Settings:", + "IMG2IMG upscale": "IMG2IMG upscale", + "img2img upscaler": "img2img upscaler", + "Import": "Import", + "Import a model from Huggingface.co instead of using a local checkpoint.": "Import a model from Huggingface.co instead of using a local checkpoint.", + "Import a model from Huggingface.co instead of using a local checkpoint. Hub model MUST contain diffusion weights. You can specify a local folder with a cloned model, no HF token will be needed in this case.": "Import a model from Huggingface.co instead of using a local checkpoint. Hub model MUST contain diffusion weights. 
You can specify a local folder with a cloned model, no HF token will be needed in this case.", + "⚠️ Important info, please read carefully": "⚠️ Important info, please read carefully", + "Important notes:": "Important notes:", + "Important Notes:": "Important Notes:", + "Important notes and Help": "Important notes and Help", + "*Important* notes on Prompts": "*Important* notes on Prompts", + "import completed!": "import completed!", + "import gradio": "import gradio", + "Import image": "Import image", + "import ldm": "import ldm", + "Import Model from Huggingface Hub": "Import Model from Huggingface Hub", + "Import settings from file": "Import settings from file", + "import sgm": "import sgm", + "import torch": "import torch", + "impressionism": "impressionism", + "Impressionist": "Impressionist", + "(in %)": "(in %)", + "IN00": "IN00", + "IN01": "IN01", + "IN02": "IN02", + "IN03": "IN03", + "IN04": "IN04", + "IN05": "IN05", + "IN06": "IN06", + "IN07": "IN07", + "IN08": "IN08", + "IN09": "IN09", + "IN10": "IN10", + "IN11": "IN11", + "IN_A_00": "IN_A_00", + "IN_A_01": "IN_A_01", + "IN_A_02": "IN_A_02", + "IN_A_03": "IN_A_03", + "IN_A_04": "IN_A_04", + "IN_A_05": "IN_A_05", + "IN_A_06": "IN_A_06", + "IN_A_07": "IN_A_07", + "IN_A_08": "IN_A_08", + "IN_A_09": "IN_A_09", + "IN_A_10": "IN_A_10", + "IN_A_11": "IN_A_11", + "IN_B_00": "IN_B_00", + "IN_B_01": "IN_B_01", + "IN_B_02": "IN_B_02", + "IN_B_03": "IN_B_03", + "IN_B_04": "IN_B_04", + "IN_B_05": "IN_B_05", + "IN_B_06": "IN_B_06", + "IN_B_07": "IN_B_07", + "IN_B_08": "IN_B_08", + "IN_B_09": "IN_B_09", + "IN_B_10": "IN_B_10", + "IN_B_11": "IN_B_11", + "Include": "Include", + "Include confident of tags matches in results": "Include confident of tags matches in results", + "Include embeddings in normal tag results": "Include embeddings in normal tag results", + "Include images in sub directories": "包含子目錄的圖像", + "Include metadata in saved images": "Include metadata in saved images", + "Include resource hashes in image 
metadata (for resource auto-detection on Civitai)": "Include resource hashes in image metadata (for resource auto-detection on Civitai)", + "Include watermark in saved images": "Include watermark in saved images", + "Incremental/decremental percentage (-50%, +50%)": "增減百分比(-50%,+50%)", + "Index": "索引", + "Info and links": "Info and links", + "Info & Help": "Info & Help", + "Info, Links and Help": "資訊、連結與幫助", + "Info object": "Info object", + "Information": "Information", + "Information, comment and share @": "Information, comment and share @", + "Infotext is what this software calls the text that contains generation parameters and can be used to generate the same picture again.\nIt is displayed in UI below the image. To use infotext, paste it into the prompt and click the ↙️ paste button.": "Infotext is what this software calls the text that contains generation parameters and can be used to generate the same picture again.\nIt is displayed in UI below the image. To use infotext, paste it into the prompt and click the ↙️ paste button.", + "In FPS": "In FPS", + "In Frame Count": "In Frame Count", + "In \"Interpolate existing pics\" mode, FPS is determined *only* by output FPS slider. Audio will be added if requested even with slow-mo \"enabled\", as it does *nothing* in this mode.": "In \"Interpolate existing pics\" mode, FPS is determined *only* by output FPS slider. Audio will be added if requested even with slow-mo \"enabled\", as it does *nothing* in this mode.", + "Init": "初始化", + "initial": "initial", + "Initial denoising strength": "Initial denoising strength", + "Initial generated image number": "Initial generated image number", + "initialize extra networks": "initialize extra networks", + "initialize shared": "initialize shared", + "Initial seed": "Initial seed", + "initial startup": "initial startup", + "Initiates extraction of video frames from your video_init_path to the inputframes folder. 
You only need to do this once and then you can change it to False and re-render": "Initiates extraction of video frames from your video_init_path to the inputframes folder. You only need to do this once and then you can change it to False and re-render", + "Init image": "Init image", + "Init image box": "Init image box", + "Init image URL": "Init image URL", + "Init tab's strength slider should be greater than 0. Recommended value (.65 - .80).": "Init tab's strength slider should be greater than 0. Recommended value (.65 - .80).", + "Init Video": "Init Video", + "In Keyframes tab, you can also set": "In Keyframes tab, you can also set", + "in <>, like , ": "in <>, like , ", + "Inner Fit (Scale to Fit)": "Inner Fit (Scale to Fit)", + "InOutPaint": "InOutPaint", + "➠ Inpaint": "➠ Inpaint", + "Inpaint Area(Override img2img Inpaint area)": "Inpaint Area(Override img2img Inpaint area)", + "Inpaint at full resolution 2nd": "Inpaint at full resolution 2nd", + "Inpaint at full resolution 3rd": "Inpaint at full resolution 3rd", + "Inpaint at full resolution 4th": "Inpaint at full resolution 4th", + "Inpaint at full resolution 5th": "Inpaint at full resolution 5th", + "Inpaint at full resolution padding, pixels 2nd": "Inpaint at full resolution padding, pixels 2nd", + "Inpaint at full resolution padding, pixels 3rd": "Inpaint at full resolution padding, pixels 3rd", + "Inpaint at full resolution padding, pixels 4th": "Inpaint at full resolution padding, pixels 4th", + "Inpaint at full resolution padding, pixels 5th": "Inpaint at full resolution padding, pixels 5th", + "Inpaint denoising strength": "Inpaint denoising strength", + "Inpaint denoising strength 10th": "Inpaint denoising strength 10th", + "Inpaint denoising strength 2nd": "Inpaint denoising strength 2nd", + "Inpaint denoising strength 3rd": "Inpaint denoising strength 3rd", + "Inpaint denoising strength 4th": "Inpaint denoising strength 4th", + "Inpaint denoising strength 5th": "Inpaint denoising strength 5th", + 
"Inpaint denoising strength 6th": "Inpaint denoising strength 6th", + "Inpaint denoising strength 7th": "Inpaint denoising strength 7th", + "Inpaint denoising strength 8th": "Inpaint denoising strength 8th", + "Inpaint denoising strength 9th": "Inpaint denoising strength 9th", + "inpaint_global_harmonious": "inpaint_global_harmonious", + "inpaint height": "inpaint height", + "Inpaint+ Height": "Inpaint+ Height", + "inpaint height 10th": "inpaint height 10th", + "inpaint height 2nd": "inpaint height 2nd", + "inpaint height 3rd": "inpaint height 3rd", + "inpaint height 4th": "inpaint height 4th", + "inpaint height 5th": "inpaint height 5th", + "inpaint height 6th": "inpaint height 6th", + "inpaint height 7th": "inpaint height 7th", + "inpaint height 8th": "inpaint height 8th", + "inpaint height 9th": "inpaint height 9th", + "inpainting": "inpainting", + "Inpainting": "Inpainting", + "Inpainting Model ID": "Inpainting Model ID", + "Inpainting Model ID webui": "Inpainting Model ID webui", + "Inpainting negative prompt use [gender] instead of men or woman": "Inpainting negative prompt use [gender] instead of men or woman", + "Inpainting Prompt": "Inpainting Prompt", + "Inpainting prompt use [gender] instead of men or woman": "Inpainting prompt use [gender] instead of men or woman", + "Inpainting Sampler": "Inpainting Sampler", + "Inpainting seed": "Inpainting seed", + "Inpainting sends image to inpainting with a mask on face (once for each faces).": "Inpainting sends image to inpainting with a mask on face (once for each faces).", + "Inpainting steps": "Inpainting steps", + "Inpainting webui": "Inpainting webui", + "Inpaint mask blur": "Inpaint mask blur", + "Inpaint mask blur 10th": "Inpaint mask blur 10th", + "Inpaint mask blur 2nd": "Inpaint mask blur 2nd", + "Inpaint mask blur 3rd": "Inpaint mask blur 3rd", + "Inpaint mask blur 4th": "Inpaint mask blur 4th", + "Inpaint mask blur 5th": "Inpaint mask blur 5th", + "Inpaint mask blur 6th": "Inpaint mask blur 6th", + 
"Inpaint mask blur 7th": "Inpaint mask blur 7th", + "Inpaint mask blur 8th": "Inpaint mask blur 8th", + "Inpaint mask blur 9th": "Inpaint mask blur 9th", + "Inpaint+ mode guide": "Inpaint+ mode guide", + "inpaint_only": "inpaint_only", + "inpaint_only+lama": "inpaint_only+lama", + "Inpaint only masked": "Inpaint only masked", + "Inpaint only masked 10th": "Inpaint only masked 10th", + "Inpaint only masked 2nd": "Inpaint only masked 2nd", + "Inpaint only masked 3rd": "Inpaint only masked 3rd", + "Inpaint only masked 4th": "Inpaint only masked 4th", + "Inpaint only masked 5th": "Inpaint only masked 5th", + "Inpaint only masked 7th": "Inpaint only masked 7th", + "Inpaint only masked 8th": "Inpaint only masked 8th", + "Inpaint only masked 9th": "Inpaint only masked 9th", + "Inpaint only masked padding, pixels": "Inpaint only masked padding, pixels", + "Inpaint only masked padding, pixels 10th": "Inpaint only masked padding, pixels 10th", + "Inpaint only masked padding, pixels 2nd": "Inpaint only masked padding, pixels 2nd", + "Inpaint only masked padding, pixels 3rd": "Inpaint only masked padding, pixels 3rd", + "Inpaint only masked padding, pixels 4th": "Inpaint only masked padding, pixels 4th", + "Inpaint only masked padding, pixels 5th": "Inpaint only masked padding, pixels 5th", + "Inpaint only masked padding, pixels 6th": "Inpaint only masked padding, pixels 6th", + "Inpaint only masked padding, pixels 7th": "Inpaint only masked padding, pixels 7th", + "Inpaint only masked padding, pixels 8th": "Inpaint only masked padding, pixels 8th", + "Inpaint only masked padding, pixels 9th": "Inpaint only masked padding, pixels 9th", + "Inpaint selection": "Inpaint selection", + "➠ Inpaint sketch": "➠ Inpaint sketch", + "inpaint width": "inpaint width", + "Inpaint+ Width": "Inpaint+ Width", + "inpaint width 10th": "inpaint width 10th", + "inpaint width 2nd": "inpaint width 2nd", + "inpaint width 3rd": "inpaint width 3rd", + "inpaint width 4th": "inpaint width 4th", + 
"inpaint width 5th": "inpaint width 5th", + "inpaint width 6th": "inpaint width 6th", + "inpaint width 7th": "inpaint width 7th", + "inpaint width 8th": "inpaint width 8th", + "inpaint width 9th": "inpaint width 9th", + "Input audio": "Input audio", + "Input directory - See also settings tab.": "Input directory - See also settings tab.", + "Input directory - To recurse use ** or */* in your glob; also check the settings tab.": "Input directory - To recurse use ** or */* in your glob; also check the settings tab.", + "Input file path": "Input file path", + "Input Folder": "Input Folder", + "Input image": "Input image", + "Input Image": "Input Image", + "Input image here": "Input image here", + "Input images directory": "輸入圖像目錄", + "Input Mesh (.ply | .obj)": "Input Mesh (.ply | .obj)", + "Input <= Output": "Input <= Output", + "Input path": "Input path", + "Input Video": "Input Video", + "In queue...": "In queue...", + "In Res": "In Res", + "Insert [\\Author Name] & [\\Model Name] & [\\Model Name\\Version Name] as sub folder options": "Insert [\\Author Name] & [\\Model Name] & [\\Model Name\\Version Name] as sub folder options", + "Insert [/Model Name] & [/Model Name/Version Name] as default sub folder options": "Insert [/Model Name] & [/Model Name/Version Name] as default sub folder options", + "Insert [\\Model Name] & [\\Model Name\\Version Name] as sub folder options": "Insert [\\Model Name] & [\\Model Name\\Version Name] as sub folder options", + "Inspect": "Inspect", + "install": "install", + "installation": "installation", + "Installing...": "Installing...", + "Instance prompt": "Instance prompt", + "Instance prompt(Optional)": "Instance prompt(Optional)", + "Instead it will pick up all files in the \\upscale_me\\ folder and upscale them with below settings.": "Instead it will pick up all files in the \\upscale_me\\ folder and upscale them with below settings.", + "instead of Python.": "instead of Python.", + "instructs the handling of pixels outside the field 
of view as they come into the scene.": "instructs the handling of pixels outside the field of view as they come into the scene.", + "instructs the run to start from a specified point": "instructs the run to start from a specified point", + "integrations": "integrations", + "interactive splines and Bezier curves": "互動式的曲線和貝茲曲線進行關鍵幀的製作。", + "Intermediate files path": "Intermediate files path", + "Intermediate results may look better than the end results. /!\\ Intermediate results are cleaned after each run, save them elsewhere if you want to keep them.": "Intermediate results may look better than the end results. /!\\ Intermediate results are cleaned after each run, save them elsewhere if you want to keep them.", + "Interpolate existing Video/ Images": "Interpolate existing Video/ Images", + "*Interpolate Pics*": "*Interpolate Pics*", + "Interpolate upscaled images, if available": "Interpolate upscaled images, if available", + "*Interpolate Video*": "*Interpolate Video*", + "Interpolation": "插值", + "interpolation method": "interpolation method", + "Interpolation method": "Interpolation method", + "Interprocess communication strategy": "Interprocess communication strategy", + "Interp X": "Interp X", + "Interrogate Deepbooru": "Interrogate Deepbooru", + "Interrogate image": "Interrogate image", + "Interrogate: keep models in VRAM": "Interrogate: keep models in VRAM", + "Interrogate: num_beams for BLIP": "Interrogate: num_beams for BLIP", + "Interrogate Result": "Interrogate Result", + "Interrogate Selected Image": "Interrogate Selected Image", + "Interrogator": "Interrogator", + "Interrogators": "Interrogators", + "Interrogator Settings": "Interrogator Settings", + "interrupt": "interrupt", + "In the filename, '%09d' represents the 9 counting numbers, For '20230124234916_000000001.png', use '20230124234916_%09d.png'": "In the filename, '%09d' represents the 9 counting numbers, For '20230124234916_000000001.png', use '20230124234916_%09d.png'", + "In the main tab, set 
the subject to humanoids": "In the main tab, set the subject to humanoids", + "In the prefix prompt field then add for example: Art by artistname, 2 people": "In the prefix prompt field then add for example: Art by artistname, 2 people", + "Invert": "Invert", + "Invert (black=near, white=far)": "Invert (black=near, white=far)", + "Invert colors": "Invert colors", + "Invert colors if your image has white background.": "Invert colors if your image has white background.", + "Invert DepthMap": "Invert DepthMap", + "Invert DepthMap (black=near, white=far)": "Invert DepthMap (black=near, white=far)", + "invert (from white bg & black line)": "invert (from white bg & black line)", + "Invert Input Color": "Invert Input Color", + "Invert mask": "Invert mask", + "Invert selection": "Invert selection", + "Invert sort order": "Invert sort order", + "Inverts the composite mask.": "Inverts the composite mask.", + "ip-adapter_clip_sd15": "ip-adapter_clip_sd15", + "ip-adapter_clip_sdxl": "ip-adapter_clip_sdxl", + "IP-Adapter Model ID": "IP-Adapter Model ID", + "is also a good option, it makes compact math formulae for Deforum keyframes by selecting various waveforms.": "是一個不錯的選擇,它透過選擇不同的波形為 Deforum 關鍵幀生成簡潔的數學公式。", + "is experimental functions and NO PROOF of effectiveness.": "is experimental functions and NO PROOF of effectiveness.", + "Isometric Style": "Isometric Style", + "is SDXL": "is SDXL", + "is set,": "is set,", + "Issue Tracker": "Issue Tracker", + "is the": "is the", + "is the current frame number.": "is the current frame number.", + "is the length of the video, in frames.": "is the length of the video, in frames.", + "iter": "iter", + "It is automatically set to No when adding, and it needs to be changed again": "It is automatically set to No when adding, and it needs to be changed again", + "It takes time, just wait. Check console log for detail": "It takes time, just wait. Check console log for detail", + "It takes time, just wait. 
Check console log for details": "It takes time, just wait. Check console log for details", + "It will then recognize the body type and not generate it. It also recognizes the keyword wearing, and will not generate an outfit.": "It will then recognize the body type and not generate it. It also recognizes the keyword wearing, and will not generate an outfit.", + "It will translate prompt from your native language into English. So, you can write prompt with your native language.": "It will translate prompt from your native language into English. So, you can write prompt with your native language.", + "I want to preview GroundingDINO detection result and select the boxes I want.": "I want to preview GroundingDINO detection result and select the boxes I want.", + "JavaScript aspect ratio buttons (1:1, 4:3, 16:9, 9:16, 21:9)": "JavaScript aspect ratio buttons (1:1, 4:3, 16:9, 9:16, 21:9)", + "Javascript logs": "Javascript logs", + "JavaScript selection method": "JavaScript selection method", + "Jitter step:": "Jitter step:", + "Jitter the seeds of sub-generations when doing a rolling generation (Still deterministic)": "Jitter the seeds of sub-generations when doing a rolling generation (Still deterministic)", + "Join the": "加入我們", + "- Joints and Limbs": "- Joints and Limbs", + "json path": "json path", + "JSON Validator": "JSON Validator", + "Judge": "Judge", + "Kana Name": "Kana Name", + "Keep": "Keep", + "Keep at 1 for normal behavior.\nSet to different values to compound that many prompts together. My suggestion is to try 2 first.": "Keep at 1 for normal behavior.\nSet to different values to compound that many prompts together. 
My suggestion is to try 2 first.", + "Keep blank if you don't have mask": "Keep blank if you don't have mask", + "Keep Imgs": "Keep Imgs", + "Keep interpolated images on disk": "Keep interpolated images on disk", + "Keep occlusion edges": "Keep occlusion edges", + "Keep original feet 🡢 keep_feet": "Keep original feet 🡢 keep_feet", + "Keep original hands 🡢 keep_hands": "Keep original hands 🡢 keep_hands", + "keep original image before swapping": "keep original image before swapping", + "Keep original image channels": "Keep original image channels", + "Keep settings persistent upon relaunch of webui": "Keep settings persistent upon relaunch of webui", + "(Keeps the page navigation always visible at the top, Requires UI reload)": "(Keeps the page navigation always visible at the top, Requires UI reload)", + "Keep tag, ..": "Keep tag, ..", + "Keep temp images": "Keep temp images", + "Keep the normalization layers of CLIP frozen during training. Advanced usage, may increase model performance and editability.": "Keep the normalization layers of CLIP frozen during training. 
Advanced usage, may increase model performance and editability.", + "Keep the Ratio": "Keep the Ratio", + "Kept tag, ..": "Kept tag, ..", + "Kernel schedule": "Kernel schedule", + "kernel size": "kernel size", + "key for Fullscreen mode,": "key for Fullscreen mode,", + "Keyframes": "關鍵格", + "Keyframes: animation settings (animation mode, max frames, border)": "Keyframes: animation settings (animation mode, max frames, border)", + "Keyframes are output to ": "Keyframes are output to ", + "Keyframes: coherence (color coherence & cadence)": "Keyframes: coherence (color coherence & cadence)", + "Keyframes: depth warping": "Keyframes: depth warping", + "Keyframes: generation settings (noise, strength, contrast, scale).": "Keyframes: generation settings (noise, strength, contrast, scale).", + "Keyframes: motion parameters for 2D and 3D (angle, zoom, translation, rotation, perspective flip).": "Keyframes: motion parameters for 2D and 3D (angle, zoom, translation, rotation, perspective flip).", + "key_max_gap: ": "key_max_gap: ", + "key_min_gap: ": "key_min_gap: ", + "keys : ": "keys : ", + "key_th: ": "key_th: ", + "key to Reset zoom": "key to Reset zoom", + "Keywords": "觸發提示詞", + "Kirigami": "Kirigami", + "kmeans": "kmeans", + "kmeans with dithering": "kmeans with dithering", + "❔ Known Issues": "❔ Known Issues", + "Kohya module:": "Kohya module:", + "LAB is a more linear approach to mimic human perception of color space - a good default setting for most users.": "LAB is a more linear approach to mimic human perception of color space - a good default setting for most users.", + "ladder": "ladder", + "landscape": "landscape", + "landscape - A landscape or a landscape with a building.": "landscape - A landscape or a landscape with a building.", + "(larger = more neural network layers affected; minor effect on performance)": "(larger = more neural network layers affected; minor effect on performance)", + "(larger = worse performance)": "(larger = worse performance)", + 
"Last Image": "Last Image", + "Last message": "Last message", + "Last Name": "Last Name", + "Latitude": "Latitude", + "launcher": "launcher", + "Layer1": "圖層 1", + "Layer1 mask blur": "圖層 1 遮罩模糊", + "Layer1 mask strength": "圖層 1 遮罩強度", + "Layer1 opacity": "圖層 1 透明度", + "Layer2": "圖層 2", + "Layer2 mask blur": "圖層 2 遮罩模糊", + "Layer2 mask strength": "圖層 2 遮罩強度", + "Layer2 opacity": "圖層 2 透明度", + "Layer3": "圖層 3", + "Layer3 mask blur": "圖層 3 遮罩模糊", + "Layer3 mask strength": "圖層 3 遮罩強度", + "Layer3 opacity": "圖層 3 透明度", + "Layer4": "圖層 4", + "Layer4 mask blur": "圖層 4 遮罩模糊", + "Layer4 mask strength": "圖層 4 遮罩強度", + "Layer4 opacity": "圖層 4 透明度", + "Layer5": "圖層 5", + "Layer5 mask blur": "圖層 5 遮罩模糊", + "Layer5 mask strength": "圖層 5 遮罩強度", + "Layer5 opacity": "圖層 5 透明度", + "Layer Divider": "Layer Divider", + "Layers": "圖層", + "Learning rate": "Learning rate", + "Learn More ➜": "Learn More ➜", + "Leave blank to save images to the same path.": "Leave blank to save images to the same path.", + "Leave empty for using generation negative prompt": "Leave empty for using generation negative prompt", + "Leave empty for using generation prompt": "Leave empty for using generation prompt", + "Leave empty to use the --listen address of the ComfyUI server": "Leave empty to use the --listen address of the ComfyUI server", + "Leave empty to use the same name as model and put results into models/Unet-onnx directory": "Leave empty to use the same name as model and put results into models/Unet-onnx directory", + "Leave empty to use the same name as onnx and put results into models/Unet-trt directory": "Leave empty to use the same name as onnx and put results into models/Unet-trt directory", + "Leave the prompt field empty": "Leave the prompt field empty", + "Leave this alone unless you know what you are doing": "Leave this alone unless you know what you are doing", + "Left click the image to add one positive point (black dot). Right click the image to add one negative point (red dot). 
Left click the point to remove it.": "Left click the image to add one positive point (black dot). Right click the image to add one negative point (red dot). Left click the point to remove it.", + "left-right": "left-right", + "Left/Right Balance": "Left/Right Balance", + "Legacy colormatch": "Legacy colormatch", + "Legacy hash": "舊雜湊值", + "Legend of Zelda": "Legend of Zelda", + "Leg Length": "Leg Length", + "Len": "Len", + "Length": "Length", + "Lens distortion (Fisheye)": "Lens distortion (Fisheye)", + "Lerp": "Lerp", + "lets you only match every N frames": "lets you only match every N frames", + "Let the Dice roll": "Let the Dice roll", + "libraries": "libraries", + "License": "License", + "light": "light", + "LightDiffusionFlow File": "LightDiffusionFlow File", + "lighten": "變亮", + "👍 Like": "👍 Like", + "Liked images": "Liked images", + "like here": "like here", + "👍 Likes": "👍 Likes", + "Limb Width": "Limb Width", + "Limit Jinja prompts: Limit the number of prompts to batch_count * batch_size. The default is to generate batch_count * batch_size * number of prompts generated by Jinja": "Limit Jinja prompts: Limit the number of prompts to batch_count * batch_size. 
The default is to generate batch_count * batch_size * number of prompts generated by Jinja", + "linear_burn": "線性加深", + "linear_dodge": "線性加亮", + "linear_light": "線性光線", + "Line Art": "Line Art", + "lineart_anime": "lineart_anime", + "lineart_anime_denoise": "lineart_anime_denoise", + "lineart_coarse": "lineart_coarse", + "lineart_realistic": "lineart_realistic", + "lineart_standard": "lineart_standard", + "lineart_standard (form white bg & black line)": "lineart_standard (form white bg & black line)", + "lineart_standard (from white bg & black line)": "lineart_standard (from white bg & black line)", + "line drawing": "line drawing", + "Line Feed": "Line Feed", + "Link to DeepL": "Link to DeepL", + "Link to online results": "Link to online results", + "list extensions": "list extensions", + "List loaded embeddings": "List loaded embeddings", + "list localizations": "list localizations", + "List of active tabs (separated by commas). Available options are txt2img, img2img, txt2img-grids, img2img-grids, Extras, Favorites, Others, All, Maintenance. Custom folders are also supported by specifying their path.": "List of active tabs (separated by commas). Available options are txt2img, img2img, txt2img-grids, img2img-grids, Extras, Favorites, Others, All, Maintenance. Custom folders are also supported by specifying their path.", + "List of active tabs (separated by commas). Available options are txt2img, img2img, txt2img-grids, img2img-grids, Extras, Favorites, Others. Custom folders are also supported by specifying their path.": "List of active tabs (separated by commas). Available options are txt2img, img2img, txt2img-grids, img2img-grids, Extras, Favorites, Others. 
Custom folders are also supported by specifying their path.", + "list SD models": "list SD models", + "Live Previews": "Live Previews", + "⚙️ Load": "⚙️ Load", + "Load": "載入", + "Load all installed models": "Load all installed models", + "Load All Settings": "Load All Settings", + "Load an image.": "Load an image.", + "Load BG": "Load BG", + "Load caption from filename if no text file exists": "Load caption from filename if no text file exists", + "Load ComfyUI iframes through a reverse proxy (requires reload UI. Needs --api. Default is on if webui is remote)": "Load ComfyUI iframes through a reverse proxy (requires reload UI. Needs --api. Default is on if webui is remote)", + "Load data from file": "Load data from file", + "Loaded checkpoint is a SDXL checkpoint": "Loaded checkpoint is a SDXL checkpoint", + "Load from JSON": "從 JSON 載入", + "Load from subdirectories": "Load from subdirectories", + "load_history": "load_history", + "Loading...": "Loading...", + "Load model": "Load model", + "Load models using stream loading method": "Load models using stream loading method", + "Load Only": "Load Only", + "Load Params": "Load Params", + "Load preset": "Load preset", + "Load Preset": "Load Preset", + "Load results": "載入結果", + "Load Romdom preset": "Load Romdom preset", + "Load & Save": "Load & Save", + "Load Scene": "Load Scene", + "load scripts": "load scripts", + "Load SD checkpoint to VRAM from RAM": "Load SD checkpoint to VRAM from RAM", + "Load settings from:": "Load settings from:", + "Load trigger words from Dreambooth model": "Load trigger words from Dreambooth model", + "load upscalers": "load upscalers", + "Load Video Settings": "Load Video Settings", + "Localization file (Please leave `User interface` - `Localization` as None)": "本地化翻譯(請將使用者介面下的本地化翻譯設為無)", + "Localization (requires restart)": "Localization (requires restart)", + "Location": "Location", + "Lock current settings": "Lock current settings", + "Log": "Log", + "Logo": "Logo", + "Long Exposure": 
"Long Exposure", + "Longitude": "Longitude", + "Loop": "Loop", + "LoopBack mode": "LoopBack mode", + "Loopback option": "Loopback option", + "Loopback source": "Loopback source", + "Looping recommendations:": "Looping recommendations:", + "Loops:": "Loops:", + "LoRA directory (if not default)": "LoRA directory (if not default)", + "LoRA in negative textencoder": "LoRA in negative textencoder", + "LoRA in negative U-net": "LoRA in negative U-net", + "LoRA Model": "LoRA Model", + "LoRA model name filter": "LoRA 模型名稱過濾器", + "Lora multiplier": "Lora multiplier", + "LoRA prompt helper": "LoRA prompt helper", + "Lo RA Prompt Helper": "Lo RA Prompt Helper", + "LoRA use buggy requires grad": "LoRA use buggy requires grad", + "low contrast": "low contrast", + "Lower Leg": "Lower Leg", + "Lower limit for X": "Lower limit for X", + "Low fps": "Low fps", + "Lowpoly": "Lowpoly", + "Low VRAM (8GB or below)": "Low VRAM (8GB or below)", + "luminism": "luminism", + "LyCORIS directory (if not default)": "LyCORIS directory (if not default)", + "M00": "M00", + "M_A_00": "M_A_00", + "Made by": "製作者:", + "Made by deforum.github.io, port for AUTOMATIC1111's webui maintained by kabachuha": "Made by deforum.github.io, port for AUTOMATIC1111's webui maintained by kabachuha", + "Magenta | Green": "Magenta | Green", + "magical realism": "magical realism", + "Magic Prompt batch size (higher is faster but uses more memory)": "Magic Prompt batch size (higher is faster but uses more memory)", + "mainly allows using more than 200 steps. otherwise, it's a mirror-like param of 'strength schedule'": "mainly allows using more than 200 steps. otherwise, it's a mirror-like param of 'strength schedule'", + "mainly allows using more than 200 steps. Otherwise, it's a mirror-like param of 'strength schedule'": "mainly allows using more than 200 steps. 
Otherwise, it's a mirror-like param of 'strength schedule'", + "Main menu position": "Main menu position", + "Main Settings": "Main Settings", + "Main Splitting": "Main Splitting", + "Maintenance": "維護", + "Maji Merger": "Maji Merger", + "Make a backup copy of the model being edited when saving its metadata.": "儲存中繼資料時,備份正在編輯的模型", + "Make GIF": "Make GIF", + "make gif in addition to the video/s": "make gif in addition to the video/s", + "make GIF in addition to the video/s": "make GIF in addition to the video/s", + "Make Hires. fix+ run before any other extensions (will reload WebUI)": "Make Hires. fix+ run before any other extensions (will reload WebUI)", + "Make Images": "Make Images", + "Make LoRA (A-B)": "Make LoRA (A-B)", + "Make LoRA (alpha * A - beta * B)": "Make LoRA (alpha * A - beta * B)", + "Make your choices, adjust your settings, set a name, save. To edit a prior choice, select from dropdown and overwrite.": "Make your choices, adjust your settings, set a name, save. To edit a prior choice, select from dropdown and overwrite.", + "Make Zip when Save?": "Make Zip when Save?", + "MALE": "MALE", + "Manage Extensions": "Manage Extensions", + "manga": "manga", + "manifest for your animation (leave blank to ignore).": "manifest for your animation (leave blank to ignore).", + "📘 Manual": "📘 Manual", + "Manual install": "Manual install", + "mask": "mask", + "Mask area Only": "Mask area Only", + "Mask Blur": "Mask Blur", + "Mask Blur Kernel Size": "Mask Blur Kernel Size", + "Mask Blur Kernel Size(GaussianBlur)": "Mask Blur Kernel Size(GaussianBlur)", + "Mask Blur Kernel Size(MedianBlur)": "Mask Blur Kernel Size(MedianBlur)", + "Mask brightness adjust": "Mask brightness adjust", + "Mask by Category": "Mask by Category", + "mask content ratio": "mask content ratio", + "Mask contrast adjust": "Mask contrast adjust", + "Mask contrast schedule is from 0-255. Normal is 1. Affects all masks.": "Mask contrast schedule is from 0-255. Normal is 1. 
Affects all masks.", + "Mask directory": "Mask directory", + "Masked area width (px)": "Masked area width (px)", + "Mask erosion (-) / dilation (+)": "Mask erosion (-) / dilation (+)", + "Mask erosion (-) / dilation (+) 10th": "Mask erosion (-) / dilation (+) 10th", + "Mask erosion (-) / dilation (+) 2nd": "Mask erosion (-) / dilation (+) 2nd", + "Mask erosion (-) / dilation (+) 3rd": "Mask erosion (-) / dilation (+) 3rd", + "Mask erosion (-) / dilation (+) 4th": "Mask erosion (-) / dilation (+) 4th", + "Mask erosion (-) / dilation (+) 5th": "Mask erosion (-) / dilation (+) 5th", + "Mask erosion (-) / dilation (+) 6th": "Mask erosion (-) / dilation (+) 6th", + "Mask erosion (-) / dilation (+) 7th": "Mask erosion (-) / dilation (+) 7th", + "Mask erosion (-) / dilation (+) 8th": "Mask erosion (-) / dilation (+) 8th", + "Mask erosion (-) / dilation (+) 9th": "Mask erosion (-) / dilation (+) 9th", + "Mask file": "Mask file", + "Mask fill": "Mask fill", + "Mask for ControlNet Inpaint": "Mask for ControlNet Inpaint", + "mask image:": "遮罩圖像:", + "Mask image": "Mask image", + "Masking Method": "Masking Method", + "Masking method 🡢 mask_method": "Masking method 🡢 mask_method", + "Mask Init": "Mask Init", + "Mask max area ratio": "Mask max area ratio", + "Mask max area ratio 10th": "Mask max area ratio 10th", + "Mask max area ratio 2nd": "Mask max area ratio 2nd", + "Mask max area ratio 3rd": "Mask max area ratio 3rd", + "Mask max area ratio 4th": "Mask max area ratio 4th", + "Mask max area ratio 5th": "Mask max area ratio 5th", + "Mask max area ratio 6th": "Mask max area ratio 6th", + "Mask max area ratio 7th": "Mask max area ratio 7th", + "Mask max area ratio 8th": "Mask max area ratio 8th", + "Mask max area ratio 9th": "Mask max area ratio 9th", + "Mask merge mode": "Mask merge mode", + "Mask merge mode 10th": "Mask merge mode 10th", + "Mask merge mode 2nd": "Mask merge mode 2nd", + "Mask merge mode 3rd": "Mask merge mode 3rd", + "Mask merge mode 4th": "Mask merge mode 
4th", + "Mask merge mode 5th": "Mask merge mode 5th", + "Mask merge mode 6th": "Mask merge mode 6th", + "Mask merge mode 7th": "Mask merge mode 7th", + "Mask merge mode 8th": "Mask merge mode 8th", + "Mask merge mode 9th": "Mask merge mode 9th", + "Mask min area ratio": "Mask min area ratio", + "Mask min area ratio 10th": "Mask min area ratio 10th", + "Mask min area ratio 2nd": "Mask min area ratio 2nd", + "Mask min area ratio 3rd": "Mask min area ratio 3rd", + "Mask min area ratio 4th": "Mask min area ratio 4th", + "Mask min area ratio 5th": "Mask min area ratio 5th", + "Mask min area ratio 6th": "Mask min area ratio 6th", + "Mask min area ratio 7th": "Mask min area ratio 7th", + "Mask min area ratio 8th": "Mask min area ratio 8th", + "Mask min area ratio 9th": "Mask min area ratio 9th", + "Mask Mode": "Mask Mode", + "Mask Mode(Override img2img Mask mode)": "Mask Mode(Override img2img Mask mode)", + "Mask only": "Mask only", + "Mask only the top k largest (0 to disable)": "Mask only the top k largest (0 to disable)", + "Mask only the top k largest (0 to disable) 10th": "Mask only the top k largest (0 to disable) 10th", + "Mask only the top k largest (0 to disable) 2nd": "Mask only the top k largest (0 to disable) 2nd", + "Mask only the top k largest (0 to disable) 3rd": "Mask only the top k largest (0 to disable) 3rd", + "Mask only the top k largest (0 to disable) 4th": "Mask only the top k largest (0 to disable) 4th", + "Mask only the top k largest (0 to disable) 5th": "Mask only the top k largest (0 to disable) 5th", + "Mask only the top k largest (0 to disable) 6th": "Mask only the top k largest (0 to disable) 6th", + "Mask only the top k largest (0 to disable) 7th": "Mask only the top k largest (0 to disable) 7th", + "Mask only the top k largest (0 to disable) 8th": "Mask only the top k largest (0 to disable) 8th", + "Mask only the top k largest (0 to disable) 9th": "Mask only the top k largest (0 to disable) 9th", + "Mask option": "Mask option", + "Mask 
overlay blur": "Mask overlay blur", + "Mask precision 🡢 mask_precision": "Mask precision 🡢 mask_precision", + "Mask Preprocessing": "Mask Preprocessing", + "Mask schedule": "Mask schedule", + "Mask Setting": "Mask Setting", + "🎭 Mask Settings": "🎭 Mask Settings", + "masks from files: in [], like [mask1.png]": "masks from files: in [], like [mask1.png]", + "Mask size": "Mask size", + "Mask source": "Mask source", + "Mask Target (e.g., girl, cats)": "Mask Target (e.g., girl, cats)", + "Mask Threshold": "Mask Threshold", + "Mask x(→) offset": "Mask x(→) offset", + "Mask x(→) offset 10th": "Mask x(→) offset 10th", + "Mask x(→) offset 2nd": "Mask x(→) offset 2nd", + "Mask x(→) offset 3rd": "Mask x(→) offset 3rd", + "Mask x(→) offset 4th": "Mask x(→) offset 4th", + "Mask x(→) offset 5th": "Mask x(→) offset 5th", + "Mask x(→) offset 6th": "Mask x(→) offset 6th", + "Mask x(→) offset 7th": "Mask x(→) offset 7th", + "Mask x(→) offset 8th": "Mask x(→) offset 8th", + "Mask x(→) offset 9th": "Mask x(→) offset 9th", + "Mask y(↑) offset": "Mask y(↑) offset", + "Mask y(↑) offset 10th": "Mask y(↑) offset 10th", + "Mask y(↑) offset 2nd": "Mask y(↑) offset 2nd", + "Mask y(↑) offset 3rd": "Mask y(↑) offset 3rd", + "Mask y(↑) offset 4th": "Mask y(↑) offset 4th", + "Mask y(↑) offset 5th": "Mask y(↑) offset 5th", + "Mask y(↑) offset 6th": "Mask y(↑) offset 6th", + "Mask y(↑) offset 7th": "Mask y(↑) offset 7th", + "Mask y(↑) offset 8th": "Mask y(↑) offset 8th", + "Mask y(↑) offset 9th": "Mask y(↑) offset 9th", + "Match input size": "Match input size", + "Match net size to input size": "Match net size to input size", + "Matrix": "Matrix", + "Matrix mode guide": "Matrix mode guide", + "Max": "Max", + "Max additional denoise": "Max additional denoise", + "Max Batch Count": "最大批次數量", + "Max batch-size": "Max batch-size", + "Max Crop Size": "Max Crop Size", + "Max faces units (requires restart)": "Max faces units (requires restart)", + "Max. feedback images": "Max. 
feedback images", + "max frames": "max frames", + "max_frames": "max_frames", + "Max frames": "最大幀數量", + "Max height": "Max height", + "Max Image Size": "最大圖像尺寸", + "Maximum aesthetic_score": "Maximum aesthetic_score", + "Maximum batch size": "Maximum batch size", + "Maximum dimension default": "Maximum dimension default", + "Maximum execution time (seconds)": "Maximum execution time (seconds)", + "Maximum height": "Maximum height", + "Maximum Image Height:": "Maximum Image Height:", + "Maximum image size, in megapixels": "Maximum image size, in megapixels", + "Maximum Image Width:": "Maximum Image Width:", + "Maximum Image Width/Height:": "Maximum Image Width/Height:", + "Maximum keyframe gap": "Maximum keyframe gap", + "Maximum Merge models": "Maximum Merge models", + "Maximum number of faces to detect": "Maximum number of faces to detect", + "Maximum number of tags to be shown in the UI": "Maximum number of tags to be shown in the UI", + "Maximum number of tokens after merging (lower improves VRAM usage)": "Maximum number of tokens after merging (lower improves VRAM usage)", + "Maximum prompt token count": "Maximum prompt token count", + "Maximum ranking": "Maximum ranking", + "Maximum score": "Maximum score", + "Maximum token length to respect. You probably want to leave this at 75.": "Maximum token length to respect. You probably want to leave this at 75.", + "Maximum width": "Maximum width", + "Max key frames": "Max key frames", + "Max Length": "Max Length", + "Max models": "Max models", + "Max number of dataset folders to show": "最多顯示幾個數據集資料夾", + "Max number of top tags to show": "最多顯示幾個常用標記", + "Max Pixels": "Max Pixels", + "Max prompt token count": "Max prompt token count", + "Max resolution of temporary files": "Max resolution of temporary files", + "Max. weight": "Max. 
weight", + "Max width": "Max width", + "M_B_00": "M_B_00", + "MBW": "MBW", + "MBW Each": "MBW Each", + "mbw weights": "mbw weights", + "mediapipe_face": "mediapipe_face", + "medium": "medium", + "melanin": "melanin", + "Memo": "Memo", + "Memory": "Memory", + "Memory optimization": "Memory optimization", + "Memory usage": "Memory usage", + "Merge!": "Merge!", + "merge and apply": "merge and apply", + "Merge and gen": "Merge and gen", + "Merge And Gen": "Merge And Gen", + "Merge and Invert": "Merge and Invert", + "Merge and save": "Merge and save", + "Merge Block Weights": "Merge Block Weights", + "Merge&Gen": "Merge&Gen", + "Merge LoRAs": "Merge LoRAs", + "Merge mode": "Merge mode", + "Merge Mode": "Merge Mode", + "Merge Model B": "Merge Model B", + "Merge models and load it for generation": "Merge models and load it for generation", + "Merge multiple models and load it for image generation.": "Merge multiple models and load it for image generation.", + "Merge to Checkpoint": "Merge to Checkpoint", + "Message": "Message", + "messy": "messy", + "Metadata to show in XY-Grid label for Model axes, comma-separated (example: \"ss_learning_rate, ss_num_epochs\")": "顯示於 X/Y 圖表的中繼資料,以逗號分隔(例如:\"ss_learning_rate, ss_num_epochs\")", + "Metadeta": "Metadeta", + "MiDaS weight (vid2depth)": "MiDaS weight (vid2depth)", + "Middle Name": "Middle Name", + "Min": "Min", + "Min batch-size": "Min batch-size", + "Minecraft": "Minecraft", + "Min height": "Min height", + "(minimal change in the generated picture)": "(minimal change in the generated picture)", + "Minimalist": "Minimalist", + "minimum aesthetic_score": "最小美學分數", + "Minimum aesthetic_score": "Minimum aesthetic_score", + "Minimum batch size": "Minimum batch size", + "Minimum dimension default": "Minimum dimension default", + "Minimum feedback strength at every diffusion step.": "Minimum feedback strength at every diffusion step.", + "Minimum height": "Minimum height", + "Minimum keyframe gap": "Minimum keyframe gap", + "Minimum 
lighting ratio": "Minimum lighting ratio", + "Minimum number of pages per load": "每次載入的最小頁數", + "Minimum prompt token count": "Minimum prompt token count", + "Minimum ranking": "Minimum ranking", + "Minimum score": "Minimum score", + "Minimum steps": "Minimum steps", + "Minimum width": "Minimum width", + "min_mask_region_area": "min_mask_region_area", + "Min-max": "Min-max", + "Min prompt token count": "Min prompt token count", + "Min reference similarity": "Min reference similarity", + "Min similarity": "Min similarity", + "Min. strength": "Min. strength", + "Min tag fraction in batch and interrogations": "Min tag fraction in batch and interrogations", + "Min. weight": "Min. weight", + "Min width": "Min width", + "Mirror webcam": "Mirror webcam", + "missing metadata": "缺少中繼資料", + "Mixed": "Mixed", + "Mixed precision (If your graphics card supports bf16 better)": "Mixed precision (If your graphics card supports bf16 better)", + "MLDanbooru Tagger": "MLDanbooru Tagger", + "mlsd": "mlsd", + "MLSD Value Threshold": "MLSD Value Threshold", + "Mobile Phone": "Mobile Phone", + "mode": "mode", + "Mode": "Mode", + "model": "模型", + "model.": "model.", + "Model:": "Model:", + "Model 1": "模型 1️⃣", + "Model 2": "模型 2️⃣", + "Model 3": "模型 3️⃣", + "Model 4": "模型 4️⃣", + "Model 5": "模型 5️⃣", + "model A": "model A", + "model_A": "model_A", + "Model A": "Model A", + "Model_A": "Model_A", + "Model autoload on server start": "Model autoload on server start", + "model B": "model B", + "model_B": "model_B", + "Model B": "Model B", + "Model_B": "Model_B", + "model C": "model C", + "model_C": "model_C", + "Model C": "Model C", + "Model compile backend (experimental)": "Model compile backend (experimental)", + "Model compile mode (experimental)": "Model compile mode (experimental)", + "Model configuration": "Model configuration", + "Model D": "Model D", + "Model description/readme/notes/instructions": "模型的描述資訊", + "Model filename:": "Model filename:", + "Model Filename:": "Model 
Filename:", + "Model format": "Model format", + "Model Format": "Model Format", + "model hash": "模型雜湊值", + "Model hash": "模型雜湊值", + "ModelID": "ModelID", + "Model is recompiled when resolution, batchsize, device, or samplers like DPM++ or Karras are changed.": "Model is recompiled when resolution, batchsize, device, or samplers like DPM++ or Karras are changed.", + "model list": "model list", + "Model list": "Model list", + "model_name": "model_name", + "Model name": "Model name", + "Model Name": "Model Name", + "(Model names [with file extension] or their hashes, separated by commas)": "(Model names [with file extension] or their hashes, separated by commas)", + "Model not found, please download one and reload automatic 1111": "Model not found, please download one and reload automatic 1111", + "model of the upscaler to use. 'realesr-animevideov3' is much faster but yields smoother, less detailed results. the other models only do x4": "model of the upscaler to use. 'realesr-animevideov3' is much faster but yields smoother, less detailed results. the other models only do x4", + "Model params": "Model params", + "model path": "model path", + "Model path": "模型路徑", + "Model path filter": "模型路徑過濾器", + "Model precision": "Model precision", + "Model pruning methods": "Model pruning methods", + "Models": "Models", + "Models...": "Models...", + "Model scheduler to use. Only applies to models before 2.0.": "Model scheduler to use. 
Only applies to models before 2.0.", + "Models directory (if not default)": "Models directory (if not default)", + "Model Selection": "Model Selection", + "(models in subdirectories like photo/sd15.ckpt will be listed as just sd15.ckpt)": "(models in subdirectories like photo/sd15.ckpt will be listed as just sd15.ckpt)", + "Model sort order": "Model sort order", + "Model Toolkit": "Model Toolkit", + "model to use": "model to use", + "Model type": "Model type", + "Model Type": "Model Type", + "Model Types": "Model Types", + "Model URL or Model ID": "Model URL or Model ID", + "Model variants": "Model variants", + "Model Version": "Model Version", + "Model versions": "Model versions", + "Modified:": "Modified:", + "Module Color": "Module Color", + "monochromatic": "monochromatic", + "Monochrome": "Monochrome", + "More info about Anti Burn": "More info about Anti Burn", + "More Information": "More Information", + "Most Downloaded": "Most Downloaded", + "Most Liked": "Most Liked", + "Motion": "Motion", + "Motion module": "Motion module", + "Motion preview mode (dry run).": "Motion preview mode (dry run).", + "Motion use prev img": "Motion use prev img", + "Mouse over image: Press": "Mouse over image: Press", + "Mov2Mov output path for image": "Mov2Mov output path for image", + "Mov2Mov output path for vedio": "Mov2Mov output path for vedio", + "Mov2Mov output path for video": "Mov2Mov output path for video", + "Move": "移動", + "Move buttons copy instead of move": "將移動按鈕以複製取代", + "move canvas left/right in pixels per frame": "move canvas left/right in pixels per frame", + "move canvas towards/away from view [speed set by FOV]": "move canvas towards/away from view [speed set by FOV]", + "move canvas up/down in pixels per frame": "move canvas up/down in pixels per frame", + "Move ControlNet tensor to CPU (if applicable)": "Move ControlNet tensor to CPU (if applicable)", + "Move/Copy/Delete matching .txt files": "Move/Copy/Delete matching .txt files", + "Move File(s)": "Move 
File(s)", + "Move Mode (X key)": "Move Mode (X key)", + "Move motion module to CPU (default if lowvram)": "Move motion module to CPU (default if lowvram)", + "Move or Delete": "Move or Delete", + "Move or Delete Files": "Move or Delete Files", + "Move quicksettings to image setting panel": "Move quicksettings to image setting panel", + "Move to archive": "Move to archive", + "Move to directory": "移動到目錄", + "Move to favorites": "移動到收藏夾", + "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.", + "Move VAE and CLIP to RAM when training if possible": "Move VAE and CLIP to RAM when training if possible", + "Move VAE to GPU (if possible)": "Move VAE to GPU (if possible)", + "Move visible tags to exclude tags": "Move visible tags to exclude tags", + "Move visible tags to keep tags": "Move visible tags to keep tags", + "Mul": "Mul", + "Multi ControlNet: Max models amount (requires restart)": "Multi ControlNet: Max models amount (requires restart)", + "Multi-frame rendering": "Multi-frame rendering", + "Multi Merge": "Multi Merge", + "Multi Model Merge": "Multi Model Merge", + "Multiplication (2^N)": "倍率 (2^N)", + "Multiplication (x0.5, x1.5)": "倍率 (x0.5, x1.5)", + "multiplier": "multiplier", + "Multiplier": "Multiplier", + "multiply": "色彩增值", + "Multi Proc Cmd": "Multi Proc Cmd", + "Multi string search: part1, part2.. (Enter key to update)": "Multi string search: part1, part2.. 
(Enter key to update)", + "must be enabled": "must be enabled", + "naive": "naive", + "naive_interpolating": "naive_interpolating", + "name": "名稱", + "Name 0": "Name 0", + "Name 1": "Name 1", + "Name 10": "Name 10", + "Name 11": "Name 11", + "Name 12": "Name 12", + "Name 13": "Name 13", + "Name 14": "Name 14", + "Name 15": "Name 15", + "Name 2": "Name 2", + "Name 3": "Name 3", + "Name 4": "Name 4", + "Name 5": "Name 5", + "Name 6": "Name 6", + "Name 7": "Name 7", + "Name 8": "Name 8", + "Name 9": "Name 9", + "Name of saved checkpoint to load weights from": "Name of saved checkpoint to load weights from", + "Name of the character": "Name of the character", + "nature": "nature", + "Nautical": "Nautical", + "Near clip": "Near clip", + "Near schedule": "Near schedule", + "Neck": "Neck", + "need help? please visit our": "need help? please visit our", + "need input your want to translate": "need input your want to translate", + "Negative Filter": "Negative Filter", + "➖ Negative Mask": "➖ Negative Mask", + "Negative Prompt": "Negative Prompt", + "Negative prompt for upscale (replaces generation prompt)": "Negative prompt for upscale (replaces generation prompt)", + "Negative prompt mode": "Negative prompt mode", + "Negative prompt: please enter Y if this prompt is a negative prompt.": "Negative prompt: please enter Y if this prompt is a negative prompt.", + "Negative Prompts": "Negative Prompts", + "negative prompt to be appended to *all* prompts. DON'T use --neg here!": "negative prompt to be appended to *all* prompts. 
DON'T use --neg here!", + "Negative Prompt, will also be appended": "Negative Prompt, will also be appended", + "Negative weight": "Negative weight", + "neg_prompt": "neg_prompt", + "Neon Noir": "Neon Noir", + "Neon Punk": "Neon Punk", + "Net dim (128 ~ 144MB)": "Net dim (128 ~ 144MB)", + "Net height": "Net height", + "Net width": "Net width", + "Network module": "附加網路類型", + "Network module 1": "附加網路類型 1️⃣", + "Network module 2": "附加網路類型 2️⃣", + "Network module 3": "附加網路類型 3️⃣", + "Network module 4": "附加網路類型 4️⃣", + "Network module 5": "附加網路類型 5️⃣", + "Nevysha Cozy Nest Settings": "Nevysha Cozy Nest Settings", + "Nevysha Cozy Nest Update Info": "Nevysha Cozy Nest Update Info", + "new commits": "new commits", + "Newest": "Newest", + "New model name": "New model name", + "New Preset": "New Preset", + "New preset name ": "New preset name ", + "New row": "New row", + "New Scribble Drawing Height": "New Scribble Drawing Height", + "New Scribble Drawing Width": "New Scribble Drawing Width", + "New text file will be created if you are using filename as captions.": "New text file will be created if you are using filename as captions.", + "Next batch": "Next batch", + "Next page": "Next page", + "Next Page": "下一頁", + "Nickname": "Nickname", + "No": "No", + "no-ema": "no-ema", + "no find:": "no find:", + "No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking.": "No interpolation will be used. Requires one model; A. 
Allows for format conversion and VAE baking.", + "Noise": "Noise", + "Noise mask schedule": "Noise mask schedule", + "noise multiplier; applies to Euler a and other samplers that have the letter 'a' in them": "noise multiplier; applies to Euler a and other samplers that have the letter 'a' in them", + "noise multiplier; applies to Euler A and other samplers that have the letter 'a' in them": "noise multiplier; applies to Euler A and other samplers that have the letter 'a' in them", + "Noise multiplier for img2img 10th": "Noise multiplier for img2img 10th", + "Noise multiplier for img2img 2nd": "Noise multiplier for img2img 2nd", + "Noise multiplier for img2img 3rd": "Noise multiplier for img2img 3rd", + "Noise multiplier for img2img 4th": "Noise multiplier for img2img 4th", + "Noise multiplier for img2img 5th": "Noise multiplier for img2img 5th", + "Noise multiplier for img2img 6th": "Noise multiplier for img2img 6th", + "Noise multiplier for img2img 7th": "Noise multiplier for img2img 7th", + "Noise multiplier for img2img 8th": "Noise multiplier for img2img 8th", + "Noise multiplier for img2img 9th": "Noise multiplier for img2img 9th", + "Noise multiplier schedule": "Noise multiplier schedule", + "Noise schedule": "Noise schedule", + "Noise Tolerance": "Noise Tolerance", + "Noise type": "Noise type", + "No model has new version": "No model has new version", + "None (just grayscale)": "None (just grayscale)", + "None - prompt only": "None - prompt only", + "normal": "普通", + "Normal Background Threshold": "Normal Background Threshold", + "normal_bae": "normal_bae", + "Normalizes each image separately by mean and standard deviation in your dataset. Useful to preserve likeness to your images.": "Normalizes each image separately by mean and standard deviation in your dataset. Useful to preserve likeness to your images.", + "Normally, changing the resolution will completely change an image, even when using the same seed. 
If you generated an image with a particular seed and then changed the resolution, put the original resolution here to get an image that more closely resemles the original": "Normally, changing the resolution will completely change an image, even when using the same seed. If you generated an image with a particular seed and then changed the resolution, put the original resolution here to get an image that more closely resemles the original", + "Normally, it creates a single random prompt. With prompt compounder, it will generate multiple prompts and compound them together.": "Normally, it creates a single random prompt. With prompt compounder, it will generate multiple prompts and compound them together.", + "normal_midas": "normal_midas", + "Normal with batch": "Normal with batch", + "Nose To Neck": "Nose To Neck", + "not": "not", + "note": "note", + "Note": "Note", + "Note: Moved or deleted images will be unloaded.": "Note: Moved or deleted images will be unloaded.", + "Note that parseq overrides:": "Note that parseq overrides:", + "(not for Video Input mode)": "(not for Video Input mode)", + "(notification.mp3 should be present in the root directory)": "(notification.mp3 should be present in the root directory)", + "Notification sound volume": "Notification sound volume", + "(No Trigger Word)": "(No Trigger Word)", + "not save grid": "not save grid", + "Now use it like this:": "Now use it like this:", + "NSFW content": "NSFW content", + "NSFW score threshold. Any image part with a score above this value will be treated as NSFW (use extension responsibly !). 1=Disable filtering": "NSFW score threshold. Any image part with a score above this value will be treated as NSFW (use extension responsibly !). 
1=Disable filtering", + "Nucleus Sampling": "Nucleus Sampling", + "nudity": "nudity", + "NUl Image Enhancer": "NUl Image Enhancer", + "Number of cached models": "Number of cached models", + "Number of cached VAEs": "Number of cached VAEs", + "Number of columns for added settings": "Number of columns for added settings", + "Number of columns on the page": "每頁列數", + "Number of connections to use for downloading a model": "Number of connections to use for downloading a model", + "Number of epochs": "Number of epochs", + "Number of epochs after which training will be paused for the specified time. Useful if you want to give your GPU a rest.": "Number of epochs after which training will be paused for the specified time. Useful if you want to give your GPU a rest.", + "Number of frames": "Number of frames", + "Number of iterations": "Number of iterations", + "Number of parallel calculations": "Number of parallel calculations", + "Number of rows on the page": "每頁行數", + "Number of samples to generate": "Number of samples to generate", + "number of seed": "number of seed", + "Number of steps for the warmup in the lr scheduler. LR will start at 0 and increase to this value over the specified number of steps.": "Number of steps for the warmup in the lr scheduler. 
LR will start at 0 and increase to this value over the specified number of steps.", + "number of the frames that we will blend between current imagined image and input frame image": "number of the frames that we will blend between current imagined image and input frame image", + "Number of updates steps to accumulate before performing a backward/update pass.": "Number of updates steps to accumulate before performing a backward/update pass.", + "(O10) Output ckpt Name": "(O10) Output ckpt Name", + "(O1) Output ckpt Name": "(O1) Output ckpt Name", + "(O2) Output ckpt Name": "(O2) Output ckpt Name", + "(O3) Output ckpt Name": "(O3) Output ckpt Name", + "(O4) Output ckpt Name": "(O4) Output ckpt Name", + "(O5) Output ckpt Name": "(O5) Output ckpt Name", + "(O6) Output ckpt Name": "(O6) Output ckpt Name", + "(O7) Output ckpt Name": "(O7) Output ckpt Name", + "(O8) Output ckpt Name": "(O8) Output ckpt Name", + "(O9) Output ckpt Name": "(O9) Output ckpt Name", + "object": "object", + "object - Can be a random object, a building or a vehicle.": "object - Can be a random object, a building or a vehicle.", + "octane render": "octane render", + "official Deforum Discord": "官方 Deforum Discord", + "Official Deforum Wiki:": "官方 Deforum Wiki:", + "# of in-between frames that will not be directly diffused": "# of in-between frames that will not be directly diffused", + "# of threads to use for hash calculation (increase if using an SSD)": "用於雜湊值計算的線程數(如果使用 SSD 可適量增加)", + "On": "On", + "Once turned on, it will retry for n amount of times to get an image with the quality score. If not, it will take the best image so far and continue.": "Once turned on, it will retry for n amount of times to get an image with the quality score. 
If not, it will take the best image so far and continue.", + "Once you select a model it will take some time to load.": "Once you select a model it will take some time to load.", + "On, concurrent (don't pause generation)": "On, concurrent (don't pause generation)", + "One Button Run and Upscale": "One Button Run and Upscale", + "Online resources:": "Online resources:", + "Only": "只有", + "only 2D motion parameters will be used, but this mode uses the least amount of VRAM. You can optionally enable flip_2d_perspective to enable some psuedo-3d animation parameters while in 2D mode.": "only 2D motion parameters will be used, but this mode uses the least amount of VRAM. You can optionally enable flip_2d_perspective to enable some psuedo-3d animation parameters while in 2D mode.", + "only affects the 'Blend'": "only affects the 'Blend'", + "(Only applies to Aria2)": "(Only applies to Aria2)", + "Only copy to models with no metadata": "僅複製到沒有中繼資料的模型(不覆蓋原中繼資料)", + "Only copy to models with same session ID": "僅複製到具有相同作業階段 ID 的模型", + "Only Hand": "Only Hand", + "ONLY in use when working with a P2P ckpt!": "ONLY in use when working with a P2P ckpt!", + "*Only in use with pix2pix checkpoints!*": "*Only in use with pix2pix checkpoints!*", + "Only Mouth": "Only Mouth", + "only other types --> Will pick only from the more unique types, such as stained glass window or a funko pop": "only other types --> Will pick only from the more unique types, such as stained glass window or a funko pop", + "(Only relevant if the above option is enabled)": "(Only relevant if the above option is enabled)", + "Only Selected Tags": "Only Selected Tags", + "Only send requests to these workers": "Only send requests to these workers", + "Only send requests to trusted workers": "Only send requests to trusted workers", + "Only Show Models have no Info": "Only Show Models have no Info", + "Only show models that have/don't have user-added metadata": "僅顯示(有 / 無)使用者中繼資料", + "Only show .safetensors format 
models": "僅顯示 .safetensors 檔案格式的模型", + "Only store preferences locally": "Only store preferences locally", + "Only track the mouth": "Only track the mouth", + "Only upscale will not use txt2img to generate an image.": "Only upscale will not use txt2img to generate an image.", + "Only use mid-control when inference": "Only use mid-control when inference", + "Only user list": "Only user list", + "Onnx model filename": "Onnx model filename", + "ONNX opset version": "ONNX opset version", + "(O)Output Model Name": "(O)Output Model Name", + "Opacity": "Opacity", + "Open...": "Open...", + "Open intermediate results": "Open intermediate results", + "Open model Url on the user's client side, rather than server side": "Open model Url on the user's client side, rather than server side", + "Open model Url on the user's client side, rather than server side. If you are running WebUI locally, disabling this may open URLs in your default internet browser if it is different than the one you are running WebUI in": "Open model Url on the user's client side, rather than server side. 
If you are running WebUI locally, disabling this may open URLs in your default internet browser if it is different than the one you are running WebUI in", + "Open model Url on the user's client side, rather than server side.If you are running WebUI locally, disabling this may open URLs in yourdefault internet browser if it is different than the one you are runningWebUI in": "Open model Url on the user's client side, rather than server side.If you are running WebUI locally, disabling this may open URLs in yourdefault internet browser if it is different than the one you are runningWebUI in", + "Open new canvas": "Open new canvas", + "Open New Scribble Drawing Canvas": "Open New Scribble Drawing Canvas", + "Open output directory": "Open output directory", + "openpose": "openpose", + "openpose_face": "openpose_face", + "openpose_faceonly": "openpose_faceonly", + "openpose_full": "openpose_full", + "openpose_hand": "openpose_hand", + "📁\nOpen PSD folder": "📁\nOpen PSD folder", + "Open results": "Open results", + "📂 Open templates folder": "📂 Open templates folder", + "Open TextEditor": "Open TextEditor", + "Open the generated .ebs under project directory and press [Run All] button.": "Open the generated .ebs under project directory and press [Run All] button.", + "Open this model's civitai url": "Open this model's civitai url", + "Open Url At Client Side": "Open Url At Client Side", + "open_url_button": "open_url_button", + "Optical Flow": "Optical Flow", + "Optical flow cadence": "Optical flow cadence", + "Optical flow generation": "Optical flow generation", + "Optimal batch-size": "Optimal batch-size", + "Optimal height": "Optimal height", + "Optimal prompt token count": "Optimal prompt token count", + "Optimal width": "Optimal width", + "Optimize attention layers with sdp (torch >= 2.0.0 required)": "Optimize attention layers with sdp (torch >= 2.0.0 required)", + "Optimize attention layers with xformers": "Optimize attention layers with xformers", + "Optimize GIFs 
with gifsicle, reduces file size": "Optimize GIFs with gifsicle, reduces file size", + "Optimizer algorithm.\nRecommended settings (LR = Learning Rate, WD = Weight Decay):\nTorch / 8Bit AdamW - LR: 2e-6, WD: 0.01\nLion - LR: 5e-7, WD: 0.02\nAdamW Adapt - LR: 0.05, WD: 0\nLion Adapt - LR: ??, WD:0\nSGD Adapt - LR: 1, WD: 0\nAdan Adapt - LR: 0.2, WD: 0.01": "Optimizer algorithm.\nRecommended settings (LR = Learning Rate, WD = Weight Decay):\nTorch / 8Bit AdamW - LR: 2e-6, WD: 0.01\nLion - LR: 5e-7, WD: 0.02\nAdamW Adapt - LR: 0.05, WD: 0\nLion Adapt - LR: ??, WD:0\nSGD Adapt - LR: 1, WD: 0\nAdan Adapt - LR: 0.2, WD: 0.01", + "Optimizer type": "Optimizer type", + "optimizes the self-attention layer within U-Net and VAE models,\n resulting in a reduction in computation time ranging from 1 to 4 times. The larger the generated image is, the greater the\n benefit.": "optimizes the self-attention layer within U-Net and VAE models,\n resulting in a reduction in computation time ranging from 1 to 4 times. The larger the generated image is, the greater the\n benefit.", + "(optional) separated by commas. EX: Character Name/Style Attributes": "(optional) separated by commas. 
EX: Character Name/Style Attributes", + "Options": "Options", + "Options are all options hardcoded, and additional you added in additional_components.py": "Options are all options hardcoded, and additional you added in additional_components.py", + "Options in main UI": "Options in main UI", + "opts onchange": "opts onchange", + "- or -": "- or -", + "OR": "OR", + "(Order for extra network models and wildcards in dropdown)": "(Order for extra network models and wildcards in dropdown)", + "Organization": "Organization", + "Origami": "Origami", + ": Original extension": ": Original extension", + ": Original extension\nAvailable algorithms:": ": Original extension\nAvailable algorithms:", + ": Original filename without extension": ": Original filename without extension", + "Original filename without extension": "Original filename without extension", + "Original file’s hash (good for deleting duplication)": "Original file’s hash (good for deleting duplication)", + "Original First": "原文優先", + "Original FPS": "Original FPS", + "Original Image": "Original Image", + "OriginalImg": "OriginalImg", + "Original model": "Original model", + "Original Movie Path": "Original Movie Path", + "Original Text = \"A, A, B, C\" Common Tags = \"B, A\" Edit Tags = \"X, Y\"": "Original Text = \"A, A, B, C\" Common Tags = \"B, A\" Edit Tags = \"X, Y\"", + "Original Text = \"A, B, C\" Common Tags = \"(nothing)\" Edit Tags = \"X, Y\"": "Original Text = \"A, B, C\" Common Tags = \"(nothing)\" Edit Tags = \"X, Y\"", + "Original Text = \"A, B, C, D, E\" Common Tags = \"A, B, D\" Edit Tags = \", X, \"": "Original Text = \"A, B, C, D, E\" Common Tags = \"A, B, D\" Edit Tags = \", X, \"", + "Original total duration": "Original total duration", + "Original total frames": "Original total frames", + "original video frame": "original video frame", + "Original Weights": "Original Weights", + "or, manually enhance the image": "or, manually enhance the image", + "or to CTRL+SHIFT": "或是 Ctrl + Shift", + 
"Other": "其他", + "Other:": "Other:", + "other imports": "other imports", + "Other Info": "Other Info", + "Other prompt fields": "Other prompt fields", + "others": "others", + "- Others": "- Others", + "Others": "其他", + "Other Setting": "Other Setting", + "Otherwise, read the": "Otherwise, read the", + "OUT": "OUT", + "OUT00": "OUT00", + "OUT01": "OUT01", + "OUT02": "OUT02", + "OUT03": "OUT03", + "OUT04": "OUT04", + "OUT05": "OUT05", + "OUT06": "OUT06", + "OUT07": "OUT07", + "OUT08": "OUT08", + "OUT09": "OUT09", + "OUT10": "OUT10", + "OUT11": "OUT11", + "OUT_A_00": "OUT_A_00", + "OUT_A_01": "OUT_A_01", + "OUT_A_02": "OUT_A_02", + "OUT_A_03": "OUT_A_03", + "OUT_A_04": "OUT_A_04", + "OUT_A_05": "OUT_A_05", + "OUT_A_06": "OUT_A_06", + "OUT_A_07": "OUT_A_07", + "OUT_A_08": "OUT_A_08", + "OUT_A_09": "OUT_A_09", + "OUT_A_10": "OUT_A_10", + "OUT_A_11": "OUT_A_11", + "OUT_B_00": "OUT_B_00", + "OUT_B_01": "OUT_B_01", + "OUT_B_02": "OUT_B_02", + "OUT_B_03": "OUT_B_03", + "OUT_B_04": "OUT_B_04", + "OUT_B_05": "OUT_B_05", + "OUT_B_06": "OUT_B_06", + "OUT_B_07": "OUT_B_07", + "OUT_B_08": "OUT_B_08", + "OUT_B_09": "OUT_B_09", + "OUT_B_10": "OUT_B_10", + "OUT_B_11": "OUT_B_11", + "Outer Fit (Shrink to Fit)": "Outer Fit (Shrink to Fit)", + "outline inflating": "outline inflating", + "output": "output", + "Output animation path": "Output animation path", + "Output DepthMap": "Output DepthMap", + "Output Directory": "輸出目錄", + "Output directory for grids": "Output directory for grids", + ": Output extension (has no dot)": ": Output extension (has no dot)", + "Output filename": "Output filename", + "Output filename format": "Output filename format", + "Output filename formats": "Output filename formats", + "Output format": "Output format", + "output height resolution": "output height resolution", + "output images will be placed in a folder with this name ({timestring} token will be replaced) inside the img2img output folder. Supports params placeholders. 
e.g {seed}, {w}, {h}, {prompts}": "output images will be placed in a folder with this name ({timestring} token will be replaced) inside the img2img output folder. Supports params placeholders. e.g {seed}, {w}, {h}, {prompts}", + "Output images will be shown here": "Output images will be shown here", + "Output Image Width": "Output Image Width", + "Output Mask directory": "Output Mask directory", + "Output Mode": "Output Mode", + "Output Model Name": "Output Model Name", + "Output name": "Output name", + "Output path": "Output path", + "Output Paths": "Output Paths", + "Output per image:": "Output per image:", + "output resolution": "output resolution", + "outputs": "outputs", + "Output settings: all settings (including fps and max frames)": "Output settings: all settings (including fps and max frames)", + "Output style": "Output style", + "Output type": "Output type", + "output video resolution": "output video resolution", + "Out Res": "Out Res", + "overlay": "覆蓋", + "Overlay mask": "Overlay mask", + "override:": "override:", + "Override": "Override", + "Override options (choose the related subject type first for better results)": "Override options (choose the related subject type first for better results)", + "Override the sampling selection from the main UI (Recommended as only below sampling methods have been validated for OpenVINO)": "Override the sampling selection from the main UI (Recommended as only below sampling methods have been validated for OpenVINO)", + "overwrite": "overwrite", + "Overwrite": "Overwrite", + "Overwrite Checkpoint if exist (else will add number)": "Overwrite Checkpoint if exist (else will add number)", + "Overwrite extracted frames": "Overwrite extracted frames", + "Overwrite input frames": "Overwrite input frames", + "Overwrite priority": "Overwrite priority", + "Overwrite subject": "Overwrite subject", + "Overwrite subject:": "Overwrite subject:", + "Overwrite type of image:": "Overwrite type of image:", + "Padding mode": "Padding 
mode", + "Padding Mode": "Padding Mode", + "Padding options": "Padding options", + "Padding token (ID or single token)": "Padding token (ID or single token)", + "Pad the input images token length to this amount. You probably want to do this.": "Pad the input images token length to this amount. You probably want to do this.", + "Page": "Page", + "Page Index": "頁數", + "Page navigation as header": "Page navigation as header", + "painting": "painting", + "pan canvas left/right in degrees per frame": "pan canvas left/right in degrees per frame", + "Panorama to 3D mesh": "Panorama to 3D mesh", + "Papercut Collage": "Papercut Collage", + "Papercut Shadow Box": "Papercut Shadow Box", + "Paper Mache": "Paper Mache", + "Paper Quilling": "Paper Quilling", + "Parameter": "Parameter", + "Params": "Params", + "Parse!": "Parse!", + "Parseq": "Parseq", + "Parseq does": "Parseq does", + "Parseq Manifest (JSON or URL)": "Parseq Manifest (JSON or URL)", + "Parses instance prompts separated by the following characters [,;.!?], and prevents breaking up tokens when using the tokenizer. Useful if you have prompts separated by a lot of tags.": "Parses instance prompts separated by the following characters [,;.!?], and prevents breaking up tokens when using the tokenizer. 
Useful if you have prompts separated by a lot of tags.", + "Passing ControlNet parameters with \"Send to img2img\"": "Passing ControlNet parameters with \"Send to img2img\"", + "Password": "Password", + "paste": "paste", + "paste_append": "paste_append", + "Path": "Path", + "Path for saving your persistent settings file:": "Path for saving your persistent settings file:", + "path name": "路徑名", + "path_recorder": "path_recorder", + "Path relative to the webui folder": "Path relative to the webui folder", + "path/to/classify": "path/to/classify", + "/path/to/images or /path/to/images/**/*": "/path/to/images or /path/to/images/**/*", + "path/to/output": "path/to/output", + "Path to save AnimateDiff motion modules": "Path to save AnimateDiff motion modules", + "Path to save results of sadtalker": "Path to save results of sadtalker", + "Path to settings file you want to load. Path can be relative to webui folder OR full - absolute": "Path to settings file you want to load. Path can be relative to webui folder OR full - absolute", + "Pause": "Pause", + "Percentage of tokens to be merged (higher improves speed)": "Percentage of tokens to be merged (higher improves speed)", + "performance": "performance", + "performance is measured in iterations per second (it/s) and reported for different batch sizes (e.g. 1, 2, 4, 8, 16...)": "performance is measured in iterations per second (it/s) and reported for different batch sizes (e.g. 1, 2, 4, 8, 16...)", + "Perform warmup": "Perform warmup", + "perlin": "perlin", + "Perlin noise is a more natural looking noise. It is heterogeneous and less sharp than uniform noise, this way it is more likely that new details will appear in a more coherent way. This is the new default setting.": "Perlin noise is a more natural looking noise. It is heterogeneous and less sharp than uniform noise, this way it is more likely that new details will appear in a more coherent way. 
This is the new default setting.", + "Perlin octaves": "Perlin octaves", + "Perlin persistence": "Perlin persistence", + "per side": "per side", + "persistent cond cache": "persistent cond cache", + "Personal CivitAI API key": "Personal CivitAI API key", + "Perspective": "Perspective", + "Perspective Flip": "Perspective Flip", + "Perspective flip gamma": "Perspective flip gamma", + "Perspective flip phi": "Perspective flip phi", + "Perspective flip theta": "Perspective flip theta", + "Perspective flip tv": "Perspective flip tv", + "Phone": "Phone", + "photograph": "photograph", + "Photographic": "Photographic", + "photography": "photography", + "Pick Subfolder and Model Version": "Pick Subfolder and Model Version", + "Pics to Interpolate": "Pics to Interpolate", + "pin_light": "小光源", + "pipeline": "pipeline", + "Pipeline": "Pipeline", + "Pix2Pix img CFG schedule": "Pix2Pix 圖像CFG排程", + "Pixelize": "Pixelize", + "Place added settings into an accordion": "Place added settings into an accordion", + "(PLACEHOLDER, USE THE ONE IN 2IMG) Enable debug mode": "(PLACEHOLDER, USE THE ONE IN 2IMG) Enable debug mode", + "Place options in main UI into an accordion": "Place options in main UI into an accordion", + "Place this at back of generated prompt (suffix)": "Place this at back of generated prompt (suffix)", + "Place this in front of generated prompt (prefix)": "Place this in front of generated prompt (prefix)", + "Plain text / URL / Custom format": "Plain text / URL / Custom format", + "Platform": "Platform", + "Play notification sound after image generation": "Play notification sound after image generation", + "Please add text prompts to generate masks": "Please add text prompts to generate masks", + "Please always keep values in math functions above 0.": "Please always keep values in math functions above 0.", + "Please click": "Please click", + "Please enable the following settings to use controlnet from this script.": "Please enable the following settings to use 
controlnet from this script.", + "Please press 'Refresh' to load selected content!": "Please press 'Refresh' to load selected content!", + "Please select a valid lightdiffusionflow or image file!": "Please select a valid lightdiffusionflow or image file!", + "Please switch to 3D animation mode to view this section.": "Please switch to 3D animation mode to view this section.", + "Please use smaller tile size when got CUDA error: out of memory.": "Please use smaller tile size when got CUDA error: out of memory.", + "Plugin first": "Plugin first", + "PNGs": "PNGs", + "point1 x": "point1 x", + "point1 y": "point1 y", + "point2 x": "point2 x", + "point2 y": "point2 y", + "point3 x": "point3 x", + "point3 y": "point3 y", + "points_per_batch": "points_per_batch", + "points_per_side": "points_per_side", + "Pokémon": "Pokémon", + "polylines_sharp": "polylines_sharp", + "polylines_soft": "polylines_soft", + "Pooling Avg": "Pooling Avg", + "Pooling Max": "Pooling Max", + "pop art": "pop art", + "popular": "popular", + ", port for AUTOMATIC1111's webui maintained by": ",由 AUTOMATIC1111 的 WebUI 維護的端口", + "portrait": "portrait", + "Pose style": "Pose style", + "Position (center to edge)": "Position (center to edge)", + "Positioning the effect on the y-axis": "Positioning the effect on the y-axis", + "Position (left to right)": "Position (left to right)", + "Positive Filter": "Positive Filter", + "➕ Positive Mask": "➕ Positive Mask", + "Positive / Negative Prompts": "Positive / Negative Prompts", + "Positive prompt mode": "Positive prompt mode", + "positive prompt to be appended to *all* prompts": "positive prompt to be appended to *all* prompts", + "Post-Inpainting (After swapping)": "Post-Inpainting (After swapping)", + "Postprocess": "Postprocess", + "Post Processing": "Post Processing", + "Post-Processing": "後處理", + "Post-Processing & Advanced Mask Options": "Post-Processing & Advanced Mask Options", + "Post-processing and mask settings for unit faces. 
Best result : checks all, use LDSR, use Codeformer": "Post-processing and mask settings for unit faces. Best result : checks all, use LDSR, use Codeformer", + "Postprocess (latent)": "Postprocess (latent)", + "Precision": "Precision", + "Precision for (attention:1.1) when editing the prompt with Ctrl+up/down": "Precision for (attention:1.1) when editing the prompt with Ctrl+up/down", + "Precision for when editing the prompt with Ctrl+up/down": "Precision for when editing the prompt with Ctrl+up/down", + "Pre-defined aspect ratio buttons (1:1, 4:3, 16:9, 9:16, 21:9)": "Pre-defined aspect ratio buttons (1:1, 4:3, 16:9, 9:16, 21:9)", + "Pre-defined percentage buttons (75, 125, 150)": "Pre-defined percentage buttons (75, 125, 150)", + "Pre-defined percentage display format": "Pre-defined percentage display format", + "predicted_iou_threshold": "predicted_iou_threshold", + "pred_iou_thresh": "pred_iou_thresh", + "Prefix": "Prefix", + "prefix AND prompt + suffix": "prefix AND prompt + suffix", + "prefix + prefix + prompt + suffix": "prefix + prefix + prompt + suffix", + "Pre-Inpainting (before swapping)": "Pre-Inpainting (before swapping)", + "Pre-inpainting sends face to inpainting before swapping": "Pre-inpainting sends face to inpainting before swapping", + "Preload images at startup for first tab": "Preload images at startup for first tab", + "prepare ebsynth": "prepare ebsynth", + "prepare environment": "prepare environment", + "Prepend": "Prepend", + "Prepend additional tags": "Prepend additional tags", + "preprocess": "preprocess", + "Pre-Processing": "Pre-Processing", + "Preprocessing steps": "Preprocessing steps", + "Prerequisites and Important Info:": "Prerequisites and Important Info:", + "Preset": "Preset", + "Preset Manager": "Preset Manager", + "Preset Weights": "Preset Weights", + "Preset_Weights": "Preset_Weights", + "Prev batch": "Prev batch", + "Prevents overfit by clipping gradient norms. Default value is 0.0. 
Recommended value for Lora is 1.0": "Prevents overfit by clipping gradient norms. Default value is 0.0. Recommended value for Lora is 1.0", + "Preview annotator result": "Preview annotator result", + "Preview automatically when add/remove points": "Preview automatically when add/remove points", + "Preview Image": "Preview Image", + "Preview image negative prompt": "Preview image negative prompt", + "Preview image prompt": "Preview image prompt", + "Preview Image Selection": "Preview Image Selection", + "Preview mask": "Preview mask", + "Preview motion only. Uses a static picture for init, and draw motion reference rectangle.": "Preview motion only. Uses a static picture for init, and draw motion reference rectangle.", + "Preview Segmentation": "Preview Segmentation", + "Preview segmentation image": "Preview segmentation image", + "Previous": "Previous", + "Prev page": "Prev page", + "Prev Page": "上一頁", + "Primary model": "Primary model", + "primitivism": "primitivism", + "Print": "Print", + "Print all deviations": "Print all deviations", + "print change": "print change", + "Print debug logs to the console": "在控制台顯示除錯訊息。", + "Print image deletion messages to the console": "將圖像刪除訊息打印到控制台", + "Print prompts to console when generating with txt2img and img2img.": "Print prompts to console when generating with txt2img and img2img.", + "Print stack traces before exiting the program with ctrl+c.": "Print stack traces before exiting the program with ctrl+c.", + "Print warning logs to the console": "在控制台顯示警告訊息", + "Prior loss weight.": "Prior loss weight.", + "Process given file(s) under the input folder, seperate by comma": "Process given file(s) under the input folder, seperate by comma", + "Process Hires. fix": "Process Hires. 
fix", + "Process & Save": "Process & Save", + "Process Stage": "Process Stage", + "Process Text": "Process Text", + "Process type": "Process type", + "Progressbar/preview update period, in milliseconds": "Progressbar/preview update period, in milliseconds", + "Project": "Project", + "Project directory": "Project directory", + "project setting": "project setting", + "🎉 Promo": "🎉 Promo", + "prompt 1": "prompt 1", + "prompt 2": "prompt 2", + "prompt 4": "prompt 4", + "prompt 5": "prompt 5", + "Prompt attention parser": "Prompt attention parser", + "Prompt Changes": "Prompt Changes", + "Prompt compounder": "Prompt compounder", + "Prompt editing": "Prompt editing", + "Prompt-Ex": "Prompt-Ex", + "Prompt fields": "Prompt fields", + "Prompt for face": "Prompt for face", + "Prompt for Face": "Prompt for Face", + "Prompt Formatter": "Prompt Formatter", + "Prompt for upscale (added to generation prompt)": "Prompt for upscale (added to generation prompt)", + "Prompt mode": "Prompt mode", + "Prompt Mode": "Prompt Mode", + "Prompt mode guide": "Prompt mode guide", + "Prompts": "提示詞", + "Prompts are stored in JSON format. If you've got an error, check it in a": "Prompts are stored in JSON format. If you've got an error, check it in a", + "Prompts are stored in JSON format. If you've got an error, check it in the validator,": "Prompts are stored in JSON format. If you've got an error, check it in the validator,", + "Prompts are stored in JSON format. If you've got an error, check it in validator,": "Prompts are stored in JSON format. If you've got an error, check it in validator,", + "Prompt seperator": "Prompt seperator", + "Prompt seperator mode": "Prompt seperator mode", + "prompts for your animation in a JSON format. Use --neg words to add 'words' as negative prompt": "prompts for your animation in a JSON format. 
Use --neg words to add 'words' as negative prompt", + "Prompts negative": "Prompts negative", + "Prompts positive": "Prompts positive", + "Prompt to be visualized": "Prompt to be visualized", + "Prompt, will append to your t2i prompt": "Prompt, will append to your t2i prompt", + "Provider": "Provider", + "Proxy": "Proxy", + "Proxy to use for fetching models and model data. Format: http://127.0.0.1:port": "Proxy to use for fetching models and model data. Format: http://127.0.0.1:port", + "prune": "prune", + "Pruning Methods": "Pruning Methods", + "psychedelic": "psychedelic", + "Psychedelic": "Psychedelic", + "Pure Noise": "Pure Noise", + "Push File To 🤗": "Push File To 🤗", + "Push File to 🤗 Hugging Face": "Push File to 🤗 Hugging Face", + "Push Folder To 🤗": "Push Folder To 🤗", + "Push Folder to 🤗 Hugging Face": "Push Folder to 🤗 Hugging Face", + "(puts prompt and negative prompt inside the Generate tab, leaving more vertical space for the image on the right)": "(puts prompt and negative prompt inside the Generate tab, leaving more vertical space for the image on the right)", + "Put weight sets. float number x 25": "Put weight sets. float number x 25", + "Quality": "Quality", + "Quality Gate": "Quality Gate", + "Quantize": "Quantize", + "Queued": "Queued", + "Queued At": "Queued At", + "Queue front": "Queue front", + "quick": "quick", + "Quick": "Quick", + "Quick batch": "Quick batch", + "Quick Guide": "Quick Guide", + "Quick Save": "Quick Save", + "Quicksettings position": "Quicksettings position", + "radio-buttons": "radio-buttons", + "Randomize For Each Iteration": "Randomize For Each Iteration", + "Randomize Sampler": "Randomize Sampler", + "Randomize Style": "Randomize Style", + "Randomly decide to flip images horizontally.": "Randomly decide to flip images horizontally.", + "Random Mode": "Random Mode", + "Random number generator source. Changes seeds drastically. 
Use CPU to produce the same picture across different vidocard vendors.": "Random number generator source. Changes seeds drastically. Use CPU to produce the same picture across different vidocard vendors.", + "Random seed for ToMe partition": "Random seed for ToMe partition", + "range": "range", + "Range": "Range", + "ranking": "評等", + "ranking filter": "以評等篩選", + "Ranking filter": "Ranking filter", + "rating": "評分", + "Rating": "評分", + "Rating confidences": "Rating confidences", + "Rating confidents": "Rating confidents", + "Ratings and included tags": "Ratings and included tags", + "Raw percentage (50%, 150%)": "原始百分比(50%,150%)", + "Read Caption from Selected Image": "Read Caption from Selected Image", + "read_last_image": "read_last_image", + "read_last_settings": "read_last_settings", + "README": "README", + "*READ ME before you use this mode!*": "*READ ME before you use this mode!*", + "read metadata": "read metadata", + "Read prompts from text boxes": "Read prompts from text boxes", + "Read tabular commands": "Read tabular commands", + "Read tags from text files": "Read tags from text files", + "Real Estate": "Real Estate", + "realism": "realism", + "reallybigname": "reallybigname", + "Reapply ranking after moving files": "Reapply ranking after moving files", + "Rebuild exif cache": "Rebuild exif cache", + "Recipe": "Recipe", + "recolor_intensity": "recolor_intensity", + "recolor_luminance": "recolor_luminance", + "recombine ebsynth": "recombine ebsynth", + "Recommended inference settings": "Recommended inference settings", + "Recommended to set tile sizes as large as possible before got CUDA error: out of memory.": "Recommended to set tile sizes as large as possible before got CUDA error: out of memory.", + "Recommend enabling the following settings.": "Recommend enabling the following settings.", + "Reconstruct prompt from existing image and put it into the prompt field.": "從現有的圖像中重構出提示詞,並將其放入提示詞的輸入文字方塊", + "record": "record", + "Recreate hash for existing 
files": "Recreate hash for existing files", + "Rectangular": "Rectangular", + "Recursively search for wildcards": "Recursively search for wildcards", + "red-cyan-anaglyph": "red-cyan-anaglyph", + "red/green/blue": "red/green/blue", + "Redo generation": "Redo generation", + "Re-enable the bug that trained the tenc embedding layer for LoRA.": "Re-enable the bug that trained the tenc embedding layer for LoRA.", + "reference_adain": "reference_adain", + "reference_adain+attn": "reference_adain+attn", + "Reference Control (enabled with image below)": "Reference Control (enabled with image below)", + "Reference Control Mode": "Reference Control Mode", + "Reference Control Weight": "Reference Control Weight", + "Reference Image": "Reference Image", + "Reference Image Resize Mode": "Reference Image Resize Mode", + "Reference is an image. First face will be extracted.": "Reference is an image. First face will be extracted.", + "reference_only": "reference_only", + "Reference-Only": "Reference-Only", + "Reference-Only Control (enabled only when a reference image below is present)": "Reference-Only Control (enabled only when a reference image below is present)", + "Reference source face : start from 0": "Reference source face : start from 0", + "Reference Type for Reference-Only": "Reference Type for Reference-Only", + "Refiner checkpoint": "Refiner checkpoint", + "Refiner Denosing Fraction:": "Refiner Denosing Fraction:", + "Refiner model": "Refiner model", + "Refiner Model": "Refiner Model", + "Refiner Model ID": "Refiner Model ID", + "Refiner options": "Refiner options", + "Refiner switch at": "Refiner switch at", + "reflection": "reflection", + "reflection will attempt to approximate the image and tile/repeat pixels": "reflection will attempt to approximate the image and tile/repeat pixels", + "refresh": "重新整理", + "Refresh all checkpoint preview info": "Refresh all checkpoint preview info", + "Refresh Civitai Helper": "Refresh Civitai Helper", + "Refresh Civitai Helper's 
additional buttons": "Refresh Civitai Helper's additional buttons", + "Refresh Civitai Helper's model card buttons": "Refresh Civitai Helper's model card buttons", + "Refresh data": "Refresh data", + "Refresh extension list": "Refresh extension list", + "Refresh internal temp files": "Refresh internal temp files", + "Refresh models": "重新整理模型列表", + "Refresh page": "Refresh page", + "Refresh Style": "Refresh Style", + "Refresh TAC temp files": "Refresh TAC temp files", + "refresh textual inversion templates": "refresh textual inversion templates", + "refresh VAE": "refresh VAE", + "Regenerate": "Regenerate", + "regex - e.g. ^(?!.*Hires).*$": "regex - e.g. ^(?!.*Hires).*$", + "Region": "Region", + "Related to original file": "Related to original file", + "Related to output file": "Related to output file", + "Relative": "Relative", + "Reload Cache List": "Reload Cache List", + "Reload checkpoint": "Reload checkpoint", + "Reload Checkpoints": "Reload Checkpoints", + "🔄 Reload ComfyUI interfaces (client side)": "🔄 Reload ComfyUI interfaces (client side)", + "reload hypernetworks": "reload hypernetworks", + "Reloading...": "Reloading...", + "Reload model back to VRAM": "Reload model back to VRAM", + "Reload Presets": "Reload Presets", + "Reload/Save Settings (config.json)": "Reload/Save Settings (config.json)", + "Reload scripts": "Reload scripts", + "🔄 Reload settings": "🔄 Reload settings", + "Reload settings": "Reload settings", + "Reload Tags": "Reload Tags", + "Reload UI needed to apply": "Reload UI needed to apply", + "Reloat List": "Reloat List", + "remake dimension": "remake dimension", + "Remember this choice?": "Remember this choice?", + "➖ Remove": "➖ Remove", + "Remove": "Remove", + "Remove all point prompts": "Remove all point prompts", + "Remove Auto Trans": "移除自動翻譯的文字", + "Remove background": "移除背景", + "Remove background image": "Remove background image", + "Remove batch": "Remove batch", + "Remove cfg": "Remove cfg", + "Remove checkboxes": "Remove 
checkboxes", + "Remove dimensions": "Remove dimensions", + "Remove duplicated tag": "Remove duplicated tag", + "Remove duplicate prompts": "Remove duplicate prompts", + "Remove duplicate tags": "Remove duplicate tags", + "Remove empty prompts": "Remove empty prompts", + "Remove from saved directories": "從已儲存的目錄移除", + "Remove head": "Remove head", + "Remove hires_fix": "Remove hires_fix", + "Remove Image": "Remove Image", + "Remove inpaint": "Remove inpaint", + "Remove last": "Remove last", + "remove_model_button": "remove_model_button", + "Remove motion module from any memory": "Remove motion module from any memory", + "Remove occluded edges": "Remove occluded edges", + "Remove override_settings": "Remove override_settings", + "Remove sampler": "Remove sampler", + "Remove scripts": "Remove scripts", + "Remove sd_model_checkpoint": "Remove sd_model_checkpoint", + "Remove sd_vae": "Remove sd_vae", + "Remove seed": "Remove seed", + "Remove selected": "Remove selected", + "Remove selected tags": "Remove selected tags", + "Remove selection [Delete]": "Remove selection [Delete]", + "Remove tail": "Remove tail", + "Remove this model": "Remove this model", + "Remove token_merging_ratio": "Remove token_merging_ratio", + "renaissance": "renaissance", + "Rename keyframes.": "Rename keyframes.", + "rename_model_button": "rename_model_button", + "Rename New": "Rename New", + "Rename this model": "Rename this model", + "repeats the edge of the pixels, and extends them. Animations with quick motion may yield lines where this border function was attempting to populate pixels into the empty space created.": "repeats the edge of the pixels, and extends them. 
Animations with quick motion may yield lines where this border function was attempting to populate pixels into the empty space created.", + "Replace": "Replace", + "Replace new-line character with comma": "Replace new-line character with comma", + "Replace Old Metadata Formats*": "Replace Old Metadata Formats*", + "Replace or save the selected component.": "Replace or save the selected component.", + "replace_preview_button": "replace_preview_button", + "replace preview image with currently selected in gallery": "replace preview image with currently selected in gallery", + "-> Replace tag, ..": "-> Replace tag, ..", + "Replace Text": "Replace Text", + "Replace '_' with ' '(Does not affect the function to add tokens using add_token.txt.)": "Replace '_' with ' '(Does not affect the function to add tokens using add_token.txt.)", + "replicate": "複製", + "(Requires the": "(Requires the", + "Requires the": "Requires the", + "(Requires UI reload)": "(Requires UI reload)", + ", Requires UI reload)": ", Requires UI reload)", + "(Requires Web-UI restart)": "(Requires Web-UI restart)", + "(Requires Web-UI Restart)": "(Requires Web-UI Restart)", + "reroll": "reroll", + "Reroll blank frames": "Reroll blank frames", + "Reroll patience": "Reroll patience", + "Reserved: total amout of video memory allocated by the Torch library ": "Reserved: total amout of video memory allocated by the Torch library ", + "reset": "reset", + "Reset": "重置", + "🎥 Reset Camera": "🎥 Reset Camera", + "Reset CLIP ids": "Reset CLIP ids", + "reset current": "reset current", + "reset default": "reset default", + "Reset default (Reload UI needed to apply)": "Reset default (Reload UI needed to apply)", + "Reset mixer": "Reset mixer", + "🧍 Reset Pose": "🧍 Reset Pose", + "♻️ Reset settings": "♻️ Reset settings", + "Reset Tile Size": "Reset Tile Size", + "resizable": "resizable", + "resize": "resize", + "Resize result back to original dimensions": "Resize result back to original dimensions", + "Resolution": 
"解析度", + "Resolution:": "Resolution:", + "Resolutions:": "Resolutions:", + "Restart debug": "Restart debug", + "Restart server": "Restart server", + "Restart UI": "Restart UI", + "Restore Checkpoint": "Restore Checkpoint", + "restore config state file": "restore config state file", + "Restore defaults": "Restore defaults", + "Restore Face": "Restore Face", + "Restore Faces": "面部修復", + "Restore faces after ADetailer": "Restore faces after ADetailer", + "Restore faces after ADetailer 10th": "Restore faces after ADetailer 10th", + "Restore faces after ADetailer 2nd": "Restore faces after ADetailer 2nd", + "Restore faces after ADetailer 3rd": "Restore faces after ADetailer 3rd", + "Restore faces after ADetailer 4th": "Restore faces after ADetailer 4th", + "Restore faces after ADetailer 5th": "Restore faces after ADetailer 5th", + "Restore faces after ADetailer 6th": "Restore faces after ADetailer 6th", + "Restore faces after ADetailer 7th": "Restore faces after ADetailer 7th", + "Restore faces after ADetailer 8th": "Restore faces after ADetailer 8th", + "Restore faces after ADetailer 9th": "Restore faces after ADetailer 9th", + "Restore Faces, Tiling & more": "恢復臉部、平鋪和更多功能", + "Restore Last Scene": "Restore Last Scene", + "Restore settings after rendering a job": "Restore settings after rendering a job", + "Restore settings to default": "Restore settings to default", + "Restore system defaults": "Restore system defaults", + "Restore visibility": "Restore visibility", + "Result = \"A, B, C, X, Y\" (add X and Y to the end (default))": "Result = \"A, B, C, X, Y\" (add X and Y to the end (default))", + "Results": "Results", + "Result = \"X, C, E\" (A->\"\", B->X, D->\"\")": "Result = \"X, C, E\" (A->\"\", B->X, D->\"\")", + "Result = \"X, Y, A, B, C\" (add X and Y to the beginning (\"Prepend additional tags\" checked))": "Result = \"X, Y, A, B, C\" (add X and Y to the beginning (\"Prepend additional tags\" checked))", + "Result = \"Y, Y, X, C\" (B->X, A->Y)": "Result = 
\"Y, Y, X, C\" (B->X, A->Y)", + "Resume Animation": "繼續動畫", + "Resume from timestring": "從時間字串繼續執行", + "Resume & Run from file": "繼續執行並從檔案運行", + "Resume timestring": "從時間字串繼續執行", + "Retro Arcade": "Retro Arcade", + "Retro Game": "Retro Game", + "Return mask": "Return mask", + "Reuse original image": "復用原圖", + "Reuse seed from last generation, mostly useful if it was randomized": "Reuse seed from last generation, mostly useful if it was randomized", + "Reverse": "Reverse", + "Reverse model sort order": "反向排序", + "revision_clipvision": "revision_clipvision", + "revision_ignore_prompt": "revision_ignore_prompt", + "rgb": "rgb", + "RGB to BGR": "RGB to BGR", + "RIFE v4.6 and FILM.": "RIFE v4.6 and FILM.", + "right-left": "right-left", + "roll canvas clockwise/anticlockwise": "roll canvas clockwise/anticlockwise", + "Rolling factor": "Rolling factor", + "romanticism": "romanticism", + "rotate canvas clockwise/anticlockwise in degrees per frame": "rotate canvas clockwise/anticlockwise in degrees per frame", + "Rotate images (clockwise)": "Rotate images (clockwise)", + "Rotation 3D X": "3D 旋轉 X", + "Rotation 3D Y": "3D 旋轉 Y", + "Rotation 3D Z": "3D 旋轉 Z", + "Round": "Round", + "RPG Fantasy Game": "RPG Fantasy Game", + "run": "run", + "Run benchmark": "執行效能評測", + "Run Cleaner": "Run Cleaner", + "run command": "run command", + "Run ControlNet Inpaint": "Run ControlNet Inpaint", + "run extensions installers": "run extensions installers", + "run from a list of setting .txt files. Upload them to the box on the right (visible when enabled)": "run from a list of setting .txt files. Upload them to the box on the right (visible when enabled)", + "Run from Settings file": "從設定檔運行", + "Run Inpainting": "Run Inpainting", + "Run Inpainting on offline network (Models not auto-downloaded)": "Run Inpainting on offline network (Models not auto-downloaded)", + "Run Merge": "Run Merge", + "running benchmark may take a while. 
extensive tests may result in gpu out-of-memory conditions.": "running benchmark may take a while. extensive tests may result in gpu out-of-memory conditions.", + "Running ebsynth.(on your self)": "Running ebsynth.(on your self)", + "Run on incomplete": "Run on incomplete", + "Run Padding": "Run Padding", + "Run preprocessor": "Run preprocessor", + "Run Rand": "Run Rand", + "Run: Sampler, Width, Height, tiling, resize seed.": "Run: Sampler, Width, Height, tiling, resize seed.", + "Run: seed, subseed, subseed strength.": "Run: seed, subseed, subseed strength.", + "Run Segment Anything": "Run Segment Anything", + "Run Segment Anything on CPU": "Run Segment Anything on CPU", + "😭 SadTalker: Learning Realistic 3D Motion Coefficients for Stylized Audio-Driven Single Image Talking Face Animation (CVPR 2023)": "😭 SadTalker: Learning Realistic 3D Motion Coefficients for Stylized Audio-Driven Single Image Talking Face Animation (CVPR 2023)", + "SAG Mask Threshold": "SAG Mask Threshold", + "Same Gender": "Same Gender", + "same to Strength": "same to Strength", + "SAM Model": "SAM Model", + "Sampler schedule": "取樣器排程", + "Sampler Settings": "Sampler Settings", + "Sampling method webui": "Sampling method webui", + "Sampling settings": "Sampling settings", + "Sampling steps webui": "Sampling steps webui", + "SAM requires an input image. Please upload an image first.": "SAM requires an input image. 
Please upload an image first.", + "Sanity Samples": "Sanity Samples", + "saturation": "saturation", + "Saturation": "Saturation", + "save": "儲存", + "💾 Save": "💾 Save", + "Save 3D depth maps": "Save 3D depth maps", + "Save a checkpoint every N epochs.": "Save a checkpoint every N epochs.", + "Save all changes": "Save all changes", + "save animation's depth maps as extra files": "save animation's depth maps as extra files", + "save anime gif": "save anime gif", + "Save an .srt (subtitles) file with the generation info along with each animation": "Save an .srt (subtitles) file with the generation info along with each animation", + "Save as": "Save as", + "save as csv": "save as csv", + "Save as csv(default)": "Save as csv(default)", + "Save as half": "Save as half", + "Save as preset": "Save as preset", + "Save as safetensors": "Save as safetensors", + "save as txt": "save as txt", + "Save as txt": "Save as txt", + "Save background instead of foreground": "Save background instead of foreground", + "Save Caption to .txt File": "Save Caption to .txt File", + "Save changes": "Save changes", + "Save Controlnet Data in ...": "Save Controlnet Data in ...", + "save csv": "save csv", + "Save current settings": "Save current settings", + "Save current settings as default": "Save current settings as default", + "Saved directories": "已儲存的目錄", + "Save depth map": "Save depth map", + "Save DepthMap": "儲存深度圖", + "Save depth maps": "Save depth maps", + "Save every n epochs": "Save every n epochs", + "Save format": "Save format", + "Save images": "Save images", + "Save Images": "Save Images", + "Save images before ADetailer": "Save images before ADetailer", + "Save JSON": "儲存為 JSON", + "Save LyCORIS models to Lora directory. Do not use this if you are on older versions of webui or you use an extension that handles LyCORIS models.": "Save LyCORIS models to Lora directory. 
Do not use this if you are on older versions of webui or you use an extension that handles LyCORIS models.", + "Save mask": "Save mask", + "Save masked image": "Save masked image", + "Save mask previews": "Save mask previews", + "save merged model ID to": "save merged model ID to", + "save metadata": "save metadata", + "Save Metadata": "儲存中繼資料", + "Save metadata as caption": "Save metadata as caption", + "save model": "save model", + "Save Name": "Save Name", + "Save Only": "Save Only", + "Save Original": "Save Original", + "Save original image": "Save original image", + "Save original image if face detection fails": "Save original image if face detection fails", + "Save original image with mask and bounding box": "Save original image with mask and bounding box", + "Save Outputs": "Save Outputs", + "Save path for images": "Save path for images", + "Save PNG": "儲存為 PNG", + "💾🧍 Save Pose": "💾🧍 Save Pose", + "save precision": "save precision", + "Save Preset": "Save Preset", + "Save Presets": "Save Presets", + "Save Preview(s) Frequency": "Save Preview(s) Frequency", + "Save prompts as text file": "Save prompts as text file", + "Save ranking in image's pnginfo": "Save ranking in image's pnginfo", + "Save & Restart": "Save & Restart", + "Save Scene": "Save Scene", + "save setting": "save setting", + "Save Setting": "儲存設定", + "save settings": "save settings", + "Save settings as default": "Save settings as default", + "Save Style": "Save Style", + "save successful": "save successful", + "Save tags": "Save tags", + "Save Tags": "Save Tags", + "Save tags after download": "Save tags after download", + "Save the current merged model": "Save the current merged model", + "Save to tags files": "Save to tags files", + "Save vector to text file": "Save vector to text file", + "Save WebP in lossless format (highest quality, largest file size)": "Save WebP in lossless format (highest quality, largest file size)", + "Save with JSON": "Save with JSON", + "Saving images": "Saving 
images", + "Saving the image to an existing file": "Saving the image to an existing file", + "Scale Height": "Scale Height", + "scale the canvas size, multiplicatively. [static = 1.0]": "scale the canvas size, multiplicatively. [static = 1.0]", + "Scale Width": "Scale Width", + "Scan": "Scan", + "Scan Exif-/.txt-data (initially slower, but required for many features to work)": "Scan Exif-/.txt-data (initially slower, but required for many features to work)", + "Scan for:": "Scan for:", + "Scan for available updates": "Scan for available updates", + "Scan for duplicate models": "Scan for duplicate models", + "Scan Models for Civitai": "Scan Models for Civitai", + "Scanning takes time, just wait. Check console log for detail": "Scanning takes time, just wait. Check console log for detail", + "Scanning takes time, just wait. Check console log for details": "Scanning takes time, just wait. Check console log for details", + "scene": "scene", + "schedule": "schedule", + "Schedule controls how much the composite video is mixed in, whether set to mask is None or using a mask. This is the master mix.": "Schedule controls how much the composite video is mixed in, whether set to mask is None or using a mask. 
This is the master mix.", + "Scheduler": "Scheduler", + "Scheduler:": "Scheduler:", + "scheduler type": "scheduler type", + "sci-fi": "sci-fi", + "Scoring type": "Scoring type", + "screen": "濾色", + "Scribble": "Scribble", + "scribble_hed": "scribble_hed", + "scribble_pidinet": "scribble_pidinet", + "scribble_xdog": "scribble_xdog", + "Script Enabled": "Script Enabled", + "Script first": "Script first", + "Script names to apply to ADetailer (separated by comma)": "Script names to apply to ADetailer (separated by comma)", + "scroll": "scroll", + "sculpture": "sculpture", + "sd model (experimental)": "sd model (experimental)", + "sd-parseq manifest": "sd-parseq manifest", + "(SD-WebUI v1.5 and higher treats LoCON's the same as LORA's, Requires UI reload)": "(SD-WebUI v1.5 and higher treats LoCON's the same as LORA's, Requires UI reload)", + "(SD-WebUI v1.5 and higher treats LoCON's the same as LORA's, so they can be placed in the LORA folder.)": "(SD-WebUI v1.5 and higher treats LoCON's the same as LORA's, so they can be placed in the LORA folder.)", + "Seach models": "Seach models", "Seams denoise strenght": "Seams denoise strenght", - "Seams Width": "Seams Width", - "Seams padding": "Seams padding", + "Seams fix": "儲存接縫修復圖像", "Seams Mask blur (offset pass only)": "Seams Mask blur (offset pass only)", - "Enable controlnet tile resample": "Enable controlnet tile resample", - "also enable wierd blocky upscale mode": "also enable wierd blocky upscale mode", - "Controlnet tile model name": "Controlnet tile model name", + "Seams padding": "Seams padding", + "Seams Width": "Seams Width", + "search": "search", + "Search:": "Search:", + "Search and Replace": "Search and Replace", + "Search and Replace for all images displayed.": "Search and Replace for all images displayed.", + "Search and Replace in": "Search and Replace in", + "Search anything : Prompt, Size, Model, ...": "Search anything : Prompt, Size, Model, ...", + "Search CivitAI": "Search CivitAI", + "Search for 
LyCORIS/LoHa": "Search for LyCORIS/LoHa", + "Search for models": "Search for models", + "search huggingface models": "search huggingface models", + "Search in wildcard names...": "Search in wildcard names...", + "Search Mode": "Search Mode", + "Search models": "Search models", + "Search negative prompt": "搜尋反向提示詞", + "Search results": "Search results", + "Search tag, .. ->": "Search tag, .. ->", + "Search Tags": "Search Tags", + "Search tags / Filter images by tags": "Search tags / Filter images by tags", + "Search tags / Filter images by tags (INCLUSIVE)": "Search tags / Filter images by tags (INCLUSIVE)", + "Search Term:": "Search Term:", + "Search Text": "Search Text", + "Search type:": "Search type:", + "Search workflows in subdirectories": "Search workflows in subdirectories", + "seascape": "seascape", + "Secondary model": "Secondary model", + "Secondary Upscaler": "Secondary Upscaler", + "seconds": "seconds", + "Second upscaler": "Second upscaler", + "Security": "Security", + "(See": "(See", + "Seed behavior": "Seed behavior", + "'seed_behavior' will be forcibly set to 'schedule'.": "'seed_behavior' will be forcibly set to 'schedule'.", + "Seed for Random Ratio": "Seed for Random Ratio", + "seed for the whole video generation.": "seed for the whole video generation.", + "Seed iter N": "Seed iter N", + "Seed schedule": "Seed schedule", + "seed_schedule should start and end on the same seed.": "seed_schedule should start and end on the same seed.", + "Seed training RNG. Useful for testing, but may cause minor performance reduction.": "Seed training RNG. 
Useful for testing, but may cause minor performance reduction.", + "See here for explanation of each parameter.": "See here for explanation of each parameter.", + "See the progress.": "See the progress.", + "Seg": "Seg", + "Segment Anything": "Segment Anything", + "Segment Anything Model ID": "Segment Anything Model ID", + "Segment Anything Output": "Segment Anything Output", + "Segment Anything status": "Segment Anything status", + "segmentation prompt": "segmentation prompt", + "Segmentation status": "Segmentation status", + "seg_ofade20k": "seg_ofade20k", + "seg_ofcoco": "seg_ofcoco", + "seg_ufade20k": "seg_ufade20k", + "Select a component class or specific component.": "Select a component class or specific component.", + "Select a device": "Select a device", + "select all": "select all", + "Select a local config for the model from the configs directory of the webui root": "Select a local config for the model from the configs directory of the webui root", + "Select a sampling method": "Select a sampling method", + "Select blocks to change": "Select blocks to change", + "Select brush color": "Select brush color", + "Select components to hide": "Select components to hide", + "(select Disco output format).": "(選擇Disco輸出格式)。", + "selected": "selected", + "Selected": "已選取", + "Selected image": "Selected image", + "Selected Image :": "Selected Image :", + "Selected models for sharing": "Selected models for sharing", + "Selected One": "Selected One", + "Selected Tags": "Selected Tags", + "selected tags from the images displayed.": "selected tags from the images displayed.", + "Select images from the left gallery.": "Select images from the left gallery.", + "Select keyframes to be given to ebsynth.": "Select keyframes to be given to ebsynth.", + "select-list": "select-list", + "Select model": "Select model", + "Select or Create a Model.": "Select or Create a Model.", + "Select preset": "Select preset", + "selects the type of animation": "selects the type of animation", 
+ "Selects the type of Optical Flow to use if Optical Flow is selected in Hybrid motion.": "Selects the type of Optical Flow to use if Optical Flow is selected in Hybrid motion.", + "Select template:": "Select template:", + "Select the face to be swapped, you can sort by size or use the same gender as the desired face:": "Select the face to be swapped, you can sort by size or use the same gender as the desired face:", + "select the frame interpolation engine. hover on the options for more info": "select the frame interpolation engine. hover on the options for more info", + "Select Translater": "翻譯引擎:", + "Select visible tags": "Select visible tags", + "Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", + "Send blended image to img2img Inpainting tab": "Send blended image to img2img Inpainting tab", + "Send dimensions to stable diffusion": "Send dimensions to stable diffusion", + "Send first frame to img2img Inpainting tab": "Send first frame to img2img Inpainting tab", + "Send image to tag selection": "Send image to tag selection", + "Send interrupt": "Send interrupt", + "Send pose to /openpose_editor_index for edit.": "Send pose to /openpose_editor_index for edit.", + "send priority": "send priority", + "Send prompt up": "Send prompt up", + "Send this image to ControlNet.": ">> ControlNet", + "Send to": "發送至", + "Send to Blend": ">> 混合器", + "Send to Canvas Editor": "Send to Canvas Editor", + "Send to Controlnet-img2img": "Send to Controlnet-img2img", + "Send to ControlNet (img2img)": "Send to ControlNet (img2img)", + "Send to Controlnet-txt2img": "Send to Controlnet-txt2img", + "Send to ControlNet (txt2img)": "Send to ControlNet (txt2img)", + "Send to Effect": ">> 效果器", + "Send to Extras": "Send to Extras", + "Send to img2img:": ">> 圖生圖\n(數字對應模型號碼)", + "Send to img2img ControlNet": "Send to img2img ControlNet", + "Send to inpaint upload": ">> 局部重繪", + "Send to 
ip2p": "Send to ip2p", + "Send to Layer1": ">> 圖層 1", + "Send to Layer2": ">> 圖層 2", + "Send to Layer3": ">> 圖層 3", + "Send to Layer4": ">> 圖層 4", + "Send to Layer5": ">> 圖層 5", + "Send to Multi-Merge": "Send to Multi-Merge", + "Send to QR Compare": "Send to QR Compare", + "Send to QR Scanner": "Send to QR Scanner", + "Send to txt2img:": ">> 文生圖\n(數字對應模型號碼)", + "Send to txt2img and img2img": "Send to txt2img and img2img", + "Send to txt2img ControlNet": "Send to txt2img ControlNet", + "Send training samples to a Discord channel after generation.": "Send training samples to a Discord channel after generation.", + "Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)": "Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)", + "Separate UNet/Text Encoder weights": "單獨設定 UNet 及文字編碼器的權重", + "sequential apply": "sequential apply", + "Sequential Merge Parameters": "Sequential Merge Parameters", + "Sequential XY Merge and Generation": "Sequential XY Merge and Generation", + "Server start time": "Server start time", + "🖼 Set": "🖼 Set", + "Set": "Set", + "Set a variable like so:": "Set a variable like so:", + "Set \"Hires steps\" to [0], if you need": "Set \"Hires steps\" to [0], if you need", + "Set Init tab's strength slider greater than 0. Recommended value (.65 - .80).": "Set Init tab's strength slider greater than 0. 
Recommended value (.65 - .80).", + "Set new defaults": "Set new defaults", + "Set Random Pose": "Set Random Pose", + "Set ranking to": "Set ranking to", + "sets a midpoint at which a depth-map is to be drawn: range [-1 to +1]": "sets a midpoint at which a depth-map is to be drawn: range [-1 to +1]", + "set samplers": "set samplers", + "Set 'seed_behavior' to 'schedule' under the Seed Scheduling section below.": "Set 'seed_behavior' to 'schedule' under the Seed Scheduling section below.", + "Set the Latent Couple extension to 2 area’s (standard setting)": "Set the Latent Couple extension to 2 area’s (standard setting)", + "Set the prompt compounder to: 2": "Set the prompt compounder to: 2", + "Set the Prompt Seperator mode to: automatic": "Set the Prompt Seperator mode to: automatic", + "Set the Prompt Seperator mode to: prefix AND prompt + suffix": "Set the Prompt Seperator mode to: prefix AND prompt + suffix", + "Set the Prompt seperator to: AND": "Set the Prompt seperator to: AND", + "Set the strength to 0 automatically when no init image is used": "Set the strength to 0 automatically when no init image is used", + " Setting": " Setting", + "(setting entries that also appear in txt2img/img2img interfaces)": "(setting entries that also appear in txt2img/img2img interfaces)", + "Setting files": "Setting files", + "settings": "settings", + "Settings->ControlNet->Allow other script to control this extension": "Settings->ControlNet->Allow other script to control this extension", + "Settings File": "Settings File", + "settings file path can be relative to webui folder OR full - absolute": "settings file path can be relative to webui folder OR full - absolute", + "Settings file Path can be relative to webui folder OR full - absolute": "Settings file Path can be relative to webui folder OR full - absolute", + "settings for": "settings for", + "Settings for img2img": "Settings for img2img", + "Settings for txt2img": "Settings for txt2img", + "Settings in img2img hidden 
under Accordion": "Settings in img2img hidden under Accordion", + "Settings->Interrogate Option->Interrogate: include ranks of model tags matches in results": "Settings->Interrogate Option->Interrogate: include ranks of model tags matches in results", + "Settings in txt2img hidden under Accordion": "Settings in txt2img hidden under Accordion", + "Settings in UI": "Settings in UI", + "Settings stack. If it's not checked, it wont overwrite. Apply one, then another. Reset is old, update how you need.": "Settings stack. If it's not checked, it wont overwrite. Apply one, then another. Reset is old, update how you need.", + "set to default": "set to default", + "Set Up": "Set Up", + "setup codeformer": "setup codeformer", + "setup gfpgan": "setup gfpgan", + "setup paths": "setup paths", + "setup SD model": "setup SD model", + "Set your total number of keyframes to be 21 more than the last inserted keyframe image.": "Set your total number of keyframes to be 21 more than the last inserted keyframe image.", + "SFW mode 👀 (blur all images)": "SFW mode 👀 (blur all images)", + "SHA256 hash of the current checkpoint": "SHA256 hash of the current checkpoint", + "Shapes": "Shapes", + "Shared memory": "Shared memory", + "sharpen face": "sharpen face", + "Sharpness": "Sharpness", + "Sharpness:": "Sharpness:", + "Shoulder To Hip": "Shoulder To Hip", + "Shoulder Width": "Shoulder Width", + "Show Additional Generation Info": "Show Additional Generation Info", + "Show API Key": "Show API Key", + "Show Aria2 logs in CMD": "Show Aria2 logs in CMD", + "Show Aria2 Logs in CMD": "Show Aria2 Logs in CMD", + "Show Aria2 logs in console": "Show Aria2 logs in console", + "Show batch images in gradio gallerie output": "Show batch images in gradio gallerie output", + "Show Button On Thumb Mode": "Show Button On Thumb Mode", + "Show Button On Thumb Mode in SD webui versions before 1.5.0": "Show Button On Thumb Mode in SD webui versions before 1.5.0", + "Show Civitai Link events in the console": 
"Show Civitai Link events in the console", + "Show command for conversion": "Show command for conversion", + "Show console debug": "Show console debug", + "Show console logs during update scanning": "Show console logs during update scanning", + "Show debug message": "Show debug message", + "Show DepthMap": "顯示深度圖", + "Show description of how to edit tags": "Show description of how to edit tags", + "Show extra networks": "Show extra networks", + "Show extra options": "Show extra options", + "Show grid in gallery": "Show grid in gallery", + "Show HeatMap": "顯示熱度圖", + "Show/Hide Generation Info": "Show/Hide Generation Info", + "Show Images": "Show Images", + "Show intermediate steps": "Show intermediate steps", + "Show internal metadata": "Show internal metadata", + "Show live tag translation below prompt": "Show live tag translation below prompt", + "Show live tag translation below prompt (WIP, expect some bugs)": "Show live tag translation below prompt (WIP, expect some bugs)", + "Show maximum dimension button": "Show maximum dimension button", + "Show minimum dimension button": "Show minimum dimension button", + "Show more info": "Show more info", + "Show '?' next to tags, linking to its Danbooru or e621 wiki page": "Show '?' 
next to tags, linking to its Danbooru or e621 wiki page", + "Show only one page": "Show only one page", + "Show only the tags selected in the Positive Filter": "Show only the tags selected in the Positive Filter", + "Show original image": "Show original image", + "Show pre-defined aspect ratio buttons": "Show pre-defined aspect ratio buttons", + "Show pre-defined percentage buttons": "Show pre-defined percentage buttons", + "Show Preview": "Show Preview", + "Show preview thumbnails for extra networks if available": "Show preview thumbnails for extra networks if available", + "Show progress indicator": "Show progress indicator", + "Show the first N batch img2img results in UI": "Show the first N batch img2img results in UI", + "Show verbose debug info at console": "Show verbose debug info at console", + "shuffle": "shuffle", + "Shuffleing tags by ',' when create texts.": "Shuffleing tags by ',' when create texts.", + "Shuffle wildcards before use for more random outputs": "Shuffle wildcards before use for more random outputs", + "Shutdown server": "Shutdown server", + "Sidebar": "Sidebar", + "Sides": "Sides", + "Sigma Noise": "Sigma Noise", + "Sigma schedule": "Sigma schedule", + "Silhouette": "Silhouette", + "Similarity": "Similarity", + "Simple :": "Simple :", + "Simple (Auto-value)": "Simple (Auto-value)", + "Since compilation happens only on the first run, the first inference (or warm up inference) will be slower than subsequent inferences.": "Since compilation happens only on the first run, the first inference (or warm up inference) will be slower than subsequent inferences.", + "Single": "Single", + "single image": "single image", + "Single image": "Single image", + "Single process": "Single process", + "Single text file with all prompts": "Single text file with all prompts", + "Size": "Size", + "Size of the face when recreating": "Size of the face when recreating", + "Size of the thumbnails (px)": "Size of the thumbnails (px)", + "Size to generate": "Size to 
generate", + "➠ Sketch": "➠ Sketch", + "Skip generation and use (edited/custom) depthmaps in output directory when a file already exists.": "Skip generation and use (edited/custom) depthmaps in output directory when a file already exists.", + "Skip generation and use (edited/custom) depthmaps in output directory when a file exists.": "Skip generation and use (edited/custom) depthmaps in output directory when a file exists.", + "Skip img2img processing when using img2img initial image": "Skip img2img processing when using img2img initial image", + "Skip NSFW Preview Images": "Skip NSFW Preview Images", + "SKip NSFW Preview images": "SKip NSFW Preview images", + "SKip NSFW Preview Images": "SKip NSFW Preview Images", + "Skip/Reset CLIP position_ids": "Skip/Reset CLIP position_ids", + "Skip video creation": "Skip video creation", + "slow": "slow", + "slower": "slower", + "Slow Mo": "Slow Mo", + "Slow-Mo the interpolated video, audio will not be used if enabled": "Slow-Mo the interpolated video, audio will not be used if enabled", + "Smart subject": "Smart subject", + "Smart subject tries to determine what to and not to generate based on your subject. Example, if your Overwrite subject is formed like this: Obese man wearing a kimono": "Smart subject tries to determine what to and not to generate based on your subject. 
Example, if your Overwrite subject is formed like this: Obese man wearing a kimono", + "Smoothing": "Smoothing", + "Socket port for image browser": "Socket port for image browser", + "softedge_hed": "softedge_hed", + "softedge_hedsafe": "softedge_hedsafe", + "softedge_pidinet": "softedge_pidinet", + "softedge_pidisafe": "softedge_pidisafe", + "soft_light": "柔光", + "So it's normal for the first inference after a settings change to be slower, while subsequent inferences use the optimized compiled model and run faster.": "So it's normal for the first inference after a settings change to be slower, while subsequent inferences use the optimized compiled model and run faster.", + "solution.": "solution.", + "Sort bounding boxes by": "Sort bounding boxes by", + "sort by": "排序方式", + "Sort by": "Sort by", + "Sort by:": "Sort by:", + "Sort By:": "Sort By:", + "Sort by alphabetical order": "Sort by alphabetical order", + "Sort by size (larger>smaller)": "Sort by size (larger>smaller)", + "Sort caption on save": "Sort caption on save", + "Sorting": "Sorting", + "Sort LoRA models by": "LoRA 模型的排序方式", + "Sort Order": "Sort Order", + "Sort tags": "Sort tags", + "Sort tags in the images displayed.": "Sort tags in the images displayed.", + "Sort wildcard file contents alphabetically": "Sort wildcard file contents alphabetically", + "Soundtrack path": "Soundtrack path", + "Source image": "Source image", + "Source Images": "Source Images", + "Source image URL": "Source image URL", + "Source URL where this model could be found": "模型的發布網址", + "Space": "Space", + "specify a custom settings file and ignore settings displayed in the interface": "specify a custom settings file and ignore settings displayed in the interface", + "Specify model revision": "Specify model revision", + "Specify model variant": "Specify model variant", + "Specify the amount that you wish to expand the mask by (recommend 0-10)": "Specify the amount that you wish to expand the mask by (recommend 0-10)", + "Specify 
the amount that you wish to expand the mask by (recommend 30)": "Specify the amount that you wish to expand the mask by (recommend 30)", + "specify your own seed schedule": "specify your own seed schedule", + "specify your own seed schedule (found on the Keyframes page)": "specify your own seed schedule (found on the Keyframes page)", + "Speech": "Speech", + "split": "split", + "Split based on cuts (as well)": "Split based on cuts (as well)", + "Split mode": "Split mode", + "Split over-sized images": "Split over-sized images", + "Split oversized images into two": "Split oversized images into two", + "Split sides again after inpainting?": "Split sides again after inpainting?", + "Splits model loss during training into Instance Loss and Prior Loss. Requires class images to be enabled.": "Splits model loss during training into Instance Loss and Prior Loss. Requires class images to be enabled.", + "Split Video": "Split Video", + "square": "square", + "SR Model": "SR Model", + "SSAA": "SSAA", + "Stability API Settings": "Stability API Settings", + "stability_score_offset": "stability_score_offset", + "stability_score_thresh": "stability_score_thresh", + "stability_score_threshold": "stability_score_threshold", + "Stable Diffusion backend": "Stable Diffusion backend", + "Stable Diffusion checkpoint autoload on server start": "Stable Diffusion checkpoint autoload on server start", + "Stable Horde API Key": "Stable Horde API Key", + "Stable Horde Endpoint": "Stable Horde Endpoint", + "Stable Horde Interrogation": "Stable Horde Interrogation", + "Stable Horde Settings": "Stable Horde Settings", + "Stackable": "Stackable", + "Stackable checkbox is not used for saves, it's used when making a selection from the dropdown, whether to apply as stackable or not": "Stackable checkbox is not used for saves, it's used when making a selection from the dropdown, whether to apply as stackable or not", + "Stacked Papercut": "Stacked Papercut", + "stage 1": "stage 1", + "stage1": 
"stage1", + "Stage 1": "Stage 1", + "stage 2": "stage 2", + "stage2": "stage2", + "Stage 2": "Stage 2", + "stage 3": "stage 3", + "(Stage 3, 4, and 6 only show a guide and do nothing actual processing.)": "(Stage 3, 4, and 6 only show a guide and do nothing actual processing.)", + "stage 3.5": "stage 3.5", + "stage 4": "stage 4", + "stage 5": "stage 5", + "stage 6": "stage 6", + "stage 7": "stage 7", + "stage 8": "stage 8", + "stained glass": "stained glass", + "Stained Glass": "Stained Glass", + "Start At Step": "Start At Step", + "Start Auto Translate": "開始自動翻譯", + "Start batch process": "Start batch process", + "Start drawing": "Start drawing", + "start extracting the input video only from this frame number": "start extracting the input video only from this frame number", + "Start generating and upscaling!": "Start generating and upscaling!", + "Starting Control Step schedule": "Starting Control Step schedule", + "Starting seed for the animation. -1 for random": "Starting seed for the animation. 
-1 for random", + "Start of prompt": "Start of prompt", + "Start parsing settings...": "Start parsing settings...", + "Start steps": "Start steps", + "Start training.": "Start training.", + "Start WebUi with option --api for this to work.": "Start WebUi with option --api for this to work.", + "Static engines support a single specific output resolution and batch size.": "Static engines support a single specific output resolution and batch size.", + "status": "status", + "Status:": "Status:", + "Std": "Std", + "Steal 'em quick, by using": "Steal 'em quick, by using", + "Steampunk": "Steampunk", + "step": "step", + "Step": "Step", + "Steps schedule": "Steps schedule", + "Sticker": "Sticker", + "still life": "still life", + "Still Mode (fewer hand motion, works with preprocess `full`)": "Still Mode (fewer hand motion, works with preprocess `full`)", + "Still Mode (fewer head motion, works with preprocess `full`)": "Still Mode (fewer head motion, works with preprocess `full`)", + "*Stitch frames to video*": "*Stitch frames to video*", + "Stop": "停止", + "Stop At Step": "Stop At Step", + "stop the extraction of the video at this frame number. -1 for no limits": "stop the extraction of the video at this frame number. 
-1 for no limits", + "Stop XY": "Stop XY", + "Store frames in ram": "Store frames in ram", + "Store images in database": "Store images in database", + "Store preferences and SVG content locally": "Store preferences and SVG content locally", + "storybook realism": "storybook realism", + "straight-line": "straight-line", + "Strategy Game": "Strategy Game", + "street art": "street art", + "Street Fighter": "Street Fighter", + "streetscape": "streetscape", + "strength": "strength", + "Strength 0 no init": "Strength 0 no init", + "Strength of negative feedback relative to positive feedback.": "Strength of negative feedback relative to positive feedback.", + "Strength schedule": "Strength schedule", + "Stride": "Stride", + "Style": "Style", + "Style 1": "Style 1", + "Style 2": "Style 2", + "Style Fidelity (only for \"Balanced\" mode)": "Style Fidelity (only for \"Balanced\" mode)", + "Style Name": "Style Name", + "Style Selector": "Style Selector", + "Sub directory depth": "子目錄深度", + "Sub folder:": "Sub folder:", + "Sub-folder": "Sub-folder", + "Sub Folder:": "Sub Folder:", + "Subject": "Subject", + "Subject Types": "Subject Types", + "Submit": "Submit", + "Submit results": "Submit results", + "SubSeed": "SubSeed", + "Subseed schedule": "Subseed schedule", + "Subseed strength schedule": "Subseed strength schedule", + "subtract": "subtract", + "Suffix": "Suffix", + "Suggested weight": "Suggested weight", + "Sum": "Sum", + "sum Twice:(A*(1-alpha)+B*alpha)*(1-beta)+C*beta": "sum Twice:(A*(1-alpha)+B*alpha)*(1-beta)+C*beta", + "superfast": "superfast", + "Super Mario": "Super Mario", + "Supported engines:": "Supported engines:", + "Supports boolean operations: (! - negation, & - and, | - or, ^ - xor, \\ - difference, () - nested operations)": "Supports boolean operations: (! 
- negation, & - and, | - or, ^ - xor, \\ - difference, () - nested operations)", + "surrealism": "surrealism", + "Surrealist": "Surrealist", + "Swap axes": "Swap axes", + "Swap hotkey combinations for Zoom and Adjust brush resize": "Swap hotkey combinations for Zoom and Adjust brush resize", + "swap XY": "swap XY", + "Swipe left/right navigates to the next image": "Swipe left/right navigates to the next image", + "Switch prompt between Native language and English": "Switch prompt between Native language and English", + "(switch to another model in the middle of generation)": "(switch to another model in the middle of generation)", + "Switch to Inpaint Upload": "Switch to Inpaint Upload", + "symbolism": "symbolism", + "Synchronize with main seed": "Synchronize with main seed", + "Sync with Default SD checkpoint": "Sync with Default SD checkpoint", + "system": "system", + "System data": "System data", + "System: peak amout of video memory allocated by all running programs, out of total capacity": "System: peak amout of video memory allocated by all running programs, out of total capacity", + "T2IA": "T2IA", + "t2ia_color_grid": "t2ia_color_grid", + "t2ia_sketch_pidi": "t2ia_sketch_pidi", + "t2ia_style_clipvision": "t2ia_style_clipvision", + "Tab": "Tab", + "Tag": "Tag", + "Tag confidences": "Tag confidences", + "Tag confidents": "Tag confidents", + "Tag file output format. Leave blank to use same filename or e.g. \"[name].[hash:sha1].[output_extension]\". Also allowed are [extension] or any other [hash:] supported by hashlib": "Tag file output format. Leave blank to use same filename or e.g. \"[name].[hash:sha1].[output_extension]\". 
Also allowed are [extension] or any other [hash:] supported by hashlib", + "Tagger": "Tagger", + "Tagging": "Tagging", + "Tagging Confidence Threshold": "Tagging Confidence Threshold", + "Tag images": "Tag images", + "Tag one image": "Tag one image", + "Tags": "標記", + "tags from the images displayed.": "tags from the images displayed.", + "(takes precedence over the regular space setting for that position)": "(takes precedence over the regular space setting for that position)", + "Target": "目標", + "Target ControlNet models": "Target ControlNet models", + "Target dataset num: ": "Target dataset num: ", + "Target face : Comma separated face number(s)": "Target face : Comma separated face number(s)", + "Target Folder": "Target Folder", + "Target language": "Target language", + "Target Language": "Target Language", + "Target tokens (comma separated)": "Target tokens (comma separated)", + "Task History": "Task History", + "Task Id": "Task Id", + "Task Queue": "Task Queue", + "Tattoo": "Tattoo", + "Techwear Fashion": "Techwear Fashion", + "temparature": "temparature", + "temperature": "temperature", + "Templates": "Templates", + "Temporal-Warp": "Temporal-Warp", + "TENC Gradient Clip Norm": "TENC Gradient Clip Norm", + "TEnc Weight 1": "文字編碼器權重 1️⃣", + "TEnc Weight 2": "文字編碼器權重 2️⃣", + "TEnc Weight 3": "文字編碼器權重 3️⃣", + "TEnc Weight 4": "文字編碼器權重 4️⃣", + "TEnc Weight 5": "文字編碼器權重 5️⃣", + "TENC Weight Decay": "TENC Weight Decay", + "TensorRT Exporter": "TensorRT Exporter", + "TensorRT Extension": "TensorRT Extension", + "TensorRT uses optimized engines for specific resolutions and batch sizes. You can generate as many optimized engines as desired. Types:": "TensorRT uses optimized engines for specific resolutions and batch sizes. You can generate as many optimized engines as desired. 
Types:", + "Tertiary model": "Tertiary model", + "TEST-MAX-ALL": "TEST-MAX-ALL", + "Test prompt": "測試提示詞", + "Text data view": "Text data view", + "Text Editor": "Text Editor", + "text encoder": "text encoder", + "Text Encoder Learning Rate": "Text Encoder Learning Rate", + "Text file for each image": "Text file for each image", + "Text files directory (optional, will load from input dir if not specified)": "Text files directory (optional, will load from input dir if not specified)", + "Text files directory (Optional, will load from input dir if not specified)": "Text files directory (Optional, will load from input dir if not specified)", + "textile": "textile", + "Text input": "文本輸入", + "TextualInversion": "TextualInversion", + "Texture": "Texture", + "The": "The", + "The 1st and last keyframe images should match.": "The 1st and last keyframe images should match.", + "the 2D vanishing point of perspective (rec. range 30-160)": "the 2D vanishing point of perspective (rec. range 30-160)", + "The algorithm is not able to enhance all images.": "The algorithm is not able to enhance all images.", + "The algorithm used to generate noise used in the diffusion process.": "The algorithm used to generate noise used in the diffusion process.", + "The alpha schedule controls overall alpha for video mix, whether using a composite mask or not.": "The alpha schedule controls overall alpha for video mix, whether using a composite mask or not.", + "The annotator directory inside the SAM extension directory is only a symbolic link. This is to save your space and make the extension repository clean.": "The annotator directory inside the SAM extension directory is only a symbolic link. 
This is to save your space and make the extension repository clean.", + "The Classifier-Free Guidance Scale to use for classifier/regularization images.": "The Classifier-Free Guidance Scale to use for classifier/regularization images.", + "The Classifier-Free Guidance Scale to use for preview images.": "The Classifier-Free Guidance Scale to use for preview images.", + "The code for this extension:": "此擴充功能的程式碼:", + "The current workflow is [text prompt]->[object detection]->[segmentation]. Semantic segmentation support is in Auto SAM panel.": "The current workflow is [text prompt]->[object detection]->[segmentation]. Semantic segmentation support is in Auto SAM panel.", + "The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M": "The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M", + "The directory containing classification/regularization images.": "The directory containing classification/regularization images.", + "The directory containing training images.": "The directory containing training images.", + "the directory in which your mask video is located.": "the directory in which your mask video is located.", + "the directory / URL at which your video file is located for Video Input mode only": "the directory / URL at which your video file is located for Video Input mode only", + "The existing prompt and negative prompt fields are ignored.": "The existing prompt and negative prompt fields are ignored.", + "The \"Export Default Engines\" selection adds support for resolutions between 512x512 and 768x768 for Stable Diffusion 1.5 and 768x768 to 1024x1024 for SDXL with batch sizes 1 to 4.": "The \"Export Default Engines\" selection adds support for resolutions between 512x512 and 768x768 for Stable Diffusion 1.5 and 768x768 to 1024x1024 for SDXL with batch sizes 1 to 4.", + 
"The following will only affect reference face image (and is not affected by sort by size) :": "The following will only affect reference face image (and is not affected by sort by size) :", + "The frames per second that the video will run at": "The frames per second that the video will run at", + "The Guided images mode exposes the following variables for the prompts and the schedules:": "The Guided images mode exposes the following variables for the prompts and the schedules:", + "The height of the output images, in pixels (must be a multiple of 64)": "The height of the output images, in pixels (must be a multiple of 64)", + "(The 'JumpTo...' keybinds (End & Home key by default) will select the first non-embedding result of their direction on the first press for quick navigation in longer lists.)": "(The 'JumpTo...' keybinds (End & Home key by default) will select the first non-embedding result of their direction on the first press for quick navigation in longer lists.)", + "The learning rate scheduler to use. All schedulers use the provided warmup time except for 'constant'. For dadapt_with_warmup it 10% total steps is recommended. You may need to add additional epochs to compensate.": "The learning rate scheduler to use. All schedulers use the provided warmup time except for 'constant'. For dadapt_with_warmup it 10% total steps is recommended. You may need to add additional epochs to compensate.", + "the location of images to create the video from, when use_manual_settings is checked": "the location of images to create the video from, when use_manual_settings is checked", + "the maximum number of output images to be created": "the maximum number of output images to be created", + "The model to train.": "The model to train.", + "The number of Perlin noise octaves, that is the count of P-noise iterations. Higher values will make the noise more soft and smoke-like, whereas lower values will make it look more organic and spotty. 
It is limited by 8 octaves as the resulting gain will run out of bounds.": "The number of Perlin noise octaves, that is the count of P-noise iterations. Higher values will make the noise more soft and smoke-like, whereas lower values will make it look more organic and spotty. It is limited by 8 octaves as the resulting gain will run out of bounds.", + "The number of steps per image (Epoch) to train the text encoder for. Set 0.5 for 50% of the epochs": "The number of steps per image (Epoch) to train the text encoder for. Set 0.5 for 50% of the epochs", + "The number of steps to use when generating classifier/regularization images.": "The number of steps to use when generating classifier/regularization images.", + "then use": "then use", + "The other site allows for making keyframes using": "另一個網站允許使用", + "the pan effect angle": "the pan effect angle", + "the path to a custom settings file": "the path to a custom settings file", + "The path to a txt file to use for sample prompts. Use [filewords] or [name] to insert class token in sample prompts": "The path to a txt file to use for sample prompts. 
Use [filewords] or [name] to insert class token in sample prompts", + "The path to the concepts JSON file, or a JSON string.": "The path to the concepts JSON file, or a JSON string.", + "the path to your init image": "the path to your init image", + "the path to your mask image": "the path to your mask image", + "the path/ URL to an audio file to accompany the video": "the path/ URL to an audio file to accompany the video", + "The position in postprocess at which this script will be executed; 0 means it will be executed before any scripts, 99 means it will probably be executed last.": "The position in postprocess at which this script will be executed; 0 means it will be executed before any scripts, 99 means it will probably be executed last.", + "The process of creating a video can be divided into the following stages.": "The process of creating a video can be divided into the following stages.", + "The prompt to use to generate a sample image": "The prompt to use to generate a sample image", + "The prompt to use when generating preview images.": "The prompt to use when generating preview images.", + "The rate at which the model learns. Default is 2e-6. For optimizers with D-Adaptation recommended learning rate is 1.0": "The rate at which the model learns. Default is 2e-6. For optimizers with D-Adaptation recommended learning rate is 1.0", + "There are a lot of special things build in, based on various research papers. Just try it, and let it surprise you.": "There are a lot of special things build in, based on various research papers. 
Just try it, and let it surprise you.", + "There are an immense number of image types, not only paintings and photo's, but also isometric renders and funko pops.\nYou can however, overwrite it with the most popular ones.": "There are an immense number of image types, not only paintings and photo's, but also isometric renders and funko pops.\nYou can however, overwrite it with the most popular ones.", + "There is *no* Batch mode like in vanilla deforum. Please Use the txt2img tab for that.": "There is *no* Batch mode like in vanilla deforum. Please Use the txt2img tab for that.", + "the required timestamp to reference when resuming. Currently only available in 2D & 3D mode, the timestamp is saved as the settings .txt file name as well as images produced during your previous run. The format follows: yyyymmddhhmmss - a timestamp of when the run was started to diffuse.": "the required timestamp to reference when resuming. Currently only available in 2D & 3D mode, the timestamp is saved as the settings .txt file name as well as images produced during your previous run. The format follows: yyyymmddhhmmss - a timestamp of when the run was started to diffuse.", + "The resolution of input images. When using bucketing, this is the maximum size of image buckets.": "The resolution of input images. When using bucketing, this is the maximum size of image buckets.", + "The results are stored in timestamp_prompts.txt.": "The results are stored in timestamp_prompts.txt.", + "the roll effect angle": "the roll effect angle", + "The search term that determines the inpainting mask": "The search term that determines the inpainting mask", + "These can be used to add textual inversion and LoRA’s. They can also be used to add your models trigger words.": "These can be used to add textual inversion and LoRA’s. They can also be used to add your models trigger words.", + "The seed to use when generating samples. 
Set to -1 to use a random seed every time.": "The seed to use when generating samples. Set to -1 to use a random seed every time.", + "The seed to use when generating the validation sample image. -1 is not supported.": "The seed to use when generating the validation sample image. -1 is not supported.", + "the seed value will increment by 1 for each subsequent frame of the animation": "the seed value will increment by 1 for each subsequent frame of the animation", + "the seed will remain fixed across all frames of animation": "the seed will remain fixed across all frames of animation", + "the seed will remain fixed across all frames of animation. **NOT RECOMMENDED.** Unless you know what you are doing, it will *deep fry* the pictures over time": "the seed will remain fixed across all frames of animation. **NOT RECOMMENDED.** Unless you know what you are doing, it will *deep fry* the pictures over time", + "The smaller this value, the narrower the keyframe spacing, and if set to 0, the keyframes will be equally spaced at the value of [Minimum keyframe gap].": "The smaller this value, the narrower the keyframe spacing, and if set to 0, the keyframes will be equally spaced at the value of [Minimum keyframe gap].", + "the tilt effect angle": "the tilt effect angle", + "The type of memory attention to use. 'Xformers' will provide better performance than flash_attention, but requires a separate installation.": "The type of memory attention to use. 'Xformers' will provide better performance than flash_attention, but requires a separate installation.", + "The untranslated characters will be translated automatically and will not affect the old translations. Use the function in the lower right corner to easily check and quickly modify the current translation.1,Save the setting;2,Click start button;3,Reload your browser.": "未被翻譯的字句將會自動翻譯且不會影響原有的翻譯。使用右下角的功能來簡單的查看並快速編輯正確的翻譯:1. 儲存設定 2. 點選開始按鈕 3. 重新載入 UI", + "The URL to the model on huggingface. 
Should be in the format of 'developer/model_name'.": "The URL to the model on huggingface. Should be in the format of 'developer/model_name'.", + "The WebSocket is currently": "The WebSocket is currently", + "The weight decay for the Text Encoder. Values closer to 0 closely match your training dataset, and values closer to 1 generalize more and deviate from your training dataset. Default is 1e-2. For Dreambooth, recommended value is same as AdamW Weight Decay. For Lora recommended value is 0.01-0.02 higher than AdamW Weight Decay.": "The weight decay for the Text Encoder. Values closer to 0 closely match your training dataset, and values closer to 1 generalize more and deviate from your training dataset. Default is 1e-2. For Dreambooth, recommended value is same as AdamW Weight Decay. For Lora recommended value is 0.01-0.02 higher than AdamW Weight Decay.", + "the whole folder, generated before, not just the output folder": "the whole folder, generated before, not just the output folder", + "The width of the focus region in pixels": "The width of the focus region in pixels", + "The width of the output images, in pixels (must be a multiple of 64)": "The width of the output images, in pixels (must be a multiple of 64)", + "Thick Layered Papercut": "Thick Layered Papercut", + "Thigh": "Thigh", + "Third column (reference) image": "Third column (reference) image", + "[this]": "[this]", + "This extension works well with text captions in comma-separated style (such as the tags generated by DeepBooru interrogator).": "This extension works well with text captions in comma-separated style (such as the tags generated by DeepBooru interrogator).", + "This generator will generate a complete full prompt for you, based on randomness. You can increase the slider, to include more things to put into the prompt. \nRecommended is keeping it around 3-7. Use 10 at your own risk.": "This generator will generate a complete full prompt for you, based on randomness. 
You can increase the slider, to include more things to put into the prompt. \nRecommended is keeping it around 3-7. Use 10 at your own risk.", + "This is an extra stage.": "This is an extra stage.", + "This is a technique to reduce memory usage by clearing activations of certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time for reduced memory usage.": "This is a technique to reduce memory usage by clearing activations of certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time for reduced memory usage.", + "This is ignored if neither batch run or ebsynth are checked": "This is ignored if neither batch run or ebsynth are checked", + "(this is optional. Perform color correction on the img2img results and expect flickering to decrease. Or, you can simply change the color tone from the generated result.)": "(this is optional. Perform color correction on the img2img results and expect flickering to decrease. Or, you can simply change the color tone from the generated result.)", + "This is the total number of training steps that will be performed on each instance image.": "This is the total number of training steps that will be performed on each instance image.", + "This is your models list.": "This is your models list.", + "this link": "this link", + "This mode works ONLY with 2D/3D animation modes. Interpolation and Video Input modes aren't supported.": "This mode works ONLY with 2D/3D animation modes. Interpolation and Video Input modes aren't supported.", + "this option renders N times before the final render. it is suggested to lower your steps if you up your redo. seed is randomized during redo generations and restored afterwards": "this option renders N times before the final render. it is suggested to lower your steps if you up your redo. 
seed is randomized during redo generations and restored afterwards", + "this option takes twice as long because it generates twice in order to capture the optical flow from the previous image to the first generation, then warps the previous image and redoes the generation": "this option takes twice as long because it generates twice in order to capture the optical flow from the previous image to the first generation, then warps the previous image and redoes the generation", + "This page allows you to add some settings to the main interface of txt2img and img2img tabs.": "This page allows you to add some settings to the main interface of txt2img and img2img tabs.", + "This panel is for those who want to upload mask to ControlNet inpainting. It is not part of the SAM feature. It might be removed someday when ControlNet support uploading image and mask. It serves as a temporarily workaround to overcome the unavailability of image with mask uploading feature in ControlNet extension.": "This panel is for those who want to upload mask to ControlNet inpainting. It is not part of the SAM feature. It might be removed someday when ControlNet support uploading image and mask. It serves as a temporarily workaround to overcome the unavailability of image with mask uploading feature in ControlNet extension.", + "This preset affects?": "This preset affects?", "This requires Controlnet 1.1 extension and the tile resample model, install this if you haven't\nIn settings for Controlnet, enable \"Allow other script to control this extension\"": "This requires Controlnet 1.1 extension and the tile resample model, install this if you haven't\nIn settings for Controlnet, enable \"Allow other script to control this extension\"", - "Don’t use wierd blocky upscale mode. Or maybe do?": "Don’t use wierd blocky upscale mode. 
Or maybe do?", - "Enable upscale with extras": "Enable upscale with extras", - "Upscale resize": "Upscale resize", + "This requires the Ultimate SD Upscale extension, install this if you haven't": "This requires the Ultimate SD Upscale extension, install this if you haven't", + "This script is deprecated. Please use the full Deforum extension instead.": "This script is deprecated. Please use the full Deforum extension instead.", + "this subreddit": "this subreddit", + "This was originally a bug in the first release when using multiple batches, now brought back as a feature.\nRaised by redditor drone2222, to bring this back as a toggle, since it did create interesting results. So here it is.": "This was originally a bug in the first release when using multiple batches, now brought back as a feature.\nRaised by redditor drone2222, to bring this back as a toggle, since it did create interesting results. So here it is.", + "This way, you can create unlimited variants of a subject.": "This way, you can create unlimited variants of a subject.", + "This way, you don’t ever have to add it manually again. This file won’t be overwritten during upgrades.": "This way, you don’t ever have to add it manually again. 
This file won’t be overwritten during upgrades.", + "This Will Override Selected Style": "This Will Override Selected Style", + "Those settings are heavy on DOM modification and might conflict with some others extensions": "Those settings are heavy on DOM modification and might conflict with some others extensions", + "Threshold": "Threshold", + "Thresholding Mode": "Thresholding Mode", + "Threshold of delta frame edge": "Threshold of delta frame edge", + "Threshold schedule": "Threshold schedule", + "Threshold (Use only when basis is absolute)": "Threshold (Use only when basis is absolute)", + "Threshold Value Lower": "Threshold Value Lower", + "Threshold Value Upper": "Threshold Value Upper", + "thumbs": "thumbs", + "tile": "tile", + "Tile": "Tile", + "tile_colorfix": "tile_colorfix", + "tile_colorfix+sharp": "tile_colorfix+sharp", + "Tile count:": "Tile count:", + "tile division BG Removers": "tile division BG Removers", + "tile_gaussian": "tile_gaussian", + "tile height": "tile height", + "Tile height": "圖塊高度", + "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", + "Tile overlap, in pixels for SCUNET upscalers. Low values = visible seam.": "Tile overlap, in pixels for SCUNET upscalers. Low values = visible seam.", + "tile_resample": "tile_resample", + "Tile size:": "Tile size:", + "Tile size for ESRGAN upscalers. 0 = no tiling.": "Tile size for ESRGAN upscalers. 0 = no tiling.", + "Tile size for SCUNET upscalers. 0 = no tiling.": "Tile size for SCUNET upscalers. 
0 = no tiling.", + "tile width": "tile width", + "Tile width": "圖塊寬度", + "Tiling direction": "Tiling direction", + "Tilt adjustment threshold": "Tilt adjustment threshold", + "tilt canvas up/down in degrees per frame": "tilt canvas up/down in degrees per frame", + "Tilt shift": "Tilt shift", + "Tilt-Shift": "Tilt-Shift", + "Time": "Time", + "Time in ms to wait before triggering completion again": "Time in ms to wait before triggering completion again", + "Time period:": "Time period:", + "Time Period:": "Time Period:", + "timestamp": "timestamp", + "Tips": "Tips", + "title": "title", + "Title": "Title", + "To apply, go to quick set. Save now works immediately in other tab without restart, filters out non-common between tabs.": "To apply, go to quick set. Save now works immediately in other tab without restart, filters out non-common between tabs.", + "to discuss about Cozy Nest": "to discuss about Cozy Nest", + "To enable, check use_mask in the Init tab": "To enable, check use_mask in the Init tab", + "To first difference": "To first difference", + "to (full path)": "to (full path)", + "To Generate Your Prompt in All Available Styles, Its Better to set batch count to 77 ( Style Count)": "To Generate Your Prompt in All Available Styles, Its Better to set batch count to 77 ( Style Count)", + "🤗 token": "🤗 token", + "Token": "Token", + "Tokenize": "標記拆分", + "Token Length": "Token Length", + "Token Merging": "Token Merging", + "Token Merging - Max downsample": "Token Merging - Max downsample", + "Token Merging - Ratio": "Token Merging - Ratio", + "Token Merging (ToMe)": "Token Merging (ToMe)", + "To Language": "翻譯為:", + "ToMe for Hires. fix": "ToMe for Hires. fix", + "ToMe max. tokens": "ToMe max. 
tokens", + "ToMe merge ratio": "ToMe merge ratio", + "ToMe Merging Ratio": "ToMe Merging Ratio", + "ToMe seed": "ToMe seed", + "Tone Curve": "Tone Curve", + "To next folder level": "To next folder level", + "Toolkit": "Toolkit", + "Tools": "Tools", + "top": "top", + "top-bottom": "top-bottom", + "Top/Bottom Balance": "Top/Bottom Balance", + "top_centered": "top_centered", + "torch GPU test": "torch GPU test", + "to read the documentation of each parameter.": "to read the documentation of each parameter.", + "to share your creations and suggestions.": "來分享您的創作和建議。", + "total": "total", + "(Total)": "(Total)", + "Total num of layers (reload required)": "Total num of layers (reload required)", + "Total num of point for curve (reload required)": "Total num of point for curve (reload required)", + "to understand following options.": "to understand following options.", + "To use an alternate VAE, you can specify the path to a directory containing a pytorch_model.bin representing your VAE.": "To use an alternate VAE, you can specify the path to a directory containing a pytorch_model.bin representing your VAE.", + "Trained tags (if any):": "Trained tags (if any):", + "Trained Tags (if any):": "Trained Tags (if any):", + "Train EMA": "Train EMA", + "Train embedding": "Train embedding", + "Train hypernetwork": "Train hypernetwork", + "Training info": "訓練資訊", + "Training parameters": "訓練參數", + "Train Model": "Train Model", + "Train multiple concepts from a JSON file or string.": "Train multiple concepts from a JSON file or string.", + "Train Text Encoder": "Train Text Encoder", + "Trajectory": "Trajectory", + "Transform Center X": "Transform Center X", + "Transform Center Y": "Transform Center Y", + "Transition Smoothness": "Transition Smoothness", + "translate": "翻譯", + "Translate": "Translate", + "translated fail:": "translated fail:", + "Translated Negative Prompt": "Translated Negative Prompt", + "Translated Prompt": "Translated Prompt", + "Translated Status": "翻譯狀態", + 
"translated text": "翻譯文字", + "Translated Text": "翻譯文字", + "Translate Negative Prompt": "Translate Negative Prompt", + "Translate Prompt": "Translate Prompt", + "Translate prompt words into:": "Translate prompt words into:", + "Translate: x, y, z": "Translate: x, y, z", + "Translation display order": "翻譯顯示順序", + "Translation First": "翻譯優先", + "Translation Service Setting": "翻譯服務設定", + "Translation X": "平移 X", + "Translation Y": "平移 Y", + "Translation Z": "平移 Z", + "Transparent": "透明", + "[transparent-background]": "[transparent-background]", + "transparent-background": "transparent-background", + "transparent-background AND clipseg": "transparent-background AND clipseg", + "transparent-background options": "transparent-background options", + "Transparent PNG": "Transparent PNG", + "Treat LoCon's as LORA's": "Treat LoCon's as LORA's", + "Tribal": "Tribal", + "Trigger Word": "Trigger Word", + "Trim mask by sketch": "Trim mask by sketch", + "Triple sum:A*(1-alpha-beta)+B*alpha+C*beta": "Triple sum:A*(1-alpha-beta)+B*alpha+C*beta", + "True": "True", + "Truncate tags by token count": "Truncate tags by token count", + "Truncate tags by token count.": "Truncate tags by token count.", + "Try to add known trigger words for LORA/LyCO models": "Try to add known trigger words for LORA/LyCO models", + "Tweening frames schedule": "Tweening frames schedule", + "TXT2IMG": "TXT2IMG", + "txt2img-grids": "文生圖網格", + "txt2img history": "txt2img history", + "Type": "類型", + "type of image": "type of image", + "Typography": "Typography", + "UI alternatives": "UI alternatives", + "UI Component order": "UI Component order", + "UI Default global post processing face restorer (requires restart)": "UI Default global post processing face restorer (requires restart)", + "UI Default global post processing face restorer visibility (requires restart)": "UI Default global post processing face restorer visibility (requires restart)", + "UI Default global post processing face 
restorer weight (requires restart)": "UI Default global post processing face restorer weight (requires restart)", + "UI Default global post processing upscaler (requires restart)": "UI Default global post processing upscaler (requires restart)", + "UI Default global post processing upscaler visibility(requires restart)": "UI Default global post processing upscaler visibility(requires restart)", + "UI Default inpainting negative prompt [gender] (requires restart)": "UI Default inpainting negative prompt [gender] (requires restart)", + "UI Default inpainting prompt [gender] is replaced by man or woman (requires restart)": "UI Default inpainting prompt [gender] is replaced by man or woman (requires restart)", + "UI item order for txt2img/img2img tabs": "UI item order for txt2img/img2img tabs", + "ui text": "使用者介面文字", + "UI theme": "UI theme", + "ukiyo-e": "ukiyo-e", + "ultrafast": "ultrafast", + "ultrawide": "ultrawide", + "Uncategorized": "Uncategorized", + "Uncatgorized": "Uncatgorized", + "Uncheck all copies": "Uncheck all copies", + "Underscore replacement excludes (split by comma)": "Underscore replacement excludes (split by comma)", + "Undo": "Undo", + "U-Net features": "U-Net features", + "UNet Weight 1": "UNet 權重 1️⃣", + "UNet Weight 2": "UNet 權重 2️⃣", + "UNet Weight 3": "UNet 權重 3️⃣", + "UNet Weight 4": "UNet 權重 4️⃣", + "UNet Weight 5": "UNet 權重 5️⃣", + "Unfreeze seed": "Unfreeze seed", + "Unfreezes model layers and allows for potentially better training, but makes increased VRAM usage more likely.": "Unfreezes model layers and allows for potentially better training, but makes increased VRAM usage more likely.", + "Uniform noise covers the entire frame. It somewhat flattens and sharpens the video over time, but may be good for cartoonish look. This is the old default setting.": "Uniform noise covers the entire frame. It somewhat flattens and sharpens the video over time, but may be good for cartoonish look. 
This is the old default setting.", + "UniPC order (must be < sampling steps)": "UniPC order (must be < sampling steps)", + "Unload": "Unload", + "Unload all interrogate models": "Unload all interrogate models", + "Unload all models from memory": "Unload all models from memory", + "unload model": "unload model", + "Unload model": "Unload model", + "Unload Model": "Unload Model", + "Unload model after running": "Unload model after running", + "Unload models": "Unload models", + "Unload model to free VRAM": "Unload model to free VRAM", + "Unload SD checkpoint to RAM": "Unload SD checkpoint to RAM", + "Unload VAE and CLIP from VRAM when training": "Unload VAE and CLIP from VRAM when training", + "Unnecessary Javascript file": "Unnecessary Javascript file", + "Unprompted Seed": "Unprompted Seed", + "Unprompted Template Editor": "Unprompted Template Editor", + "unzip the file to": "unzip the file to", + "Upcast cross attention layer to FP32": "Upcast cross attention layer to FP32", + "update": "update", + "Update All Tags": "Update All Tags", + "Update assigned tags": "Update assigned tags", + "Updated At": "Updated At", + "Update Dataset": "Update Dataset", + "Update directory names in database": "Update directory names in database", + "Update image generation information (Experimental)": "Update image generation information (Experimental)", + "Update installed extensions": "Update installed extensions", + "update list": "update list", + "Update Mask": "Update Mask", + "Update Models": "Update Models", + "Update Selected Tags": "Update Selected Tags", + "Update tags for:": "Update tags for:", + "uploaded video FPS": "uploaded video FPS", + "uploaded video resolution": "uploaded video resolution", + "uploaded video total frame count": "uploaded video total frame count", + "Upload GIF": "Upload GIF", + "Upload image": "Upload image", + "Upload Image or .cni file": "Upload Image or .cni file", + "Uploading...": "Uploading...", + "Upload mask": "Upload mask", + "Upload mask 
here cus gradio": "Upload mask here cus gradio", + "Upload mask image": "Upload mask image", + "Upload Mask to ControlNet Inpainting": "Upload Mask to ControlNet Inpainting", + "Upload OR TTS": "Upload OR TTS", + "Upper Arm": "Upper Arm", + "Upper limit for X": "Upper limit for X", + "Upscale": "Upscale", + "Upscale Before Restoring Faces": "Upscale Before Restoring Faces", + "Upscaled swapper mask erosion factor, 1 = default behaviour.": "Upscaled swapper mask erosion factor, 1 = default behaviour.", + "Upscale factor": "Upscale factor", + "Upscale height to": "Upscale height to", + "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back": "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back", + "Upscale image with IMG2IMG": "Upscale image with IMG2IMG", + "Upscale method": "Upscale method", + "Upscale model": "Upscale model", + "upscale output imgs when run is finished": "upscale output imgs when run is finished", "upscaler 1": "upscaler 1", "upscaler 2": "upscaler 2", "Upscaler 2 vis.": "Upscaler 2 vis.", - "GFPGAN vis.": "GFPGAN vis.", - "CodeFormer vis.": "CodeFormer vis.", - "OneButtonPrompt": "OneButtonPrompt", - "https://github.com/AIrjen/OneButtonPrompt": "https://github.com/AIrjen/OneButtonPrompt", - "8bd1129c (Sat May 13 18:36:12 2023)": "8bd1129c (Sat May 13 18:36:12 2023)", - "object": "object", - "animal": "animal", - "humanoid": "humanoid", - "landscape": "landscape", - "concept": "concept", - "popular": "popular", - "greg mode": "greg mode", - "3D": "3D", - "abstract": "abstract", - "angular": "angular", - "anime": "anime", - "architecture": "architecture", - "art nouveau": "art nouveau", - "art deco": "art deco", - "baroque": "baroque", - "bauhaus": "bauhaus", - "cartoon": "cartoon", - "character": "character", - "children's illustration": "children's illustration", - "cityscape": "cityscape", - "clean": "clean", - "cloudscape": "cloudscape", - 
"collage": "collage", - "colorful": "colorful", - "comics": "comics", - "cubism": "cubism", - "dark": "dark", - "detailed": "detailed", - "digital": "digital", - "expressionism": "expressionism", - "fantasy": "fantasy", - "fashion": "fashion", - "fauvism": "fauvism", - "figurativism": "figurativism", - "gore": "gore", - "graffiti": "graffiti", - "graphic design": "graphic design", - "high contrast": "high contrast", - "horror": "horror", - "impressionism": "impressionism", - "installation": "installation", - "light": "light", - "line drawing": "line drawing", - "low contrast": "low contrast", - "luminism": "luminism", - "magical realism": "magical realism", - "manga": "manga", - "melanin": "melanin", - "messy": "messy", - "monochromatic": "monochromatic", - "nature": "nature", - "nudity": "nudity", - "photography": "photography", - "pop art": "pop art", - "primitivism": "primitivism", - "psychedelic": "psychedelic", - "realism": "realism", - "renaissance": "renaissance", - "romanticism": "romanticism", - "scene": "scene", - "sci-fi": "sci-fi", - "sculpture": "sculpture", - "seascape": "seascape", - "stained glass": "stained glass", - "still life": "still life", - "storybook realism": "storybook realism", - "street art": "street art", - "streetscape": "streetscape", - "surrealism": "surrealism", - "symbolism": "symbolism", - "textile": "textile", - "ukiyo-e": "ukiyo-e", - "vibrant": "vibrant", - "watercolor": "watercolor", - "whimsical": "whimsical", - "all - force multiple": "all - force multiple", - "only other types": "only other types", - "AND": "AND", - "BREAK": "BREAK", - "automatic": "automatic", - "prefix AND prompt + suffix": "prefix AND prompt + suffix", - "prefix + prefix + prompt + suffix": "prefix + prefix + prompt + suffix", - "wide": "wide", - "square": "square", - "ultrawide": "ultrawide", - "currently selected model": "currently selected model", - "Chess": "棋盤狀", - "Band pass": "帶通", - "Half tile offset pass": "半圖塊偏移過濾", - "Half tile offset pass + 
intersections": "半圖塊偏移過濾 + 交集", - "Ebsynth Utility": "Ebsynth Utility", - "Project directory": "Project directory", - "Generation TEST!!(Ignore Project directory and use the image and mask specified in the main UI)": "Generation TEST!!(Ignore Project directory and use the image and mask specified in the main UI)", - "Mask option": "Mask option", - "Mask Mode(Override img2img Mask mode)": "Mask Mode(Override img2img Mask mode)", - "Inpaint Area(Override img2img Inpaint area)": "Inpaint Area(Override img2img Inpaint area)", + "Upscale resize": "Upscale resize", + "Upscaler scale": "Upscaler scale", + "Upscaler visibility (if scale = 1)": "Upscaler visibility (if scale = 1)", + "*Upscale uploaded video*": "*Upscale uploaded video*", + "Upscale width to": "Upscale width to", + "Upscaling is performed on the whole image and all faces (including not swapped). Upscaling happens before face restoration.": "Upscaling is performed on the whole image and all faces (including not swapped). Upscaling happens before face restoration.", + "Usage guide": "Usage guide", + "Use": "Use", + "use 256/512 model?": "use 256/512 model?", + "Use a": "Use a", + "Use A1111 resize mode when input is from A1111.": "Use A1111 resize mode when input is from A1111.", + "Use a grayscale image as a mask on your init image. Whiter areas of the mask are areas that change more.": "Use a grayscale image as a mask on your init image. 
Whiter areas of the mask are areas that change more.", + "Use alpha as mask": "Use alpha as mask", + "Use an": "Use an", + "Use another image as ControlNet input": "Use another image as ControlNet input", + "Use another image as mask": "Use another image as mask", + "Use automatic height for gallery (requires Gradio >= 3.36.0)": "Use automatic height for gallery (requires Gradio >= 3.36.0)", + "Use brush": "Use brush", + "Use color corrections": "Use color corrections", + "Use Control Net inpaint model": "Use Control Net inpaint model", + "Use CPU": "Use CPU", + "Use CPU for SAM": "Use CPU for SAM", + "Use CPU Only (SLOW)": "Use CPU Only (SLOW)", + "Use custom colors": "Use custom colors", + "Use custom DepthMap": "Use custom DepthMap", + "Use Custom Threshold (Booru)": "Use Custom Threshold (Booru)", + "Use Custom Threshold (WDv1.4 Tagger)": "Use Custom Threshold (WDv1.4 Tagger)", + "Use delta values for movement parameters": "Use delta values for movement parameters", "Use Depth Map If exists in /video_key_depth": "Use Depth Map If exists in /video_key_depth", - "[here]": "[here]", - "for depth map.": "for depth map.", - "ControlNet option": "ControlNet option", - "Control Net Weight": "Control Net Weight", - "Control Net Weight For Face": "Control Net Weight For Face", - "Use Preprocess image If exists in /controlnet_preprocess": "Use Preprocess image If exists in /controlnet_preprocess", - "Please enable the following settings to use controlnet from this script.": "Please enable the following settings to use controlnet from this script.", - "Settings->ControlNet->Allow other script to control this extension": "Settings->ControlNet->Allow other script to control this extension", - "Loopback option": "Loopback option", - "Img2Img Repeat Count (Loop Back)": "Img2Img Repeat Count (Loop Back)", - "Add N to seed when repeating": "Add N to seed when repeating", - "Auto Tagging option": "Auto Tagging option", - "Auto Tagging": "Auto Tagging", - "Add additional prompts 
to the head": "Add additional prompts to the head", - "Replace '_' with ' '(Does not affect the function to add tokens using add_token.txt.)": "Replace '_' with ' '(Does not affect the function to add tokens using add_token.txt.)", - "The results are stored in timestamp_prompts.txt.": "The results are stored in timestamp_prompts.txt.", - "If you want to use the same tagging results the next time you run img2img, rename the file to prompts.txt": "If you want to use the same tagging results the next time you run img2img, rename the file to prompts.txt", - "Recommend enabling the following settings.": "Recommend enabling the following settings.", - "Settings->Interrogate Option->Interrogate: include ranks of model tags matches in results": "Settings->Interrogate Option->Interrogate: include ranks of model tags matches in results", - "Face Crop option": "Face Crop option", + "Use depth warping": "Use depth warping", + "Used for securing the Web API. Click the refresh button to the right to (re)generate your key, the trash icon to remove it.": "Used for securing the Web API. 
Click the refresh button to the right to (re)generate your key, the trash icon to remove it.", + "Use EMA for finetuning": "Use EMA for finetuning", "use Face Crop img2img": "use Face Crop img2img", - "Face Detection Method": "Face Detection Method", - "If loading of the Yolov5_anime model fails, check": "If loading of the Yolov5_anime model fails, check", - "[this]": "[this]", - "solution.": "solution.", - "Face Crop Resolution": "Face Crop Resolution", - "Max Crop Size": "Max Crop Size", - "Face Denoising Strength": "Face Denoising Strength", - "Face Area Magnification": "Face Area Magnification", - "Disable at the last loopback time": "Disable at the last loopback time", - "Enable Face Prompt": "Enable Face Prompt", - "Face Prompt": "Face Prompt", - "project setting": "project setting", - "configuration": "configuration", - "Original Movie Path": "Original Movie Path", - "Drop Video Here": "拖曳影片到此", - "If you have trouble entering the video path manually, you can also use drag and drop.For large videos, please enter the path manually.": "If you have trouble entering the video path manually, you can also use drag and drop.For large videos, please enter the path manually.", - "stage 1": "stage 1", - "stage 2": "stage 2", - "stage 3.5": "stage 3.5", - "stage 7": "stage 7", - "stage 8": "stage 8", - "etc": "etc", - "Frame Width": "Frame Width", - "Frame Height": "Frame Height", - "-1 means that it is calculated automatically. If both are -1, the size will be the same as the source size.": "-1 means that it is calculated automatically. 
If both are -1, the size will be the same as the source size.", - "Masking Method": "Masking Method", - "transparent-background": "transparent-background", - "clipseg": "clipseg", - "transparent-background AND clipseg": "transparent-background AND clipseg", - "transparent-background options": "transparent-background options", - "Mask Threshold": "Mask Threshold", - "configuration for": "configuration for", - "[transparent-background]": "[transparent-background]", "Use Fast Mode(It will be faster, but the quality of the mask will be lower.)": "Use Fast Mode(It will be faster, but the quality of the mask will be lower.)", + "Use FP16 or BF16 (if available) will help improve memory performance. Required when using 'xformers'.": "Use FP16 or BF16 (if available) will help improve memory performance. Required when using 'xformers'.", + "(Use FP8 to store Linear/Conv layers' weight. Require pytorch>=2.1.0.)": "(Use FP8 to store Linear/Conv layers' weight. Require pytorch>=2.1.0.)", + "(Useful for users who use PortMaster or other software that controls the DNS)": "(Useful for users who use PortMaster or other software that controls the DNS)", + "Use GPU": "Use GPU", + "Use GPU, only for CUDA on Windows/Linux - experimental and risky, can messed up dependencies (requires restart)": "Use GPU, only for CUDA on Windows/Linux - experimental and risky, can messed up dependencies (requires restart)", + "Use half floats": "Use half floats", + "Use Hash from Metadata (May have false-positives but can be useful if you've pruned models)": "Use Hash from Metadata (May have false-positives but can be useful if you've pruned models)", + "Use improved segmented mask (use pastenet to mask only the face)": "Use improved segmented mask (use pastenet to mask only the face)", + "Use init": "Use init", + "Use init image as video": "Use init image as video", + "Use init image instead of video. Doesn't require generation of inputframes.": "Use init image instead of video. 
Doesn't require generation of inputframes.", + "Use inpaint width/height": "Use inpaint width/height", + "Use input image's alpha channel as mask": "Use input image's alpha channel as mask", + "Use Interrogator Caption": "Use Interrogator Caption", "Use Jit": "Use Jit", - "clipseg options": "clipseg options", - "Mask Target (e.g., girl, cats)": "Mask Target (e.g., girl, cats)", - "Exclude Target (e.g., finger, book)": "Exclude Target (e.g., finger, book)", - "Mask Blur Kernel Size(MedianBlur)": "Mask Blur Kernel Size(MedianBlur)", - "Mask Blur Kernel Size(GaussianBlur)": "Mask Blur Kernel Size(GaussianBlur)", - "Minimum keyframe gap": "Minimum keyframe gap", - "Maximum keyframe gap": "Maximum keyframe gap", - "Threshold of delta frame edge": "Threshold of delta frame edge", - "Add last frame to keyframes": "Add last frame to keyframes", - "[color-matcher]": "[color-matcher]", - "Color Transfer Method": "Color Transfer Method", - "default": "default", + "Use kohya-ss's finetuning metadata json": "Use kohya-ss's finetuning metadata json", + "Use local groundingdino to bypass C++ problem": "Use local groundingdino to bypass C++ problem", + "Use LORA directory for LoCon's": "Use LORA directory for LoCon's", + "Use LyCoris handler for all Lora types": "Use LyCoris handler for all Lora types", + "Use main sampler": "Use main sampler", + "Use mask": "Use mask", + "Use mask as output alpha channel": "Use mask as output alpha channel", + "Use mask video": "Use mask video", + "Use \"Maximum dimension\" for aspect ratio buttons (by default we use the max width or height)": "Use \"Maximum dimension\" for aspect ratio buttons (by default we use the max width or height)", + "use MBW": "use MBW", + "use Merge Block Weights": "use Merge Block Weights", + "Use mid-control on highres pass (second pass)": "Use mid-control on highres pass (second pass)", + "Use minimal area (for close faces)": "Use minimal area (for close faces)", + "Use minimal area for face selection": "Use minimal 
area for face selection", + "Use model EMA weights when possible": "Use model EMA weights when possible", + "Use noise mask": "Use noise mask", + "Use old aspect ratio formula": "Use old aspect ratio formula", + "Use only-mid-control on high-res. fix (second pass)": "Use only-mid-control on high-res. fix (second pass)", + "use optical flow estimation for your in-between (cadence) frames": "use optical flow estimation for your in-between (cadence) frames", + "Use optimized images in the thumbnail interface (significantly reduces the amount of data transferred)": "Use optimized images in the thumbnail interface (significantly reduces the amount of data transferred)", + "Use output of nth layer from back of text encoder (n>=1)": "Use output of nth layer from back of text encoder (n>=1)", + "Use Preprocess image If exists in /controlnet_preprocess": "Use Preprocess image If exists in /controlnet_preprocess", + "use prompt and setting by image": "use prompt and setting by image", + "Use prompt from preview image": "Use prompt from preview image", + "Use random seeds for sub-generations when doing a rolling generation (WARNING!!! The result will be non-deterministic!!!)": "Use random seeds for sub-generations when doing a rolling generation (WARNING!!! 
The result will be non-deterministic!!!)", + "Use raw CLIP token to calculate token count (without emphasis or embeddings)": "Use raw CLIP token to calculate token count (without emphasis or embeddings)", + "Use recursive with glob pattern": "Use recursive with glob pattern", + "Use recycle bin when deleting images": "刪除圖像時丟入資源回收桶", + "Use regex": "Use regex", + "User Interface": "User Interface", + "username": "username", + "User name": "User name", + "Username": "Username", + "Uses aesthetic image scorer extension to check the quality of the image.": "Uses aesthetic image scorer extension to check the quality of the image.", + "Use same checkpoint": "Use same checkpoint", + "Use same sampler": "Use same sampler", + "Use same seed for all images": "Use same seed for all images", + "Use same VAE": "Use same VAE", + "use scale": "use scale", + "Use SD webui's built-in hashing functions for model hashes. If SD webui was launced with `--no-hashing`, hashing will fail, but this provides a hash cache, which should make repeat model scanning faster.": "Use SD webui's built-in hashing functions for model hashes. If SD webui was launced with `--no-hashing`, hashing will fail, but this provides a hash cache, which should make repeat model scanning faster.", + "Use SD webui's built-in hashing functions for model hashes. This provides a hash cache, which should make repeat model scanning faster and make hashes reusable across features.": "Use SD webui's built-in hashing functions for model hashes. 
This provides a hash cache, which should make repeat model scanning faster and make hashes reusable across features.", + "Use separate CFG scale": "Use separate CFG scale", + "Use separate CFG scale 10th": "Use separate CFG scale 10th", + "Use separate CFG scale 2nd": "Use separate CFG scale 2nd", + "Use separate CFG scale 3rd": "Use separate CFG scale 3rd", + "Use separate CFG scale 4th": "Use separate CFG scale 4th", + "Use separate CFG scale 5th": "Use separate CFG scale 5th", + "Use separate CFG scale 6th": "Use separate CFG scale 6th", + "Use separate CFG scale 7th": "Use separate CFG scale 7th", + "Use separate CFG scale 8th": "Use separate CFG scale 8th", + "Use separate CFG scale 9th": "Use separate CFG scale 9th", + "Use separate checkpoint": "Use separate checkpoint", + "Use separate checkpoint 10th": "Use separate checkpoint 10th", + "Use separate checkpoint 2nd": "Use separate checkpoint 2nd", + "Use separate checkpoint 3rd": "Use separate checkpoint 3rd", + "Use separate checkpoint 4th": "Use separate checkpoint 4th", + "Use separate checkpoint 5th": "Use separate checkpoint 5th", + "Use separate checkpoint 6th": "Use separate checkpoint 6th", + "Use separate checkpoint 7th": "Use separate checkpoint 7th", + "Use separate checkpoint 8th": "Use separate checkpoint 8th", + "Use separate checkpoint 9th": "Use separate checkpoint 9th", + "Use separate CLIP skip": "Use separate CLIP skip", + "Use separate CLIP skip 10th": "Use separate CLIP skip 10th", + "Use separate CLIP skip 2nd": "Use separate CLIP skip 2nd", + "Use separate CLIP skip 3rd": "Use separate CLIP skip 3rd", + "Use separate CLIP skip 4th": "Use separate CLIP skip 4th", + "Use separate CLIP skip 5th": "Use separate CLIP skip 5th", + "Use separate CLIP skip 6th": "Use separate CLIP skip 6th", + "Use separate CLIP skip 7th": "Use separate CLIP skip 7th", + "Use separate CLIP skip 8th": "Use separate CLIP skip 8th", + "Use separate CLIP skip 9th": "Use separate CLIP skip 9th", + "Use separate 
noise multiplier": "Use separate noise multiplier", + "Use separate noise multiplier 10th": "Use separate noise multiplier 10th", + "Use separate noise multiplier 2nd": "Use separate noise multiplier 2nd", + "Use separate noise multiplier 3rd": "Use separate noise multiplier 3rd", + "Use separate noise multiplier 4th": "Use separate noise multiplier 4th", + "Use separate noise multiplier 5th": "Use separate noise multiplier 5th", + "Use separate noise multiplier 6th": "Use separate noise multiplier 6th", + "Use separate noise multiplier 7th": "Use separate noise multiplier 7th", + "Use separate noise multiplier 8th": "Use separate noise multiplier 8th", + "Use separate noise multiplier 9th": "Use separate noise multiplier 9th", + "Use separate sampler": "Use separate sampler", + "Use separate sampler 10th": "Use separate sampler 10th", + "Use separate sampler 2nd": "Use separate sampler 2nd", + "Use separate sampler 3rd": "Use separate sampler 3rd", + "Use separate sampler 4th": "Use separate sampler 4th", + "Use separate sampler 5th": "Use separate sampler 5th", + "Use separate sampler 6th": "Use separate sampler 6th", + "Use separate sampler 7th": "Use separate sampler 7th", + "Use separate sampler 8th": "Use separate sampler 8th", + "Use separate sampler 9th": "Use separate sampler 9th", + "Use separate steps": "Use separate steps", + "Use separate steps 10th": "Use separate steps 10th", + "Use separate steps 2nd": "Use separate steps 2nd", + "Use separate steps 3rd": "Use separate steps 3rd", + "Use separate steps 4th": "Use separate steps 4th", + "Use separate steps 5th": "Use separate steps 5th", + "Use separate steps 6th": "Use separate steps 6th", + "Use separate steps 7th": "Use separate steps 7th", + "Use separate steps 8th": "Use separate steps 8th", + "Use separate steps 9th": "Use separate steps 9th", + "Use separate VAE": "Use separate VAE", + "Use separate VAE 10th": "Use separate VAE 10th", + "Use separate VAE 2nd": "Use separate VAE 2nd", + "Use 
separate VAE 3rd": "Use separate VAE 3rd", + "Use separate VAE 4th": "Use separate VAE 4th", + "Use separate VAE 5th": "Use separate VAE 5th", + "Use separate VAE 6th": "Use separate VAE 6th", + "Use separate VAE 7th": "Use separate VAE 7th", + "Use separate VAE 8th": "Use separate VAE 8th", + "Use separate VAE 9th": "Use separate VAE 9th", + "Use separate width/height": "Use separate width/height", + "Use separate width/height 10th": "Use separate width/height 10th", + "Use separate width/height 2nd": "Use separate width/height 2nd", + "Use separate width/height 3rd": "Use separate width/height 3rd", + "Use separate width/height 4th": "Use separate width/height 4th", + "Use separate width/height 5th": "Use separate width/height 5th", + "Use separate width/height 6th": "Use separate width/height 6th", + "Use separate width/height 7th": "Use separate width/height 7th", + "Use separate width/height 8th": "Use separate width/height 8th", + "Use separate width/height 9th": "Use separate width/height 9th", + "Uses Imagic for training instead of full dreambooth, useful for training with a single instance image.": "Uses Imagic for training instead of full dreambooth, useful for training with a single instance image.", + "Uses Low-rank Adaptation for Fast Text-to-Image Diffusion Fine-tuning. Uses less VRAM, saves a .pt file instead of a full checkpoint": "Uses Low-rank Adaptation for Fast Text-to-Image Diffusion Fine-tuning. Uses less VRAM, saves a .pt file instead of a full checkpoint", + "Use Smart-Steps": "Use Smart-Steps", + "Use Space Instead Of _": "Use Space Instead Of _", + "Use spaces instead of underscore": "Use spaces instead of underscore", + "Use static shapes.": "Use static shapes.", + "Uses your": "Uses your", + "Use tags like [seed] and [date] to define how filenames for images are chosen. Leave empty for default.": "Use tags like [seed] and [date] to define how filenames for images are chosen. 
Leave empty for default.", + "Use tags like [seed] and [date] to define how subdirectories for images and grids are chosen. Leave empty for default.": "Use tags like [seed] and [date] to define how subdirectories for images and grids are chosen. Leave empty for default.", + "Use Text Escape": "Use Text Escape", + "Use the filter icon to filter results.": "Use the filter icon to filter results.", + "Use the img2img inpainting tab.": "Use the img2img inpainting tab.", + "Use the negative_prompts field to automatically append all words as a negative prompt. *Don't* add --neg in the negative_prompts field!": "Use the negative_prompts field to automatically append all words as a negative prompt. *Don't* add --neg in the negative_prompts field!", + "Use the wildcard in your script by typing the name of the file or copying the text from the Wildcards file text box": "Use the wildcard in your script by typing the name of the file or copying the text from the Wildcards file text box", + "Use the wildcard in your script by typing the name of the file or copying the text from the Wildcards file text box.": "Use the wildcard in your script by typing the name of the file or copying the text from the Wildcards file text box.", + "Use this extension to generate optimized engines and enable the best performance on NVIDIA RTX GPUs with TensorRT. Please follow the instructions below to set everything up.": "Use this extension to generate optimized engines and enable the best performance on NVIDIA RTX GPUs with TensorRT. Please follow the instructions below to set everything up.", + "Use this negative prompt": "Use this negative prompt", + "Use this when scanning can not find a local model on civitai": "Use this when scanning can not find a local model on civitai", + "Use Ultimate SD Upscale script instead": "Use Ultimate SD Upscale script instead", + "Use Upscaled": "Use Upscaled", + "Use web address or local path. 
Note: if the image box below is used then this field is ignored.": "Use web address or local path. Note: if the image box below is used then this field is ignored.", + "Use wildcards for negative prompts": "Use wildcards for negative prompts", + "Use xformers": "Use xformers", + "VAE Merger": "VAE Merger", + "VAE model": "VAE model", + "Validate": "Validate", + "values": "values", + "Values closer to 0 closely match your training dataset, and values closer to 1 generalize more and deviate from your training dataset. Default is 1e-2, values lower than 0.1 are recommended. For D-Adaptation values between 0.02 and 0.04 are recommended": "Values closer to 0 closely match your training dataset, and values closer to 1 generalize more and deviate from your training dataset. Default is 1e-2, values lower than 0.1 are recommended. For D-Adaptation values between 0.02 and 0.04 are recommended", + "Value to set/add/mul": "Value to set/add/mul", + "Variational autoencoder": "Variational autoencoder", + "Vectors": "Vectors", + "Vector Studio": "Vector Studio", + "verbose console output": "verbose console output", + "version": "version", + "Version:": "Version:", + "version 2.14.0 of the WebUI extension": "version 2.14.0 of the WebUI extension", + "vertical": "vertical", + "vertical only": "vertical only", + "vertical split num": "vertical split num", + "veryfast": "veryfast", + "veryslow": "veryslow", + "vibrant": "vibrant", + "Vid2depth": "Vid2depth", + "Video Depth": "Video Depth", + "Video encoding": "Video encoding", + "Video Init": "Video Init", + "Video init path": "Video init path", + "video_init_path, extract_nth_frame, overwrite_extracted_frames": "video_init_path, extract_nth_frame, overwrite_extracted_frames", + "Video init path/ URL": "Video init path/ URL", + "Video Input": "影片輸入", + "video_input mode only, enables the extraction and use of a separate video file intended for use as a mask. 
White areas of the extracted video frames will not be affected by diffusion, while black areas will be fully effected. Lighter/darker areas are affected dynamically.": "video_input mode only, enables the extraction and use of a separate video file intended for use as a mask. White areas of the extracted video frames will not be affected by diffusion, while black areas will be fully effected. Lighter/darker areas are affected dynamically.", + "Video mask path": "Video mask path", + "Video Name": "Video Name", + "Video output": "Video output", + "Video Output Settings": "Video Output Settings", + "Video Segment Duration (seconds)": "Video Segment Duration (seconds)", + "Video source": "Video source", + "Video to get Depth from": "Video to get Depth from", + "Video to Interpolate": "Video to Interpolate", + "Video to Upscale": "Video to Upscale", + "Video Upscaling": "Video Upscaling", + "View Path": "View Path", + "View Path Select": "View Path Select", + "View saved data": "View saved data", + "View the wiki for usage tips.": "View the wiki for usage tips.", + "Vignette": "Vignette", + ". Visit": ". Visit", + "Visual style": "Visual style", + "vivid_light": "強烈光線", + "VRAM usage polls per second during generation. Set to 0 to disable.": "VRAM usage polls per second during generation. Set to 0 to disable.", + "Want to figure out what a good prompt might be to create new images like an existing one? The CLIP Interrogator is here to get you answers!": "Want to figure out what a good prompt might be to create new images like an existing one? 
The CLIP Interrogator is here to get you answers!", + "Warmup Period": "Warmup Period", + "Warn if changes in caption is not saved": "Warn if changes in caption is not saved", + "WARNING : Settings are immediately applied but will not be saved until you click \"Save\"": "WARNING : Settings are immediately applied but will not be saved until you click \"Save\"", + "WARNING : Some visual settings are immediately applied but will not be saved until you click \"Save\"": "WARNING : Some visual settings are immediately applied but will not be saved until you click \"Save\"", + "(Warning: This is an external site and very likely contains NSFW examples!)": "(Warning: This is an external site and very likely contains NSFW examples!)", + "watercolor": "watercolor", + "Watercolor": "Watercolor", + "Wave offset (ignore this if you don't know what it means)": "Wave offset (ignore this if you don't know what it means)", + "Waves color": "Waves color", + "WDv1.4 Tagger Score Threshold": "WDv1.4 Tagger Score Threshold", + "Weight": "權重", + "Weight 1": "權重 1️⃣", + "Weight 2": "權重 2️⃣", + "Weight 3": "權重 3️⃣", + "Weight 4": "權重 4️⃣", + "Weight 5": "權重 5️⃣", + "weights alpha": "weights alpha", + "Weights alpha": "Weights alpha", + "weights beta": "weights beta", + "Weights beta": "Weights beta", + "Weight schedule": "Weight schedule", + "Weights Presets": "Weights Presets", + "Weights setting": "Weights setting", + "Weights Setting": "Weights Setting", + "Weight sum": "Weight sum", + "Weight sum:A*(1-alpha)+B*alpha": "Weight sum:A*(1-alpha)+B*alpha", + "Weight threshold": "Weight threshold", + "Weight values": "Weight values", + "Weight_values": "Weight_values", + "Welcome to LightDiffusionFlow! \\(^o^)/~": "Welcome to LightDiffusionFlow! 
\\(^o^)/~", + "When adding to prompt, refer to lora by": "When adding to prompt, refer to lora by", + "when checked, do not output a video": "when checked, do not output a video", + "When enabled, a checkpoint will be generated at the specified epoch intervals while training is active. This also controls manual generation using the 'save weights' button while training is active.": "When enabled, a checkpoint will be generated at the specified epoch intervals while training is active. This also controls manual generation using the 'save weights' button while training is active.", + "When enabled, a checkpoint will be generated when training completes successfully.": "When enabled, a checkpoint will be generated when training completes successfully.", + "When enabled, a checkpoint will be generated when training is canceled by the user.": "When enabled, a checkpoint will be generated when training is canceled by the user.", + "When enabled, a unique snapshot of the diffusion weights will be saved at each specified epoch interval. This uses more HDD space (A LOT), but allows resuming from training, including the optimizer state.": "When enabled, a unique snapshot of the diffusion weights will be saved at each specified epoch interval. This uses more HDD space (A LOT), but allows resuming from training, including the optimizer state.", + "When enabled, a unique snapshot of the diffusion weights will be saved when training completes. This uses more HDD space, but allows resuming from training including the optimizer state.": "When enabled, a unique snapshot of the diffusion weights will be saved when training completes. This uses more HDD space, but allows resuming from training including the optimizer state.", + "When enabled, a unique snapshot of the diffusion weights will be saved when training is canceled. 
This uses more HDD space, but allows resuming from training including the optimizer state.": "When enabled, a unique snapshot of the diffusion weights will be saved when training is canceled. This uses more HDD space, but allows resuming from training including the optimizer state.", + "When enabled, checkpoints will be saved to a subdirectory in the selected checkpoints folder.": "When enabled, checkpoints will be saved to a subdirectory in the selected checkpoints folder.", + "When enabled, tags after the first ',' in a prompt will be randomly ordered, which can potentially improve training.": "When enabled, tags after the first ',' in a prompt will be randomly ordered, which can potentially improve training.", + "when enabled, will re-extract video frames each run. When using video_input mode, the run will be instructed to write video frames to the drive. If you’ve already populated the frames needed, uncheck this box to skip past redundant extraction, and immediately start the render. If you have not extracted frames, you must run at least once with this box checked to write the necessary frames.": "when enabled, will re-extract video frames each run. When using video_input mode, the run will be instructed to write video frames to the drive. If you’ve already populated the frames needed, uncheck this box to skip past redundant extraction, and immediately start the render. 
If you have not extracted frames, you must run at least once with this box checked to write the necessary frames.", + "(when generating captions using postprocessing; Ignore = use generated; Keep = use original; Prepend/Append = combine both)": "(when generating captions using postprocessing; Ignore = use generated; Keep = use original; Prepend/Append = combine both)", + "When loading models attempt stream loading optimized for slow or network storage": "When loading models attempt stream loading optimized for slow or network storage", + "When loading models attempt to reuse previous model dictionary": "When loading models attempt to reuse previous model dictionary", + "When 'Pause After N Epochs' is greater than 0, this is the amount of time, in seconds, that training will be paused for": "When 'Pause After N Epochs' is greater than 0, this is the amount of time, in seconds, that training will be paused for", + "When performing the backwards pass, gradients will be set to none, instead of creating a new empty tensor. This will slightly improve VRAM.": "When performing the backwards pass, gradients will be set to none, instead of creating a new empty tensor. This will slightly improve VRAM.", + "when this box is checked, and FFMPEG mp4 is selected as the output format, an audio file will be multiplexed with the video.": "when this box is checked, and FFMPEG mp4 is selected as the output format, an audio file will be multiplexed with the video.", + "When this box is checked latents will be cached. Caching latents will use more VRAM, but improve training speed.": "When this box is checked latents will be cached. Caching latents will use more VRAM, but improve training speed.", + "(When using controlnet together, you can put in large values (even 1.0 is possible).)": "(When using controlnet together, you can put in large values (even 1.0 is possible).)", + "When using [filewords], this is the class identifier to use/find in existing prompts. 
Should be a single word.": "When using [filewords], this is the class identifier to use/find in existing prompts. Should be a single word.", + "When using [filewords], this is the instance identifier that is unique to your subject. Should be a single word.": "When using [filewords], this is the instance identifier that is unique to your subject. Should be a single word.", + "When you are done click the": "When you are done click the", + "When you fill in the Overwrite subject field, that subject will be used to build the dynamic prompt around. It is best, if you set the subject type to match the subject. For example, set it to humanoid if you place a person in the override subject field.": "When you fill in the Overwrite subject field, that subject will be used to build the dynamic prompt around. It is best, if you set the subject type to match the subject. For example, set it to humanoid if you place a person in the override subject field.", + "When you have --xformers in your command line args, you want AnimateDiff to": "When you have --xformers in your command line args, you want AnimateDiff to", + "Where to insert the trigger keyword": "Where to insert the trigger keyword", + "Where to save Controlnet data?": "Where to save Controlnet data?", + "Whether to use optical flow to blend frames during cadence (if cadence more than 1)": "Whether to use optical flow to blend frames during cadence (if cadence more than 1)", + "whimsical": "whimsical", + "White is Opaque": "White is Opaque", + "wide": "wide", + "(will append after comma if the above is enabled)": "(will append after comma if the above is enabled)", + "will do nothing!": "will do nothing!", + "will ignore all motion parameters and attempt to reference a video loaded into the runtime, specified by the video_init_path. Max_frames is ignored during video_input mode, and instead, follows the number of frames pulled from the video’s length. 
Resume_from_timestring is NOT available with Video_Input mode.": "will ignore all motion parameters and attempt to reference a video loaded into the runtime, specified by the video_init_path. Max_frames is ignored during video_input mode, and instead, follows the number of frames pulled from the video’s length. Resume_from_timestring is NOT available with Video_Input mode.", + "will output a greyscale depth map image alongside the output images.": "will output a greyscale depth map image alongside the output images.", + "Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "Will upscale the image to twice the dimensions; use width and height sliders to set tile size", + "(Will use & prefer the native activation keywords settable in the extra networks UI. Other functionality requires the": "(Will use & prefer the native activation keywords settable in the extra networks UI. Other functionality requires the", + "(WIP, expect some bugs)": "(WIP, expect some bugs)", + "With confidence": "With confidence", + "With Workflow mode, you turn off the automatic generation of new prompts on ‘generate’, and it will use the Workflow prompt field instead. So you can work and finetune any fun prompts without turning of the script.": "With Workflow mode, you turn off the automatic generation of new prompts on ‘generate’, and it will use the Workflow prompt field instead. 
So you can work and finetune any fun prompts without turning of the script.", + "🧙 Wizard": "🧙 Wizard", + "Word delimiters when editing the prompt with Ctrl+up/down": "Word delimiters when editing the prompt with Ctrl+up/down", + "word masks": "word masks", + "words in here will be added to the start of all positive prompts": "words in here will be added to the start of all positive prompts", + "Worker Name": "Worker Name", + "Workflow": "Workflow", + "Workflow assist": "Workflow assist", + "Workflow assist, suggestions by redditor Woisek.": "Workflow assist, suggestions by redditor Woisek.", + "Workflow Editor": "Workflow Editor", + "Workflow mode, turns off prompt generation and uses below Workflow prompt instead.": "Workflow mode, turns off prompt generation and uses below Workflow prompt instead.", + "Workflow prompt": "Workflow prompt", + "*Work In Progress*. All params below are going to be keyframable at some point. If you want to speedup the integration, join Deforum's development. 😉": "*Work In Progress*. All params below are going to be keyframable at some point. If you want to speedup the integration, join Deforum's development. 
😉", + "wrap": "包裹", + "Write infotext to metadata of the generated image": "Write infotext to metadata of the generated image", + "write merged model ID to": "write merged model ID to", + "Write weights to tags files": "Write weights to tags files", + "X center axis for 2D angle/zoom": "X center axis for 2D angle/zoom", + "x center axis for 2D angle/zoom *only*": "x center axis for 2D angle/zoom *only*", + "xmenber": "xmenber", + "xtype": "xtype", + "X Types": "X Types", + "X Values": "X Values", + "X/Y plot": "X/Y plot", + "XYZ plot": "XYZ plot", + "XYZ Plot": "XYZ Plot", + "XYZ plot settings": "XYZ plot settings", + "Y center axis for 2D angle/zoom": "Y center axis for 2D angle/zoom", + "y center axis for 2D angle/zoom *only*": "y center axis for 2D angle/zoom *only*", + "Yellow | Blue": "Yellow | Blue", + "Yes": "是", + "ymenber": "ymenber", + "You are using": "You are using", + "You can also choose the prompt seperator mode for use with Latent Couple extension": "You can also choose the prompt seperator mode for use with Latent Couple extension", + ". You can also join this": ". You can also join this", + "You can change the maximum execution time, by default it’s 30seconds.": "You can change the maximum execution time, by default it’s 30seconds.", + "You can choose a certain subject type, if you want to generate something more specific. It has the following types:": "You can choose a certain subject type, if you want to generate something more specific. It has the following types:", + "(You can create your own API key in your": "(You can create your own API key in your", + "You can enhance semantic segmentation for control_v11p_sd15_seg from lllyasviel. Non-semantic segmentation for Edit-Anything will be supported when they convert their models to lllyasviel format.": "You can enhance semantic segmentation for control_v11p_sd15_seg from lllyasviel. 
Non-semantic segmentation for Edit-Anything will be supported when they convert their models to lllyasviel format.", + "You can enhance semantic segmentation for control_v11p_sd15_seg from lllyasviel. You can also utilize": "You can enhance semantic segmentation for control_v11p_sd15_seg from lllyasviel. You can also utilize", + "You can generate image layout either in single image or in batch. Since there might be A LOT of outputs, there is no gallery for preview. You need to go to the output folder for either single image or batch process.": "You can generate image layout either in single image or in batch. Since there might be A LOT of outputs, there is no gallery for preview. You need to go to the output folder for either single image or batch process.", + "You can mask images by their categories via semantic segmentation. Please enter category ids (integers), separated by": "You can mask images by their categories via semantic segmentation. Please enter category ids (integers), separated by", + "You can mask images by their categories via semantic segmentation. Please enter category ids (integers), separated by +. Visit here for ade20k and here for coco to get category->id map. Note that coco jumps some numbers, so the actual ID is line_number - 21.": "You can mask images by their categories via semantic segmentation. Please enter category ids (integers), separated by +. Visit here for ade20k and here for coco to get category->id map. Note that coco jumps some numbers, so the actual ID is line_number - 21.", + "You can now start generating images accelerated by TRT. If you need to create more Engines, go to the TensorRT tab.": "You can now start generating images accelerated by TRT. 
If you need to create more Engines, go to the TensorRT tab.", + "You can put any image or images or video you like in the background.": "You can put any image or images or video you like in the background.", + "You can put comma seperated values here, those will be ignored from any list processing. For example, adding \"\"film grain, sepia\"\", will make these values not appear during generation.": "You can put comma seperated values here, those will be ignored from any list processing. For example, adding \"\"film grain, sepia\"\", will make these values not appear during generation.", + "You can specify in this field -> [Ebsynth Utility]->[configuration]->[stage 8]->[Background source]": "You can specify in this field -> [Ebsynth Utility]->[configuration]->[stage 8]->[Background source]", + "You can specify \"path_to_project_dir/inv/crossfade_tmp\".": "You can specify \"path_to_project_dir/inv/crossfade_tmp\".", + "You can stop the algorithm at any time": "You can stop the algorithm at any time", + "You can toggle the separator mode. Standardly this is a comma, but you can choose an AND or a BREAK.": "You can toggle the separator mode. Standardly this is a comma, but you can choose an AND or a BREAK.", + "You can try it by own, to dig more deeper into Abyss ...": "You can try it by own, to dig more deeper into Abyss ...", + "You can turn it off and maybe add your own in the prefix or suffix prompt fields": "You can turn it off and maybe add your own in the prefix or suffix prompt fields", + "You can use this as a guided image tool or as a looper depending on your settings in the keyframe images field. \n Set the keyframes and the images that you want to show up. \n Note: the number of frames between each keyframe should be greater than the tweening frames.": "You can use this as a guided image tool or as a looper depending on your settings in the keyframe images field. \n Set the keyframes and the images that you want to show up. 
\n Note: the number of frames between each keyframe should be greater than the tweening frames.", + "You can use this as a guided image tool or as a looper depending on your settings in the keyframe images field. \n Set the keyframes and the images that you want to show up. \n Note: the number of frames between each keyframe should be greater than the tweening frames.": "You can use this as a guided image tool or as a looper depending on your settings in the keyframe images field. \n Set the keyframes and the images that you want to show up. \n Note: the number of frames between each keyframe should be greater than the tweening frames.", + "You can use this as a guided image tool or as a looper depending on your settings in the keyframe images field. \n Set the keyframes and the images that you want to show up. \n Note: the number of frames between each keyframe should be greater than the tweening frames.": "You can use this as a guided image tool or as a looper depending on your settings in the keyframe images field. \n Set the keyframes and the images that you want to show up. \n Note: the number of frames between each keyframe should be greater than the tweening frames.", + "You DID NOT load any model!": "You DID NOT load any model!", + "You don't need a mask to composite video. But, Mask types can control the way that video is composited with the previous image each frame.": "You don't need a mask to composite video. But, Mask types can control the way that video is composited with the previous image each frame.", + "You may configurate automatic sam generation. ": "You may configurate automatic sam generation. ", + "You may configurate automatic sam generation. See": "You may configurate automatic sam generation. See", + "You may configurate the following items and generate masked image for all images under a directory. 
This mode is designed for generating LoRA/LyCORIS training set.": "You may configurate the following items and generate masked image for all images under a directory. This mode is designed for generating LoRA/LyCORIS training set.", + "You must enter text prompts to enable groundingdino. Otherwise this extension will fall back to point prompts only.": "You must enter text prompts to enable groundingdino. Otherwise this extension will fall back to point prompts only.", + "You probably want this to be 'fp16'.": "You probably want this to be 'fp16'.", + "Your Civitai API Key": "Your Civitai API Key", + "Your Civitai Link Key": "Your Civitai Link Key", + "ytype": "ytype", + "Y Types": "Y Types", + "Y Values": "Y Values", + "Zentangle": "Zentangle", + "zeros": "zeros", + "zeros will not add any new pixel information": "zeros will not add any new pixel information", + "ZIP Code": "ZIP Code", + "ZoeDepth was not trained on panoramic images. It doesn't know anything about panoramas or spherical projection. Here, we just treat the estimated depth as radius and some projection errors are expected. Nonetheless, ZoeDepth still works surprisingly well on 360 reconstruction.": "ZoeDepth was not trained on panoramic images. It doesn't know anything about panoramas or spherical projection. Here, we just treat the estimated depth as radius and some projection errors are expected. Nonetheless, ZoeDepth still works surprisingly well on 360 reconstruction.", + "ZoeDepth was not trained on panoramic images. It doesn’t know anything about panoramas or spherical projection. Here, we just treat the estimated depth as radius and some projection errors are expected. Nonetheless, ZoeDepth still works surprisingly well on 360 reconstruction.": "ZoeDepth was not trained on panoramic images. It doesn’t know anything about panoramas or spherical projection. Here, we just treat the estimated depth as radius and some projection errors are expected. 
Nonetheless, ZoeDepth still works surprisingly well on 360 reconstruction.", + "Zoom in masked area": "Zoom in masked area", + "清空正面提示词": "清空正面提示词", + "清空负面提示词": "清空负面提示词", + "Ebsynth Utility": "Ebsynth Utility", + "Face Prompt": "Face Prompt", "hm": "hm", "reinhard": "reinhard", "mvgd": "mvgd", "mkl": "mkl", "hm-mvgd-hm": "hm-mvgd-hm", "hm-mkl-hm": "hm-mkl-hm", - "Color Matcher Ref Image Type": "Color Matcher Ref Image Type", - "original video frame": "original video frame", - "first frame of img2img result": "first frame of img2img result", - "If an image is specified below, it will be used with highest priority.": "If an image is specified below, it will be used with highest priority.", - "Color Matcher Ref Image": "Color Matcher Ref Image", - "Apply mask to the result": "Apply mask to the result", - "Apply mask to the Ref Image": "Apply mask to the Ref Image", - "Apply mask to original image": "Apply mask to original image", - "Crossfade blend rate": "Crossfade blend rate", - "Export type": "Export type", - "Background source(mp4 or directory containing images)": "Background source(mp4 or directory containing images)", - "Background type": "Background type", - "Mask Blur Kernel Size": "Mask Blur Kernel Size", - "Foreground Transparency": "Foreground Transparency", - "Mask Mode": "Mask Mode", - "Process Stage": "Process Stage", - "stage 3": "stage 3", - "stage 4": "stage 4", - "stage 5": "stage 5", - "stage 6": "stage 6", - "The process of creating a video can be divided into the following stages.": "The process of creating a video can be divided into the following stages.", - "(Stage 3, 4, and 6 only show a guide and do nothing actual processing.)": "(Stage 3, 4, and 6 only show a guide and do nothing actual processing.)", - "Extract frames from the original video.": "Extract frames from the original video.", - "Generate a mask image.": "Generate a mask image.", - "Select keyframes to be given to ebsynth.": "Select keyframes to be given to ebsynth.", - "img2img 
keyframes.": "img2img keyframes.", - "(this is optional. Perform color correction on the img2img results and expect flickering to decrease. Or, you can simply change the color tone from the generated result.)": "(this is optional. Perform color correction on the img2img results and expect flickering to decrease. Or, you can simply change the color tone from the generated result.)", - "and upscale to the size of the original video.": "and upscale to the size of the original video.", - "Rename keyframes.": "Rename keyframes.", - "Generate .ebs file.(ebsynth project file)": "Generate .ebs file.(ebsynth project file)", - "Running ebsynth.(on your self)": "Running ebsynth.(on your self)", - "Open the generated .ebs under project directory and press [Run All] button.": "Open the generated .ebs under project directory and press [Run All] button.", - "If out-* directory already exists in the Project directory, delete it manually before executing.": "If out-* directory already exists in the Project directory, delete it manually before executing.", - "If multiple .ebs files are generated, run them all.": "If multiple .ebs files are generated, run them all.", - "Concatenate each frame while crossfading.": "Concatenate each frame while crossfading.", - "Composite audio files extracted from the original video onto the concatenated video.": "Composite audio files extracted from the original video onto the concatenated video.", - "This is an extra stage.": "This is an extra stage.", - "You can put any image or images or video you like in the background.": "You can put any image or images or video you like in the background.", - "You can specify in this field -> [Ebsynth Utility]->[configuration]->[stage 8]->[Background source]": "You can specify in this field -> [Ebsynth Utility]->[configuration]->[stage 8]->[Background source]", - "If you have already created a background video in Invert Mask Mode([Ebsynth Utility]->[configuration]->[etc]->[Mask Mode]),": "If you have already 
created a background video in Invert Mask Mode([Ebsynth Utility]->[configuration]->[etc]->[Mask Mode]),", - "You can specify \"path_to_project_dir/inv/crossfade_tmp\".": "You can specify \"path_to_project_dir/inv/crossfade_tmp\".", "https://github.com/s9roll7/ebsynth_utility": "https://github.com/s9roll7/ebsynth_utility", "c255e64a (Sat Apr 22 00:49:32 2023)": "c255e64a (Sat Apr 22 00:49:32 2023)", - "Prompt for Face": "Prompt for Face", - "mp4": "mp4", - "webm": "webm", - "gif": "gif", "rawvideo": "rawvideo", - "Fit video length": "Fit video length", - "Loop": "Loop", - "Invert": "Invert", "ebsynth utility": "ebsynth utility", - "Don't Override": "Don't Override", "DeepDanbooru": "DeepDanbooru", "YuNet": "YuNet", "Yolov5_anime": "Yolov5_anime", - "Reset": "重置", - "Load from JSON": "從 JSON 載入", - "Detect from image": "從圖像偵測", - "Add Background image": "加入背景圖像", - "json": "json", - "Save JSON": "儲存為 JSON", - "Save PNG": "儲存為 PNG", - "Send to": "發送至", - "openpose-editor": "openPose 編輯器", - "https://github.com/fkunn1326/openpose-editor.git": "https://github.com/fkunn1326/openpose-editor.git", - "NAIConvert": "NAI轉換", - "History": "歷史記錄", - "https://github.com/animerl/novelai-2-local-prompt.git": "https://github.com/animerl/novelai-2-local-prompt.git", - "Load Settings": "載入設定", - "Save Settings": "儲存設定", - "Generate Ckpt": "產生 Ckpt", - "Save Weights": "儲存權重", - "Generate Samples": "產生樣本", - "Select or create a model to begin.": "選擇或建立一個模型", - "Select": "選擇模型", - "Create": "建立", - "Snapshot to Resume": "從 Snapshot 恢復", - "Lora Model": "LoRA 模型", - "Loaded Model:": "載入模型:", - "Model Revision:": "模型修正:", - "Model Epoch:": "模型訓練週期:", - "V2 Model:": "V2 模型:", - "Has EMA:": "有 EMA:", - "Source Checkpoint:": "來源模型權重存檔點:", - "Create Model": "建立模型", - "Create From Hub": "從 huggingface 建立", - "512x Model": "512x 模型", - "Model Path": "模型路徑", - "HuggingFace Token": "HuggingFace 標記", - "Source Checkpoint": "源模型權重存檔點", - "Extract EMA Weights": "提取 EMA 權重", - "Unfreeze Model": 
"解凍模型", - "Resources": "Resources", - "Beginners guide": "Beginners guide", - "Release notes": "發布說明", - "Input": "輸入", - "Concepts": "概念", - "Saving": "儲存", - "Testing": "測試", - "Performance Wizard (WIP)": "效能嚮導(半成品)", - "General": "一般的", - "Use LORA": "使用 LoRA", - "Use Lora Extended": "使用 LoRA 擴充功能(Locon)", - "Train Imagic Only": "僅意象訓練", - "Train Inpainting Model": "訓練局部重繪模型", - "Intervals": "訓練週期 / 間隔", - "Training Steps Per Image (Epochs)": "每張圖像的訓練步數(訓練週期)", - "Pause After N Epochs": "N 階段後暫停", - "Amount of time to pause between Epochs (s)": "每訓練週期之間暫停的時間(秒)", - "Save Model Frequency (Epochs)": "儲存模型頻率(訓練週期)", - "Save Preview(s) Frequency (Epochs)": "儲存預覽頻率(訓練週期)", - "Batching": "批次", - "Batch Size": "每批數量", - "Gradient Accumulation Steps": "梯度累積疊代步數", - "Class Batch Size": "類別每批數量", - "Set Gradients to None When Zeroing": "將梯度設定為 0 的時候設定為無", - "Gradient Checkpointing": "梯度進度記錄", - "Learning Rate": "學習率", - "Lora UNET Learning Rate": "LoRA UNET 學習率", - "Lora Text Encoder Learning Rate": "LoRA Text Encoder 學習率", - "Learning Rate Scheduler": "學習率調度器", - "linear_with_warmup": "linear_with_warmup", - "cosine": "餘弦(cosine)", - "cosine_annealing": "cosine_annealing", - "cosine_annealing_with_restarts": "cosine_annealing_with_restarts", - "cosine_with_restarts": "含重啟的餘弦(cosine)", - "polynomial": "多項式(polynomial)", - "constant": "常數(constant)", - "constant_with_warmup": "含預熱的常數(constant)", - "Min Learning Rate": "最小學習率", - "Number of Hard Resets": "硬重置數量", - "Constant/Linear Starting Factor": "常數/線性起始因子", - "Polynomial Power": "多項式功率", - "Scale Position": "比例位置", - "Learning Rate Warmup Steps": "學習率預熱步數", - "Image Processing": "圖像處理", - "Max Resolution": "最高解析度", - "Apply Horizontal Flip": "套用水平翻轉", - "Tuning": "調整", - "Use EMA": "使用 EMA", - "Optimizer": "優化器", - "Torch AdamW": "Torch AdamW", - "8bit AdamW": "8bit AdamW", - "Lion": "Lion", - "Mixed Precision": "混合精度", - "no": "否", - "fp16": "fp16", - "Memory Attention": "記憶體注意力", - "Cache Latents": "快取潛在變數", - "Train 
UNET": "訓練 UNET", - "Step Ratio of Text Encoder Training": "文字編碼器訓練步驟比率", - "Offset Noise": "噪聲偏移", - "Freeze CLIP Normalization Layers": "凍結 CLIP 正規化層", - "Clip Skip": "Clip 跳過層", - "Weight Decay": "權重衰減", - "Pad Tokens": "填充標記", - "Strict Tokens": "嚴格的標記", - "Shuffle Tags": "洗牌標籤", - "Max Token Length": "最大標記長度", - "Prior Loss": "先前的損失", - "Scale Prior Loss": "縮放先前的損失", - "Prior Loss Weight": "先前損失權重", - "Prior Loss Target": "先前損失目標", - "Minimum Prior Loss Weight": "最小先前損失權重", - "Sanity Sample Prompt": "樣本提示詞", - "Sanity Sample Negative Prompt": "樣本反向提示詞", - "Sanity Sample Seed": "樣本種子", - "Miscellaneous": "雜項", - "Pretrained VAE Name or Path": "預訓練 VAE 名稱或路徑", - "Use Concepts List": "使用概念列表", - "Concepts List": "概念列表", - "API Key": "API 金鑰", - "Discord Webhook": "Discord Webhook", - "Save and Test Webhook": "儲存並測試 Webhook", - "Training Wizard (Person)": "訓練嚮導(人物)", - "Training Wizard (Object/Style)": "訓練嚮導(物件 / 樣式)", - "Concept 1": "概念 1", - "Concept 2": "概念 2", - "Concept 3": "概念 3", - "Concept 4": "概念 4", - "Directories": "目錄", - "Dataset Directory": "實例圖像數據目錄", - "Classification Dataset Directory": "類別/正則數據集目錄", - "Filewords": "風格 / 物品名稱", - "Instance Token": "實例名稱", - "Class Token": "類別/正則名稱", - "Training Prompts": "訓練提示詞", - "Instance Prompt": "實例提示詞", - "Class Prompt": "類別/正則提示詞", - "Classification Image Negative Prompt": "類別(正則) 圖像反向提示詞", - "Sample Prompts": "樣本提示詞", - "Sample Image Prompt": "樣本圖像提示詞", - "Sample Negative Prompt": "樣本反向提示詞", - "Sample Prompt Template File": "樣本提示詞範本檔案", - "Class Image Generation": "生成類別(正則) 圖像", - "Class Images Per Instance Image": "每個實例圖像的類別(正則) 圖片數量", - "Classification CFG Scale": "類別(正則) CFG比例", - "Classification Steps": "類別(正則) 步驟", - "Sample Image Generation": "生成樣本圖像", - "Number of Samples to Generate": "產生樣本的數量", - "Sample Seed": "樣本種子", - "Sample CFG Scale": "樣本CFG比例", - "Sample Steps": "樣本步數", - "Custom Model Name": "自定義模型名稱", - "Save in .safetensors format": "以 .safetensors 格式保存", - "Save EMA Weights to Generated 
Models": "將 EMA 權重儲存到產生的模型中", - "Use EMA Weights for Inference": "使用EMA權重進行推論", - "Half Model": "半精度模型", - "Save Checkpoint to Subdirectory": "保存檢查點到子目錄", - "Generate a .ckpt file when saving during training.": "在訓練期間儲存時產生 .ckpt 文件。", - "Generate a .ckpt file when training completes.": "在訓練完成時產生 .ckpt 文件。", - "Generate a .ckpt file when training is canceled.": "在訓練取消時產生 .ckpt 文件。", - "Lora UNET Rank": "LoRA UNET等級", - "Lora Text Encoder Rank": "LoRA 文字編碼器等級", - "Lora Weight": "LoRA 權重", - "Lora Text Weight": "LoRA 文本權重", - "Generate lora weights when saving during training.": "在訓練期間儲存時產生 LoRA。", - "Generate lora weights when training completes.": "在訓練完成時產生 LoRA。", - "Generate lora weights when training is canceled.": "在訓練取消時產生 LoRA。", - "Generate lora weights for extra networks.": "產生附加網路的 LoRA。(警告:如有使用 LoCon功能,需先安裝擴充。)", - "Diffusion Weights (training snapshots)": "Diffusion Weights (training snapshots)", - "Save separate diffusers snapshots when saving during training.": "在訓練期間保存獨立的模型。", - "Save separate diffusers snapshots when training completes.": "訓練完成後保存獨立的模型。", - "Save separate diffusers snapshots when training is canceled.": "當訓練被取消時保存獨立的模型。", - "Class Generation Schedulers": "類別(正則) 圖像生成調度器", - "Image Generation Library": "圖像生成", - "A1111 txt2img (Euler a)": "A1111 txt2img (Euler a)", - "Native Diffusers": "Native Diffusers", - "Image Generation Scheduler": "圖像生成採樣方式", - "DDPM": "DDPM", - "PNDM": "PNDM", - "LMSDiscrete": "LMSDiscrete", - "EulerDiscrete": "EulerDiscrete", - "HeunDiscrete": "HeunDiscrete", - "EulerAncestralDiscrete": "EulerAncestralDiscrete", - "DPMSolverMultistep": "DPMSolverMultistep", - "DPMSolverSinglestep": "DPMSolverSinglestep", - "KDPM2Discrete": "KDPM2Discrete", - "KDPM2AncestralDiscrete": "KDPM2AncestralDiscrete", - "DEISMultistep": "DEISMultistep", - "UniPCMultistep": "UniPCMultistep", - "Manual Class Generation": "Manual Class Generation", - "Generate Class Images": "產生類別(正則) 圖片", - "Generate Graph": "產生圖形", - "Graph Smoothing 
Steps": "圖形平滑步驟", - "Debug Buckets": "除錯", - "Epochs to Simulate": "要模擬的訓練週期", - "Batch Size to Simulate": "模擬的批量大小", - "Generate Sample Images": "產生樣本圖像", - "Sample Prompt": "樣本提示詞", - "Sample Prompt File": "樣本提示文件", - "Sample Width": "樣本寬度", - "Sample Height": "樣本高度", - "Sample Batch Size": "樣本批次大小", - "Swap Sample Faces": "交換 Sample Faces", - "Swap Prompt": "交換提示詞", - "Swap Negative Prompt": "交換反向提示詞", - "Swap Steps": "交換疊代步數", - "Swap Batch": "交換批次", - "Use txt2img": "使用文生圖", - "Experimental Settings": "實驗性設定", - "Deterministic": "確定性的訓練", - "Use EMA for prediction": "使用 EMA 進行預測", - "Calculate Split Loss": "計算分割損失", - "Use TensorFloat 32": "使用TensorFloat 32", - "Noise scheduler": "Noise scheduler", - "DEIS": "DEIS", - "Update Extension and Restart": "更新擴充並重新啟動", - "Bucket Cropping": "批量裁剪", - "Source Path": "來源路徑", - "Dest Path": "目標路徑", - "Max Res": "最大解析度", - "Bucket Steps": "批量Steps", - "Dry Run": "空運行", - "Start Cropping": "開始裁剪", - "Checkbox": "核取方塊", - "Check Progress": "查看進度", - "Update Parameters": "更新參數", - "Changelog": "更新紀錄", - "X": "X", - "sd_dreambooth_extension": "sd_dreambooth_extension", - "https://github.com/d8ahazard/sd_dreambooth_extension.git": "https://github.com/d8ahazard/sd_dreambooth_extension.git", - "runwayml/stable-diffusion-v1-5": "runwayml/stable-diffusion-v1-5", - "A generic prompt used to generate a sample image to verify model fidelity.": "用於產生樣本圖像以驗證模型保真度的通用提示。", - "A negative prompt for the generic sample image.": "通用圖像的反向提示詞。", - "Leave blank to use base model VAE.": "留空以使用基本模型 VAE。", - "Path to JSON file with concepts to train.": "帶有要訓練概念的 JSON 檔案的路徑。", - "https://discord.com/api/webhooks/XXX/XXXX": "https://discord.com/api/webhooks/XXX/XXXX", - "(Optional) Path to directory with classification/regularization images": "(可選)帶有類別(正則) 圖像的目錄路徑", - "When using [filewords], this is the subject to use when building prompts.": "使用 [filewords] 時,構建提示時使用的主題。", - "When using [filewords], this is the class to use when building 
prompts.": "使用 [filewords] 時,構建提示時使用的類別(正則)。", - "Optionally use [filewords] to read image captions from files.": "可以選擇使用 [filewords] 從檔案中讀取圖像標題。", - "Leave blank to use instance prompt. Optionally use [filewords] to base sample captions on instance images.": "留空以使用實例提示。可以選擇使用 [filewords] 以實例圖像為基礎生成樣本標題。", - "Enter the path to a txt file containing sample prompts.": "輸入包含樣本提示的 txt 檔案的路徑。", - "Enter a model name for saving checkpoints and lora models.": "輸入模型名稱以保存檢查點和 lora 模型。", - "Enable": "啟用", + "Asymmetric tiling": "Asymmetric tiling", + "asymmetric-tiling-sd-webui": "asymmetric-tiling-sd-webui", + "https://github.com/tjm35/asymmetric-tiling-sd-webui.git": "https://github.com/tjm35/asymmetric-tiling-sd-webui.git", + "Tile X": "Tile X", + "Tile Y": "Tile Y", + "Start tiling from step N": "Start tiling from step N", + "Stop tiling after step N (-1: Don't stop)": "Stop tiling after step N (-1: Don't stop)", + "LoRA": "LoRA", + "Elements": "Elements", + "Calcutation Mode": "Calcutation Mode", + "cosineA": "cosineA", + "cosineB": "cosineB", + "smoothAdd": "smoothAdd", + "tensor": "tensor", + "merge from ID": "merge from ID", + "Set from ID(-1 for last)": "Set from ID(-1 for last)", + "Hires Fix , Batch size": "Hires Fix , Batch size", + "Tensor Merge": "Tensor Merge", + "number of -1": "number of -1", + "Y grid (Disabled if blank)": "Y grid (Disabled if blank)", + "Reserve XY Plot": "Reserve XY Plot", + "block IDs": "block IDs", + "BASE": "BASE", + "calcmode": "calcmode", + "effective chekcer settings": "effective chekcer settings", + "Reservation": "Reservation", + "set to alpha": "set to alpha", + "read from alpha": "read from alpha", + "set to beta": "set to beta", + "read from beta": "read from beta", + "set to X": "set to X", + "weights for alpha, base alpha,IN00,IN02,...IN11,M00,OUT00,...,OUT11": "weights for alpha, base alpha,IN00,IN02,...IN11,M00,OUT00,...,OUT11", + "weights,for beta, base beta,IN00,IN02,...IN11,M00,OUT00,...,OUT11": "weights,for beta, base 
beta,IN00,IN02,...IN11,M00,OUT00,...,OUT11", + "available": "available", + "Start XY plot": "Start XY plot", + "Delete list(-1 for all)": "Delete list(-1 for all)", + "Delete num :": "Delete num :", + "No.": "No.", + "float": "float", + "bf16": "bf16", + "LoRAname1:ratio1:Blocks1,LoRAname2:ratio2:Blocks2,...(\":blocks\" is option, not necessary)": "LoRAname1:ratio1:Blocks1,LoRAname2:ratio2:Blocks2,...(\":blocks\" is option, not necessary)", + "limit dimension": "limit dimension", + "ID": "ID", + "use ID": "use ID", + "load keys": "load keys", + "block": "block", + "key": "key", + "sd-webui-supermerger": "sd-webui-supermerger", + "https://github.com/hako-mikan/sd-webui-supermerger.git": "https://github.com/hako-mikan/sd-webui-supermerger.git", + "847c8760 (Fri Apr 21 16:44:01 2023)": "847c8760 (Fri Apr 21 16:44:01 2023)", + "Blocks:Element:Ratio,Blocks:Element:Ratio,...": "Blocks:Element:Ratio,Blocks:Element:Ratio,...", + "beta (if Triple or Twice is not selected,Twice automatically enable)": "beta (if Triple or Twice is not selected,Twice automatically enable)", + "alpha and beta": "alpha and beta", + "mbw alpha": "mbw alpha", + "mbw beta": "mbw beta", + "mbw alpha and beta": "mbw alpha and beta", + "pinpoint blocks (alpha or beta must be selected for another axis)": "pinpoint blocks (alpha or beta must be selected for another axis)", + "elemental": "elemental", + "pinpoint element": "pinpoint element", + "effective elemental checker": "effective elemental checker", + "tensors": "tensors", + "BLIP2 Captioner": "BLIP2 Captioner", + "Generated Caption": "已產生的描述", + "Output Caption Extension": "Output Caption Extension", + "Idle": "Idle", + "Nucleus": "Nucleus", + "Top-K": "Top-K", + "Number of beams (0 = no beam search)": "Number of beams (0 = no beam search)", + "Caption min length": "描述最小長度", + "Caption max length": "描述最大長度", + "Top p": "Top p", + "stable-diffusion-webui-blip2-captioner": "stable-diffusion-webui-blip2-captioner", + 
"https://github.com/p1atdev/stable-diffusion-webui-blip2-captioner": "https://github.com/p1atdev/stable-diffusion-webui-blip2-captioner", + "path/to/caption": "path/to/caption", + "txt": "txt", "Highres. percentage chance": "高解析度修復:隨機機率", "Highres. Denoising Strength": "高解析度修復:重繪幅度", "Highres. Width": "高解析度修復:第一遍寬度", @@ -3521,18 +8291,133 @@ "Range of stepped values (min, max, step)": "含步數的隨機範圍(最小,最大,步數)", "Float value from 0 to 1": "從 0 到 1 的浮點數數值", "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.": "在生成圖像之前從模型權重存檔點中加載權重。你可以使用哈希值或檔案名的一部分(如設定中所示)作為模型權重存檔點名稱。建議用在Y軸上以減少過程中模型的切換", + "Depth Library": "深度圖圖庫", + "Pages:": "頁碼:", + "https://github.com/jexom/sd-webui-depth-lib.git": "https://github.com/jexom/sd-webui-depth-lib.git", + "3D Model Loader": "3D 模型載入器", + "Light": "Light", + "Position/Rotate X": "Position/Rotate X", + "Position/Rotate Y": "Position/Rotate Y", + "Position/Rotate Z": "Position/Rotate Z", + "Show Ground": "顯示地面", + "Show Grid": "顯示網格", + "Show Axis": "顯示軸", + "Ground Color": "地面顏色", + "Light Color": "Light Color", + "Model Scale": "Model Scale", + "Load Model": "Load Model", + "Play/Pause": "Play/Pause", + "Canvas Background Color": "畫布背景顏色", + "Canvas Ground Color": "畫布地面顏色", + "sd-3dmodel-loader": "sd-3dmodel-loader", + "https://github.com/jtydhr88/sd-3dmodel-loader.git": "https://github.com/jtydhr88/sd-3dmodel-loader.git", + "Mine Diffusion": "Mine Diffusion", + "Blocks Blacklist": "Blocks Blacklist", + "Schematic Settings": "Schematic Settings", + "Orientation": "Orientation", + "Rotation": "Rotation", + "Schematic Size": "Schematic Size", + "Dithering": "Dithering", + "Save Schematic": "Save Schematic", + "litematic": "litematic", + "schematic(in development)": "schematic(in development)", + "Path to Schematics Folder": "Path to Schematics Folder", + "Schematic Name": "Schematic Name", + "exclude": 
"exclude", + "Blacklist Presets": "Blacklist Presets", + "Update Presets": "Update Presets", + "Blacklist Name": "Blacklist Name", + "Save Blacklist": "Save Blacklist", + "Bedrock": "Bedrock", + "Water (Unsupported!)": "Water (Unsupported!)", + "Last schemtatics dir path": "Last schemtatics dir path", + "https://github.com/fropych/mine-diffusion": "https://github.com/fropych/mine-diffusion", + "039062f0 (Fri Mar 31 14:38:41 2023)": "039062f0 (Fri Mar 31 14:38:41 2023)", + "Remove Bedrock": "Remove Bedrock", + "Remove Water (Unsupported!)": "Remove Water (Unsupported!)", + "All Blocks": "All Blocks", + "deforum.github.io": "deforum.github.io", + "kabachuha": "kabachuha", + "sd-parseq": "sd-parseq", + "as a UI to define your animation schedules (see the Parseq section in the Keyframes tab).": "作為一個用來定義動畫時間軸的使用者介面(UI),您可以使用 sd-parseq(請參考「關鍵幀(Keyframes)」標籤中的 Parseq 部分)", + "framesync.xyz": "framesync.xyz", + "!": "!", + "DDIM Eta": "DDIM Eta", + "2D": "2D", + "3D": "3D", + "CFG": "提示詞相關性", + "Depth Warping": "Depth Warping", + "Field Of View": "Field Of View", + "MiDaS weight": "MiDaS weight", + "Sampling mode": "Sampling mode", + "bicubic": "bicubic", + "bilinear": "bilinear", + "nearest": "nearest", + "Perspective flip fv": "Perspective flip fv", + "Match Frame 0 HSV": "Match Frame 0 HSV", + "Match Frame 0 LAB": "Match Frame 0 LAB", + "Match Frame 0 RGB": "Match Frame 0 RGB", + "Color coherence video every N frames": "Color coherence video every N frames", + "ControlNet not found. Please install it :)": "ControlNet not found. 
Please install it :)", + "Please, change animation mode to 2D or 3D to enable Hybrid Mode": "Please, change animation mode to 2D or 3D to enable Hybrid Mode", + "by": "by", + "'": "'", + "(": "(", + ")": ")", + "DIS Medium": "DIS Medium", + "Farneback": "Farneback", + "FFMPEG mp4": "FFMPEG mp4", + "Skip video for run all": "Skip video for run all", + "realesr-animevideov3": "realesr-animevideov3", + "realesrgan-x4plus": "realesrgan-x4plus", + "realesrgan-x4plus-anime": "realesrgan-x4plus-anime", + "x2": "x2", + "x3": "x3", + "x4": "x4", + "Frame Interoplation": "Frame Interoplation", + "RIFE": "RIFE", + "/": "/", + "FILM": "FILM", + "RIFE v4.6": "RIFE v4.6", + "Slow-Mo X": "Slow-Mo X", + "Interpolate an existing video": "Interpolate an existing video", + "Interpolated Vid FPS": "Interpolated Vid FPS", + "*Interpolate uploaded video*": "*Interpolate uploaded video*", + "Upscale V2": "Upscale V2", + "Upscale V1": "Upscale V1", + "Path name modifier": "Path name modifier", + "x0_pred": "x0_pred", + "x": "x", + "Enter relative to webui folder or Full-Absolute path, and make sure it ends with something like this: '20230124234916_%05d.png', just replace 20230124234916 with your batch ID. The %05d is important, don't forget it!": "Enter relative to webui folder or Full-Absolute path, and make sure it ends with something like this: '20230124234916_%05d.png', just replace 20230124234916 with your batch ID. 
The %05d is important, don't forget it!", + "MP4 path": "MP4 path", + "Render steps": "Render steps", + "INVISIBLE": "INVISIBLE", + "from_img2img_instead_of_link": "from_img2img_instead_of_link", + "Perlin W": "Perlin W", + "Perlin H": "Perlin H", + "Filename format": "Filename format", + "save_settings": "save_settings", + "save_samples": "save_samples", + "display_samples": "display_samples", + "Subseed controls & More": "Subseed controls & More", + "Enable subseed controls": "Enable subseed controls", + "N Batch": "N Batch", + "Save sample per step": "Save sample per step", + "Show sample per step": "Show sample per step", + "Deforum extension for auto1111 — version 2.2b": "Deforum extension for auto1111 — version 2.2b", + "* Paths can be relative to webui folder OR full - absolute": "* Paths can be relative to webui folder OR full - absolute", + "General Settings File": "General Settings File", + "Video Settings File": "Video Settings File", + "Save Video Settings": "Save Video Settings", + "deforum-for-automatic1111-webui": "Deforum", + "https://github.com/deforum-art/deforum-for-automatic1111-webui.git": "https://github.com/deforum-art/deforum-for-automatic1111-webui.git", "Canvas Editor": "Canvas Editor", - "Send to Canvas Editor": "Send to Canvas Editor", - "ControlNet": "ControlNet", "Polotno API Key": "Polotno API Key", "https://github.com/jtydhr88/sd-canvas-editor": "https://github.com/jtydhr88/sd-canvas-editor", "4f3ffe4a (Tue Apr 18 01:45:04 2023)": "4f3ffe4a (Tue Apr 18 01:45:04 2023)", - "Templates": "Templates", "Text": "文字", "Photos": "Photos", - "Elements": "Elements", "Upload": "Upload", - "Background": "Background", "Txt2img": "Txt2img", "Img2img": "Img2img", "Position": "Position", @@ -3543,7 +8428,6 @@ "Create body text": "Create body text", "Show templates with the same size": "Show templates with the same size", "Lines": "Lines", - "Shapes": "Shapes", "Do you want to upload your own images?": "Do you want to upload your own images?", "Upload 
Image": "Upload Image", "No results": "No results", @@ -3561,291 +8445,73 @@ "Full HD": "Full HD", "txt2img Library": "txt2img Library", "img2img Library": "img2img Library", - "Posex": "Posex", - "Send this image to ControlNet.": ">> ControlNet", - "Target ControlNet number": "目標 ControlNet 號碼", - "https://github.com/hnmr293/posex.git": "https://github.com/hnmr293/posex.git", - "3D Model Loader": "3D 模型載入器", - "Target": "目標", - "Light": "Light", - "Position/Rotate X": "Position/Rotate X", - "Position/Rotate Y": "Position/Rotate Y", - "Position/Rotate Z": "Position/Rotate Z", - "Show Ground": "顯示地面", - "Show Grid": "顯示網格", - "Show Axis": "顯示軸", - "Background Color": "背景顏色", - "Ground Color": "地面顏色", - "Light Color": "Light Color", - "Model Scale": "Model Scale", - "Load Model": "Load Model", - "Play/Pause": "Play/Pause", - "Stop": "停止", - "3D Model": "3D 模型", - "Canvas Background Color": "畫布背景顏色", - "Canvas Ground Color": "畫布地面顏色", - "Canvas Width": "畫布寬度", - "Canvas Height": "畫布高度", - "sd-3dmodel-loader": "sd-3dmodel-loader", - "https://github.com/jtydhr88/sd-3dmodel-loader.git": "https://github.com/jtydhr88/sd-3dmodel-loader.git", - "ControlNet v1.1.411": "ControlNet v1.1.411", - "ControlNet Unit 0": "ControlNet Unit 0", - "ControlNet Unit 1": "ControlNet Unit 1", - "ControlNet Unit 2": "ControlNet Unit 2", - "Preprocessor Preview": "預處理器預覽", - "Input Directory": "輸入目錄", - "Open New Canvas": "打開新畫布", - "New Canvas Width": "畫布寬度", - "New Canvas Height": "畫布高度", - "Create New Canvas": "創建新畫布", - "Set the preprocessor to [invert] If your image has white background and black lines.": "如果您的線稿圖像是白色背景和黑色線條,請將預處理器設置為 [invert]。", - "Low VRAM": "低 VRAM 模式", - "Pixel Perfect": "完美像素", - "Allow Preview": "開啟預覽", - "Preview as Input": "Preview as Input", - "Control Type": "Control Type", - "All": "所有", - "NormalMap": "NormalMap", - "OpenPose": "OpenPose", - "MLSD": "MLSD", - "Lineart": "Lineart", - "SoftEdge": "SoftEdge", - "Scribble/Sketch": "Scribble/Sketch", - 
"Segmentation": "Segmentation", - "Shuffle": "Shuffle", - "Tile/Blur": "Tile/Blur", - "InstructP2P": "InstructP2P", - "Reference": "Reference", - "Recolor": "Recolor", - "Revision": "Revision", - "T2I-Adapter": "T2I-Adapter", - "IP-Adapter": "IP-Adapter", - "Preprocessor": "預處理器", - "Control Weight": "控制權重", - "Starting Control Step": "開始控制步數(%)", - "Ending Control Step": "停止控制步數(%)", - "Preprocessor resolution": "預處理器解析度", - "Threshold A": "閾值 A", - "Threshold B": "閾值 B", - "Control Mode": "Control Mode", - "Balanced": "平衡", - "My prompt is more important": "我的提示詞更重要", - "ControlNet is more important": "ControlNet更重要", - "Resize Mode": "縮放模式", - "Just Resize": "拉伸", - "Crop and Resize": "裁剪並調整大小", - "Resize and Fill": "調整大小並填充", - "[Loopback] Automatically send generated images to this ControlNet unit": "[Loopback] 自動將生成的圖像發送回此 ControlNet", - "Presets": "Presets", - "Preset name": "Preset name", - "ControlNet-M2M": "ControlNet-M2M", - "Duration": "持續時間", - "[ControlNet] Enabled": "[ControlNet] 啟用", - "[ControlNet] Model": "[ControlNet] 模型", - "[ControlNet] Weight": "[ControlNet] 權重", - "[ControlNet] Guidance Start": "[ControlNet] 引導開始", - "[ControlNet] Guidance End": "[ControlNet] 引導結束", - "[ControlNet] Resize Mode": "[ControlNet] 縮放模式", - "[ControlNet] Preprocessor": "[ControlNet] 預處理器", - "[ControlNet] Pre Resolution": "[ControlNet] 解析度", - "[ControlNet] Pre Threshold A": "[ControlNet] 閾值 A", - "[ControlNet] Pre Threshold B": "[ControlNet] 閾值 B", - "ControlNet-0": "ControlNet-0", - "ControlNet-1": "ControlNet-1", - "ControlNet-2": "ControlNet-2", - "Movie Input": "Movie Input", - "Image Input": "Image Input", - "Save preprocessed": "儲存預處理", - "Controlnet input directory": "Controlnet輸入目錄", - "Upload independent control image": "Upload independent control image", - "Directory for detected maps auto saving": "檢測圖的自動儲存目錄", - "Extra path to scan for ControlNet models (e.g. 
training output directory)": "掃描 ControlNet 模型的額外路徑(例如訓練輸出目錄)", - "Path to directory containing annotator model directories (requires restart, overrides corresponding command line flag)": "包含預處理器模型的路徑(需要重新啟動,取代命令行設置)", - "Multi-ControlNet: ControlNet unit number (requires restart)": "Multi-ControlNet: ControlNet unit number (requires restart)", - "Model cache size (requires restart)": "模型緩存大小(需要儲存設定並重新啟動)", - "ControlNet inpainting Gaussian blur sigma": "ControlNet inpainting Gaussian blur sigma", - "Do not apply ControlNet during highres fix": "Do not apply ControlNet during highres fix", - "Do not append detectmap to output": "不要將檢測圖附加到輸出目錄", - "Allow detectmap auto saving": "允許檢測圖自動儲存", - "Allow other script to control this extension": "允許其他指令碼控制此擴充功能", - "Paste ControlNet parameters in infotext": "Paste ControlNet parameters in infotext", - "Show batch images in gradio gallery output": "Show batch images in gradio gallery output", - "Increment seed after each controlnet batch iteration": "在每次 controlnet 批處理迭代後增大種子", - "Disable control type selection": "Disable control type selection", - "Disable openpose edit": "Disable openpose edit", - "Ignore mask on ControlNet input image if control type is not inpaint": "Ignore mask on ControlNet input image if control type is not inpaint", - "https://github.com/Mikubill/sd-webui-controlnet.git": "https://github.com/Mikubill/sd-webui-controlnet.git", - "Leave empty to use img2img batch controlnet input directory": "留空使用img2img批處理controlnet輸入目錄", - "[IP-Adapter]": "[IP-Adapter]", - "Preprocessor Resolution": "Preprocessor Resolution", - "Noise Augmentation": "Noise Augmentation", - "MLSD Distance Threshold": "MLSD Distance Threshold", - "Latent Mirror mode": "鏡像潛在變數模式", - "Alternate Steps": "交替疊代", - "Blend Average": "平均混合", - "Latent Mirror style": "潛在變數鏡像樣式", - "Vertical Mirroring": "垂直鏡像", - "Horizontal Mirroring": "水平鏡像", - "Horizontal+Vertical Mirroring": "垂直+水平鏡像", - "90 Degree Rotation": "90 度旋轉", - "180 Degree 
Rotation": "180 度旋轉", - "Roll Channels": "三原色頻道輪替", - "X panning": "沿 X 軸滾動", - "Y panning": "沿 Y 軸滾動", - "Maximum steps fraction to mirror at": "鏡像干涉止步於總疊代步數的", - "SD-latent-mirroring": "SD-latent-mirroring", - "https://github.com/dfaker/SD-latent-mirroring.git": "https://github.com/dfaker/SD-latent-mirroring.git", - "Tokenizer": "標記解析器", - "Before your text is sent to the neural network, it gets turned into numbers in a process called tokenization. These tokens are how the neural network reads and interprets text. Thanks to our great friends at Shousetsu愛 for inspiration for this feature.": "在你的文本被發送到神經網路之前,它在一個稱為標記化的過程中被轉化為數字。這些標記是神經網路閱讀和解釋文本的方式。感謝我們的好朋友 Shousetsu愛 為這個功能帶來的靈感", - "Text input": "文本輸入", - "ID input": "ID 輸入", - "Tokenize": "標記拆分", - "Tokens": "標記", - "stable-diffusion-webui-tokenizer": "stable-diffusion-webui-tokenizer", - "Prompt for tokenization": "給標記化準備的提示詞", - "Ids for tokenization (example: 9061, 631, 736)": "用於標記化的 ID(例:9061,631,736)", - "https://github.com/AUTOMATIC1111/stable-diffusion-webui-tokenizer.git": "https://github.com/AUTOMATIC1111/stable-diffusion-webui-tokenizer.git", - "Image Browser": "圖庫瀏覽器", - "txt2img-grids": "文生圖網格", - "img2img-grids": "img2img-網格", - "Favorites": "收藏夾", - "Others": "其他", - "Favorites path from settings: log/images": "Favorites path from settings: log/images", - "Images directory": "圖像目錄", - "Sub directory depth": "子目錄深度", - "Add to / replace in saved directories": "加入至或取代已儲存的目錄", - "Saved directories": "已儲存的目錄", - "Remove from saved directories": "從已儲存的目錄移除", - "Sub directories": "子目錄", - "Nothing selected": "未選取", - "Get sub directories": "讀取子目錄", - "Maintenance": "維護", - "⚠ Caution: You should only use these options if you know what you are doing. 
⚠": "⚠ 警告:只有您知道在做什麼時才使用這些選項。⚠", - "Status:": "Status:", - "Last message": "Last message", - "Rebuild exif cache": "Rebuild exif cache", - "Delete 0-entries from exif cache": "Delete 0-entries from exif cache", - "Update directory names in database": "Update directory names in database", - "From (full path)": "From (full path)", - "to (full path)": "to (full path)", - "Reapply ranking after moving files": "Reapply ranking after moving files", - "Dropdown": "下拉式清單", - "First Page": "首頁", - "Prev Page": "上一頁", - "Page Index": "頁數", - "Next Page": "下一頁", - "End Page": "尾頁", - "ranking": "評等", - "Next Image After Ranking (To be implemented)": "Next Image After Ranking (To be implemented)", - "delete next": "刪除後 N 張", - "also delete off-screen images": "also delete off-screen images", - "Additional Generation Info": "Additional Generation Info", - "sort by": "排序方式", - "Sort by": "Sort by", - "path name": "路徑名", - "date": "日期", - "aesthetic_score": "美學分數", - "cfg scale": "提示詞相關性", - "steps": "疊代步數", - "size": "尺寸", - "model": "模型", - "model hash": "模型雜湊值", - "filename keyword": "檔名關鍵字", - "Filename keyword search": "Filename keyword search", - "exif keyword": "exif 關鍵字", - "EXIF keyword search": "EXIF keyword search", - "Search negative prompt": "搜尋反向提示詞", - "No": "No", - "Yes": "是", - "Only": "只有", - "case sensitive": "case sensitive", - "regex - e.g. ^(?!.*Hires).*$": "regex - e.g. 
^(?!.*Hires).*$", - "ranking filter": "以評等篩選", - "Ranking filter": "Ranking filter", - "minimum aesthetic_score": "最小美學分數", - "Minimum aesthetic_score": "Minimum aesthetic_score", - "Maximum aesthetic_score": "Maximum aesthetic_score", - "Generate Info": "產生資訊", - "Generation Info": "Generation Info", - "File Name": "檔案名", - "Move to favorites": "移動到收藏夾", - "Send to txt2img ControlNet": "Send to txt2img ControlNet", - "Send to img2img ControlNet": "Send to img2img ControlNet", - "ControlNet number": "ControlNet number", - "Directory path": "目錄路徑", - "Move to directory": "移動到目錄", - "Renew Page": "刷新頁面", - "set_index": "設定索引", - "load_switch": "載入開關", - "to_dir_load_switch": "to_dir_load_switch", - "turn_page_switch": "翻頁開關", - "List of active tabs (separated by commas). Available options are txt2img, img2img, txt2img-grids, img2img-grids, Extras, Favorites, Others. Custom folders are also supported by specifying their path.": "List of active tabs (separated by commas). Available options are txt2img, img2img, txt2img-grids, img2img-grids, Extras, Favorites, Others. 
Custom folders are also supported by specifying their path.", - "Select components to hide": "Select components to hide", - "Include images in sub directories": "包含子目錄的圖像", - "Preload images at startup": "在啟動時預加載圖像", - "Move buttons copy instead of move": "將移動按鈕以複製取代", - "Print image deletion messages to the console": "將圖像刪除訊息打印到控制台", - "Move/Copy/Delete matching .txt files": "Move/Copy/Delete matching .txt files", - "Print warning logs to the console": "在控制台顯示警告訊息", - "Print debug logs to the console": "在控制台顯示除錯訊息。", - "Use recycle bin when deleting images": "刪除圖像時丟入資源回收桶", - "Scan Exif-/.txt-data (initially slower, but required for many features to work)": "Scan Exif-/.txt-data (initially slower, but required for many features to work)", - "Scan Exif-/.txt-data (slower, but required for exif-keyword-search)": "Scan Exif-/.txt-data (slower, but required for exif-keyword-search)", - "Change CTRL keybindings to SHIFT": "將 Ctrl 改成 Shift 按鍵", - "or to CTRL+SHIFT": "或是 Ctrl + Shift", - "Enable Maintenance tab": "Enable Maintenance tab", - "Save ranking in image's pnginfo": "Save ranking in image's pnginfo", - "Number of columns on the page": "每頁列數", - "Number of rows on the page": "每頁行數", - "Minimum number of pages per load": "每次載入的最小頁數", - "stable-diffusion-webui-images-browser": "stable-diffusion-webui-images-browser", - "https://github.com/AlUlkesh/stable-diffusion-webui-images-browser.git": "https://github.com/AlUlkesh/stable-diffusion-webui-images-browser.git", - "Input images directory": "輸入圖像目錄", - "Smart Preprocess": "智慧預處理", - "sd_smartprocess": "sd_smartprocess", - "Rename images": "重新命名圖像", - "Cropping": "裁切", - "Output Size": "輸出尺寸", - "Pad Images": "Pad Images", - "Crop Images": "裁切圖像", - "Captions": "描述", - "Generate Captions": "產生描述", - "Max Caption Length (0=unlimited)": "最大描述長度(設 0 為無限制)", - "Existing Caption Action": "既有描述處理", - "Add CLIP results to Caption": "將 CLIP 結果加入描述", - "Number of CLIP beams": "Number of CLIP beams", - "CLIP Minimum length": 
"最小 CLIP 長度", - "CLIP Maximum length": "最小 CLIP 長度", - "Use v2 CLIP Model": "使用 v2 CLIP 模型", - "Append Flavor tags from CLIP": "Append Flavor tags from CLIP", - "Max flavors to append.": "Max flavors to append.", - "Append Medium tags from CLIP": "Append Medium tags from CLIP", - "Append Movement tags from CLIP": "Append Movement tags from CLIP", - "Append Artist tags from CLIP": "Append Artist tags from CLIP", - "Append Trending tags from CLIP": "Append Trending tags from CLIP", - "Add WD14 Tags to Caption": "Add WD14 Tags to Caption", - "Minimum Score for WD14 Tags": "Minimum Score for WD14 Tags", - "Minimum Score for DeepDanbooru Tags": "Minimum Score for DeepDanbooru Tags", - "Tags To Ignore": "要忽略的標記", - "Replace Class with Subject in Caption": "在描述中將類別取代為主題", - "Subject Class": "主題類別", - "Subject class to crop (leave blank to auto-detect)": "要裁切的主題類別(留空以自動偵測)", - "Subject Name": "主題名稱", - "Subject Name to replace class with in captions": "要在描述中將類別取代的主題名稱", - "Post-Processing": "後處理", - "Face Restore Model": "面部修復模型", - "Upscale and Resize": "放大並縮放比例", - "https://github.com/d8ahazard/sd_smartprocess.git": "https://github.com/d8ahazard/sd_smartprocess.git", - "Asymmetric tiling": "Asymmetric tiling", - "asymmetric-tiling-sd-webui": "asymmetric-tiling-sd-webui", - "https://github.com/tjm35/asymmetric-tiling-sd-webui.git": "https://github.com/tjm35/asymmetric-tiling-sd-webui.git", - "Active": "Active", - "Tile X": "Tile X", - "Tile Y": "Tile Y", - "Start tiling from step N": "Start tiling from step N", - "Stop tiling after step N (-1: Don't stop)": "Stop tiling after step N (-1: Don't stop)", - "Return mask": "Return mask", - "Alpha matting": "Alpha matting", + "the wiki for usage tips.": "the wiki for usage tips.", + "Mimic CFG Scale": "Mimic CFG Scale", + "Dynamic Thresholding Advanced Options": "Dynamic Thresholding Advanced Options", + "sd-dynamic-thresholding": "sd-dynamic-thresholding", + "https://github.com/mcmonkeyprojects/sd-dynamic-thresholding.git": 
"https://github.com/mcmonkeyprojects/sd-dynamic-thresholding.git", + "Top percentile of latents to clamp": "Top percentile of latents to clamp", + "Mimic Scale Scheduler": "Mimic Scale Scheduler", + "Constant": "Constant", + "Minimum value of the Mimic Scale Scheduler": "Minimum value of the Mimic Scale Scheduler", + "CFG Scale Scheduler": "CFG Scale Scheduler", + "Minimum value of the CFG Scale Scheduler": "Minimum value of the CFG Scale Scheduler", + "Power Scheduler Value": "Power Scheduler Value", + "Linear Down": "Linear Down", + "Cosine Down": "Cosine Down", + "Half Cosine Down": "Half Cosine Down", + "Linear Up": "Linear Up", + "Cosine Up": "Cosine Up", + "Half Cosine Up": "Half Cosine Up", + "Power Up": "Power Up", + "Power Down": "Power Down", + "Maximum width or height (whichever is higher)": "最大寬度或高度(無論哪個較高)", + "Scale to maximum width or height": "放大至最大寬度或高度", + "-75%": "-75%", + "-50%": "-50%", + "-25%": "-25%", + "+25%": "+25%", + "+50%": "+50%", + "+75%": "+75%", + "+100%": "+100%", + "Expand by default": "預設展開擴充功能", + "Show maximum width or height button": "顯示最大寬度或高度按鈕", + "Maximum width or height default": "最大寬度或高度的預設值", + "Show predefined percentage buttons": "顯示預設的百分比按鈕", + "Predefined percentage buttons, applied to dimensions (75, 125, 150)": "預設的百分比按鈕,套用於尺寸(75,125,150)", + "Predefined percentage display format": "預設百分比顯示格式", + "sd-webui-aspect-ratio-helper": "sd-webui-aspect-ratio-helper", + "https://github.com/thomasasfk/sd-webui-aspect-ratio-helper.git": "https://github.com/thomasasfk/sd-webui-aspect-ratio-helper.git", + "Plot": "圖表", + "a1111-stable-diffusion-webui-vram-estimator": "a1111-stable-diffusion-webui-vram-estimator", + "https://github.com/space-nuko/a1111-stable-diffusion-webui-vram-estimator.git": "https://github.com/space-nuko/a1111-stable-diffusion-webui-vram-estimator.git", + "Note:": "Note:", + "json input path (Optional, only for append results)": "json input path (Optional, only for append results)", + "Overwrite if output 
file exists": "Overwrite if output file exists", + "Save metadata image key as fullpath": "Save metadata image key as fullpath", + "Dataset Filter": "Dataset Filter", + "Filter Apply": "套用過濾器", + "hidden_idx_next": "hidden_idx_next", + "hidden_idx_prev": "hidden_idx_prev", + "(INCLUSIVE)": "(INCLUSIVE)", + "(EXCLUSIVE)": "(EXCLUSIVE)", + "Number": "數量", + "Select Tags": "Select Tags", + "hidden_s_or_n": "hidden_s_or_n", + "BLIP": "BLIP", + "wd-v1-4-vit-tagger": "wd-v1-4-vit-tagger", + "wd-v1-4-convnext-tagger": "wd-v1-4-convnext-tagger", + "wd-v1-4-vit-tagger-v2": "wd-v1-4-vit-tagger-v2", + "wd-v1-4-convnext-tagger-v2": "wd-v1-4-convnext-tagger-v2", + "wd-v1-4-swinv2-tagger-v2": "wd-v1-4-swinv2-tagger-v2", + "Moved or deleted images will be unloaded.": "Moved or deleted images will be unloaded.", + "Target dataset num: 0": "Target dataset num: 0", + "Number of columns on image gallery": "Number of columns on image gallery", + "stable-diffusion-webui-dataset-tag-editor": "stable-diffusion-webui-dataset-tag-editor", + "https://github.com/toshiaki1729/stable-diffusion-webui-dataset-tag-editor": "https://github.com/toshiaki1729/stable-diffusion-webui-dataset-tag-editor", + "C:\\path\\to\\metadata.json": "C:\\path\\to\\metadata.json", + "C:\\directory\\of\\datasets": "C:\\directory\\of\\datasets", + ".txt (on Load and Save)": ".txt (on Load and Save)", "Erode size": "Erode size", "Foreground threshold": "Foreground threshold", "Background threshold": "Background threshold", @@ -3856,332 +8522,145 @@ "u2net_human_seg": "u2net_human_seg", "u2net_cloth_seg": "u2net_cloth_seg", "silueta": "silueta", - "weight_gradient": "weight_gradient", - "https://github.com/DingoBite/weight_gradient": "https://github.com/DingoBite/weight_gradient", - "8aedef42 (Sun May 14 20:30:57 2023)": "8aedef42 (Sun May 14 20:30:57 2023)", - "Log in console": "Log in console", - "FigureBracesEXIF": "FigureBracesEXIF", - "Documentation": "Documentation", - "Info": "資訊", - "Modes Hint": "Modes Hint", - 
"Form": "Form", - "Required. Tokens": "Required. Tokens", - "Required. Weight start to end range": "Required. Weight start to end range", - "Steps where weight changes": "Steps where weight changes", - "Weight move to return weight": "Weight move to return weight", - "Gradient mode (e, ei, eo, c, ci, co)": "Gradient mode (e, ei, eo, c, ci, co)", - "Linear decreasing from 1 to 0 in 20% of steps (10% - 30%)": "Linear decreasing from 1 to 0 in 20% of steps (10% - 30%)", - "Exponencial decreasing from 1 to 0 in 8 steps then increasing from 0 to 1 in 7 steps": "Exponencial decreasing from 1 to 0 in 8 steps then increasing from 0 to 1 in 7 steps", - "Circle increasing from 0 to 1 at every step": "Circle increasing from 0 to 1 at every step", - "Circle": "Circle", - "CircleIn": "CircleIn", - "CircleOut": "CircleOut", - "Exponential": "Exponential", - "ExponentialIn": "ExponentialIn", - "ExponentialOut": "ExponentialOut", - "Click to view actions": "Click to view actions", - "Save as SVG": "Save as SVG", - "Save as PNG": "Save as PNG", - "View Source": "View Source", - "View Compiled Vega": "View Compiled Vega", - "Open in Vega Editor": "Open in Vega Editor", - "Create inspiration images": "建立靈感圖像", - "Artist or styles name list. '.txt' files with one name per line": "Artist or styles name list. 
'.txt' files with one name per line", - "Prompt Placeholder, which can be used at the top of prompt input": "Prompt Placeholder, which can be used at the top of prompt input", - "To activate inspiration function, you need get \"inspiration\" images first.": "To activate inspiration function, you need get \"inspiration\" images first.", - "You can create these images by run \"Create inspiration images\" script in txt2img page,": "You can create these images by run \"Create inspiration images\" script in txt2img page,", - "you can get the artists or art styles list from here": "you can get the artists or art styles list from here", - "download these files, and select these files in the \"Create inspiration images\" script UI": "download these files, and select these files in the \"Create inspiration images\" script UI", - "There about 6000 artists and art styles in these files.": "There about 6000 artists and art styles in these files.", - "This takes server hours depending on your GPU type and how many pictures you generate for each artist/style": "This takes server hours depending on your GPU type and how many pictures you generate for each artist/style", - "I suggest at least four images for each": "I suggest at least four images for each", - "You can also download generated pictures from here:": "You can also download generated pictures from here:", - "unzip the file to": "unzip the file to", - "/extections/stable-diffusion-webui-inspiration": "/extections/stable-diffusion-webui-inspiration", - "and restart webui, and enjoy the joy of creation!": "and restart webui, and enjoy the joy of creation!", - "Checkbox Group": "Checkbox Group", - "flavors": "flavors", - "mediums": "mediums", - "movements": "movements", - "Exclude abandoned": "Exclude abandoned", - "Abandoned": "Abandoned", - "Key word": "Key word", - "Get inspiration": "Get inspiration", - "to txt2img": "to txt2img", - "to img2img": "to img2img", - "Collect": "Collect", - "Don't show again": "Don't show 
again", - "Maximum number of samples, used to determine which folders to skip when continue running the create script": "Maximum number of samples, used to determine which folders to skip when continue running the create script", - "stable-diffusion-webui-inspiration": "stable-diffusion-webui-inspiration", - "https://github.com/yfszzx/stable-diffusion-webui-inspiration.git": "https://github.com/yfszzx/stable-diffusion-webui-inspiration.git", - "Additional Networks": "附加網路(LoRA擴充功能)", - "Separate UNet/Text Encoder weights": "單獨設定 UNet 及文字編碼器的權重", - "Network module 1": "附加網路類型 1️⃣", - "LoRA": "LoRA", - "Model 1": "模型 1️⃣", - "Weight 1": "權重 1️⃣", - "UNet Weight 1": "UNet 權重 1️⃣", - "TEnc Weight 1": "文字編碼器權重 1️⃣", - "Network module 2": "附加網路類型 2️⃣", - "Model 2": "模型 2️⃣", - "Weight 2": "權重 2️⃣", - "UNet Weight 2": "UNet 權重 2️⃣", - "TEnc Weight 2": "文字編碼器權重 2️⃣", - "Network module 3": "附加網路類型 3️⃣", - "Model 3": "模型 3️⃣", - "Weight 3": "權重 3️⃣", - "UNet Weight 3": "UNet 權重 3️⃣", - "TEnc Weight 3": "文字編碼器權重 3️⃣", - "Network module 4": "附加網路類型 4️⃣", - "Model 4": "模型 4️⃣", - "Weight 4": "權重 4️⃣", - "UNet Weight 4": "UNet 權重 4️⃣", - "TEnc Weight 4": "文字編碼器權重 4️⃣", - "Network module 5": "附加網路類型 5️⃣", - "Model 5": "模型 5️⃣", - "Weight 5": "權重 5️⃣", - "UNet Weight 5": "UNet 權重 5️⃣", - "TEnc Weight 5": "文字編碼器權重 5️⃣", - "Extra args": "額外參數", - "mask image:": "遮罩圖像:", - "Refresh models": "重新整理模型列表", - "AddNet Model 1": "[附加網絡] 模型 1️⃣", - "AddNet Weight 1": "[附加網路] 權重 1️⃣", - "AddNet UNet Weight 1": "[附加網路] UNet 權重 1️⃣", - "AddNet TEnc Weight 1": "[附加網路] 文字編碼器權重 1️⃣", - "AddNet Model 2": "[附加網路] 模型 2️⃣", - "AddNet Weight 2": "[附加網路] 權重 2️⃣", - "AddNet UNet Weight 2": "[附加網路] UNet 權重 2️⃣", - "AddNet TEnc Weight 2": "[附加網路] 文字編碼器權重 2️⃣", - "AddNet Model 3": "[附加網路] 模型 3️⃣", - "AddNet Weight 3": "[附加網路] 權重 3️⃣", - "AddNet UNet Weight 3": "[附加網路] UNet 權重 3️⃣", - "AddNet TEnc Weight 3": "[附加網路] 文字編碼器權重 3️⃣", - "AddNet Model 4": "[附加網路] 模型 4️⃣", - "AddNet Weight 4": "[附加網路] 權重 4️⃣", - 
"AddNet UNet Weight 4": "[附加網路] UNet 權重 4️⃣", - "AddNet TEnc Weight 4": "[附加網路] 文字編碼器權重 4️⃣", - "AddNet Model 5": "[附加網路] 模型 5️⃣", - "AddNet Weight 5": "[附加網路] 權重 5️⃣", - "AddNet UNet Weight 5": "[附加網路] UNet 權重 5️⃣", - "AddNet TEnc Weight 5": "[附加網路] 文字編碼器權重 5️⃣", - "Model path filter": "模型路徑過濾器", - "Filter models by path name": "模型列表將僅顯示此路徑下的模型", - "Network module": "附加網路類型", - "Model hash": "模型雜湊值", - "Legacy hash": "舊雜湊值", - "Model path": "模型路徑", - "Send to txt2img:": ">> 文生圖\n(數字對應模型號碼)", - "Send to img2img:": ">> 圖生圖\n(數字對應模型號碼)", - "Copy metadata to other models in directory": "複製中繼資料至其他目錄中的模型", - "Containing directory": "目標模型目錄", - "All models in this directory will receive the selected model's metadata": "此目錄下的所有模型將被寫入所選模型的中繼資料", - "Only copy to models with same session ID": "僅複製到具有相同作業階段 ID 的模型", - "Only copy to models with no metadata": "僅複製到沒有中繼資料的模型(不覆蓋原中繼資料)", - "Copy Metadata": "複製中繼資料", - "Display name for this model": "此模型的顯示名稱", - "Author": "作者", - "Author of this model": "此模型的作者", - "Keywords": "觸發提示詞", - "Activation keywords, comma-separated": "觸發提示詞,以逗號分隔", - "Model description/readme/notes/instructions": "模型的描述資訊", - "Source URL where this model could be found": "模型的發布網址", - "Rating": "評分", - "Comma-separated list of tags (\"artist, style, character, 2d, 3d...\")": "此模型的標記列表(\"artist, style, character, 2d, 3d...\")", - "Editing Enabled": "啟用中繼資料編輯", - "Save Metadata": "儲存中繼資料", - "Cover image": "封面圖像", - "Image Parameters": "圖像參數", - "Training info": "訓練資訊", - "Most frequent tags in captions": "訓練用描述最常用的標記", - "Dataset folder structure": "資料集資料夾結構", - "Image Count": "圖像數", - "Repeats": "重複", - "Total Images": "圖像總數", - "Training parameters": "訓練參數", - "copy to clipboard": "複製到剪貼簿", - "Extra paths to scan for LoRA models, comma-separated. Paths containing commas must be enclosed in double quotes. 
In the path, \" (one quote) must be replaced by \"\" (two quotes).": "掃描 LoRA模型的額外目錄,以逗號分隔。包含逗號的路徑必須用雙引號括起來。在路徑中,一個引號「\"」必須替換為「\"\"」兩個引號。", - "Sort LoRA models by": "LoRA 模型的排序方式", - "name": "名稱", - "rating": "評分", - "has user metadata": "有使用者中繼資料", - "Reverse model sort order": "反向排序", - "LoRA model name filter": "LoRA 模型名稱過濾器", - "Metadata to show in XY-Grid label for Model axes, comma-separated (example: \"ss_learning_rate, ss_num_epochs\")": "顯示於 X/Y 圖表的中繼資料,以逗號分隔(例如:\"ss_learning_rate, ss_num_epochs\")", - "# of threads to use for hash calculation (increase if using an SSD)": "用於雜湊值計算的線程數(如果使用 SSD 可適量增加)", - "Make a backup copy of the model being edited when saving its metadata.": "儲存中繼資料時,備份正在編輯的模型", - "Only show .safetensors format models": "僅顯示 .safetensors 檔案格式的模型", - "Only show models that have/don't have user-added metadata": "僅顯示(有 / 無)使用者中繼資料", - "has metadata": "有中繼資料", - "missing metadata": "缺少中繼資料", - "Max number of top tags to show": "最多顯示幾個常用標記", - "Max number of dataset folders to show": "最多顯示幾個數據集資料夾", - "sd-webui-additional-networks": "sd-webui-additional-networks", - "https://github.com/kohya-ss/sd-webui-additional-networks.git": "https://github.com/kohya-ss/sd-webui-additional-networks.git", - "This extension works well with text captions in comma-separated style (such as the tags generated by DeepBooru interrogator).": "This extension works well with text captions in comma-separated style (such as the tags generated by DeepBooru interrogator).", - "Save all changes": "Save all changes", - "Backup original text file (original file will be renamed like filename.000, .001, .002, ...)": "Backup original text file (original file will be renamed like filename.000, .001, .002, ...)", - "Note:": "Note:", - "New text file will be created if you are using filename as captions.": "New text file will be created if you are using filename as captions.", - "Use kohya-ss's finetuning metadata json": "Use kohya-ss's finetuning metadata json", - "json path": 
"json path", - "json input path (Optional, only for append results)": "json input path (Optional, only for append results)", - "Overwrite if output file exists": "Overwrite if output file exists", - "Save metadata as caption": "Save metadata as caption", - "Save metadata image key as fullpath": "Save metadata image key as fullpath", - "Results": "Results", - "Reload/Save Settings (config.json)": "Reload/Save Settings (config.json)", - "Reload settings": "Reload settings", - "Save current settings": "Save current settings", - "Restore settings to default": "Restore settings to default", - "Caption File Ext": "Caption File Ext", - "Load": "載入", - "Unload": "Unload", - "Dataset Load Settings": "Dataset Load Settings", - "Load from subdirectories": "Load from subdirectories", - "Load caption from filename if no text file exists": "Load caption from filename if no text file exists", - "Replace new-line character with comma": "Replace new-line character with comma", - "Use Interrogator Caption": "Use Interrogator Caption", - "If Empty": "If Empty", - "Overwrite": "Overwrite", - "Prepend": "Prepend", - "Append": "Append", - "Interrogators": "Interrogators", - "Interrogator Settings": "Interrogator Settings", - "Use Custom Threshold (Booru)": "Use Custom Threshold (Booru)", - "Booru Score Threshold": "Booru Score Threshold", - "Use Custom Threshold (WDv1.4 Tagger)": "Use Custom Threshold (WDv1.4 Tagger)", - "WDv1.4 Tagger Score Threshold": "WDv1.4 Tagger Score Threshold", - "Dataset Filter": "Dataset Filter", - "Filter Apply": "套用過濾器", - "hidden_idx_next": "hidden_idx_next", - "hidden_idx_prev": "hidden_idx_prev", - "Dataset Images": "Dataset Images", - "Filter by Tags": "Filter by Tags", - "Filter by Selection": "Filter by Selection", - "Batch Edit Captions": "Batch Edit Captions", - "Edit Caption of Selected Image": "Edit Caption of Selected Image", - "Move or Delete Files": "Move or Delete Files", - "Clear tag filters": "Clear tag filters", - "Clear ALL filters": "Clear 
ALL filters", - "Positive Filter": "Positive Filter", - "Negative Filter": "Negative Filter", - "Search tags / Filter images by tags": "Search tags / Filter images by tags", - "(INCLUSIVE)": "(INCLUSIVE)", - "Search Tags": "Search Tags", - "Prefix": "Prefix", - "Suffix": "Suffix", - "Use regex": "Use regex", - "Alphabetical Order": "Alphabetical Order", - "Frequency": "Frequency", - "Length": "Length", - "Token Length": "Token Length", - "Sort Order": "Sort Order", - "Ascending": "Ascending", - "Descending": "Descending", - "Filter Logic": "Filter Logic", - "OR": "OR", - "Filter Images by Tags": "Filter Images by Tags", - "(EXCLUSIVE)": "(EXCLUSIVE)", - "Select images from the left gallery.": "Select images from the left gallery.", - "Add selection [Enter]": "Add selection [Enter]", - "Add ALL Displayed": "Add ALL Displayed", - "Filter Images": "Filter Images", - "Selected Image :": "Selected Image :", - "Remove selection [Delete]": "Remove selection [Delete]", - "Invert selection": "Invert selection", - "Clear selection": "Clear selection", - "Apply selection filter": "Apply selection filter", - "Search and Replace": "Search and Replace", - "Edit common tags.": "Edit common tags.", - "Show only the tags selected in the Positive Filter": "Show only the tags selected in the Positive Filter", - "Common Tags": "Common Tags", - "Edit Tags": "Edit Tags", - "Prepend additional tags": "Prepend additional tags", - "Apply changes to filtered images": "Apply changes to filtered images", - "Show description of how to edit tags": "Show description of how to edit tags", - "1. The tags common to all displayed images are shown in comma separated style.": "1. The tags common to all displayed images are shown in comma separated style.", - "2. When changes are applied, all tags in each displayed images are replaced.": "2. When changes are applied, all tags in each displayed images are replaced.", - "3. If you change some tags into blank, they will be erased.": "3. 
If you change some tags into blank, they will be erased.", - "4. If you add some tags to the end, they will be added to the end/beginning of the text file.": "4. If you add some tags to the end, they will be added to the end/beginning of the text file.", - "5. Changes are not applied to the text files until the \"Save all changes\" button is pressed.": "5. Changes are not applied to the text files until the \"Save all changes\" button is pressed.", - "ex A.": "ex A.", - "Original Text = \"A, A, B, C\" Common Tags = \"B, A\" Edit Tags = \"X, Y\"": "Original Text = \"A, A, B, C\" Common Tags = \"B, A\" Edit Tags = \"X, Y\"", - "Result = \"Y, Y, X, C\" (B->X, A->Y)": "Result = \"Y, Y, X, C\" (B->X, A->Y)", - "ex B.": "ex B.", - "Original Text = \"A, B, C\" Common Tags = \"(nothing)\" Edit Tags = \"X, Y\"": "Original Text = \"A, B, C\" Common Tags = \"(nothing)\" Edit Tags = \"X, Y\"", - "Result = \"A, B, C, X, Y\" (add X and Y to the end (default))": "Result = \"A, B, C, X, Y\" (add X and Y to the end (default))", - "Result = \"X, Y, A, B, C\" (add X and Y to the beginning (\"Prepend additional tags\" checked))": "Result = \"X, Y, A, B, C\" (add X and Y to the beginning (\"Prepend additional tags\" checked))", - "ex C.": "ex C.", - "Original Text = \"A, B, C, D, E\" Common Tags = \"A, B, D\" Edit Tags = \", X, \"": "Original Text = \"A, B, C, D, E\" Common Tags = \"A, B, D\" Edit Tags = \", X, \"", - "Result = \"X, C, E\" (A->\"\", B->X, D->\"\")": "Result = \"X, C, E\" (A->\"\", B->X, D->\"\")", - "Search and Replace for all images displayed.": "Search and Replace for all images displayed.", - "Search Text": "Search Text", - "Replace Text": "Replace Text", - "Search and Replace in": "Search and Replace in", - "Only Selected Tags": "Only Selected Tags", - "Each Tags": "Each Tags", - "Entire Caption": "Entire Caption", - "Selected Tags": "Selected Tags", - "duplicate": "duplicate", - "tags from the images displayed.": "tags from the images displayed.", - "Remove 
duplicate tags": "Remove duplicate tags", - "selected": "selected", - "Remove selected tags": "Remove selected tags", - "Select visible tags": "Select visible tags", - "Deselect visible tags": "Deselect visible tags", - "Select Tags": "Select Tags", - "Sort tags in the images displayed.": "Sort tags in the images displayed.", - "Sort tags": "Sort tags", - "Truncate tags by token count.": "Truncate tags by token count.", - "Truncate tags by token count": "Truncate tags by token count", - "hidden_s_or_n": "hidden_s_or_n", - "Read Caption from Selected Image": "Read Caption from Selected Image", - "Interrogate Selected Image": "Interrogate Selected Image", - "Caption of Selected Image": "Caption of Selected Image", - "Copy and Overwrite": "Copy and Overwrite", - "BLIP": "BLIP", - "wd-v1-4-vit-tagger": "wd-v1-4-vit-tagger", - "wd-v1-4-convnext-tagger": "wd-v1-4-convnext-tagger", - "wd-v1-4-vit-tagger-v2": "wd-v1-4-vit-tagger-v2", - "wd-v1-4-convnext-tagger-v2": "wd-v1-4-convnext-tagger-v2", - "wd-v1-4-swinv2-tagger-v2": "wd-v1-4-swinv2-tagger-v2", - "Interrogate Result": "Interrogate Result", - "Copy caption from selected images automatically": "Copy caption from selected images automatically", - "Sort caption on save": "Sort caption on save", - "Warn if changes in caption is not saved": "Warn if changes in caption is not saved", - "Edit Caption": "Edit Caption", - "Apply changes to selected image": "Apply changes to selected image", - "Apply changes to ALL displayed images": "Apply changes to ALL displayed images", - "Changes are not applied to the text files until the \"Save all changes\" button is pressed.": "Changes are not applied to the text files until the \"Save all changes\" button is pressed.", - "Moved or deleted images will be unloaded.": "Moved or deleted images will be unloaded.", - "Move or Delete": "Move or Delete", - "Selected One": "Selected One", - "All Displayed Ones": "All Displayed Ones", - "Image File": "Image File", - "Caption Text File": 
"Caption Text File", - "Caption Backup File": "Caption Backup File", - "Target dataset num: 0": "Target dataset num: 0", - "Destination Directory": "Destination Directory", - "Move File(s)": "Move File(s)", - "DELETE cannot be undone. The files will be deleted completely.": "DELETE cannot be undone. The files will be deleted completely.", - "DELETE File(s)": "DELETE File(s)", - "Number of columns on image gallery": "Number of columns on image gallery", - "Force image gallery to use temporary files": "Force image gallery to use temporary files", - "Use raw CLIP token to calculate token count (without emphasis or embeddings)": "Use raw CLIP token to calculate token count (without emphasis or embeddings)", - "stable-diffusion-webui-dataset-tag-editor": "stable-diffusion-webui-dataset-tag-editor", - "https://github.com/toshiaki1729/stable-diffusion-webui-dataset-tag-editor": "https://github.com/toshiaki1729/stable-diffusion-webui-dataset-tag-editor", - "C:\\path\\to\\metadata.json": "C:\\path\\to\\metadata.json", - "C:\\directory\\of\\datasets": "C:\\directory\\of\\datasets", - ".txt (on Load and Save)": ".txt (on Load and Save)", - "txt": "txt", - "sd-discord-rich_presence": "sd-discord-rich_presence", - "https://github.com/davehornik/sd-discord-rich_presence": "https://github.com/davehornik/sd-discord-rich_presence", - "b675bf93 (Sun Apr 30 00:56:51 2023)": "b675bf93 (Sun Apr 30 00:56:51 2023)", - "Unprompted Seed": "Unprompted Seed", + "Model Pre​views": "模型預覽器", + "Embeddings": "嵌入", + "Filter": "過濾器", + "Model Preview XD": "模型預覽器", + "Name matching rule for preview files": "預覽檔的名稱符合規則", + "Loose": "寬鬆", + "Strict": "嚴格", + "Folder": "檔案夾", + "Limit the height of preivews to the height of the browser window (.html preview files are always limited regardless of this setting)": "將預覽的高度限制為瀏覽器視窗高度(.html 預覽檔始終受此設定的限制)", + "No Preview Found": "查無預覽檔", + "https://github.com/CurtisDS/sd-model-preview-xd.git": "https://github.com/CurtisDS/sd-model-preview-xd.git", + 
"Records list": "Records list", + "Reload": "Reload", + "Download All": "Download All", + "Display options": "Display options", + "Import/Export": "Import/Export", + "Back": "Back", + "Description:": "Description:", + "Name:": "Name:", + "Model title to display (Required)": "Model title to display (Required)", + "Model type:": "Model type:", + "Model type (Required)": "Model type (Required)", + "Download URL:": "Download URL:", + "Link to the model file (Required)": "Link to the model file (Required)", + "Preview image URL:": "Preview image URL:", + "Link to the image for preview (Optional)": "Link to the image for preview (Optional)", + "Model page URL:": "Model page URL:", + "Link to the model page (Optional)": "Link to the model page (Optional)", + "Groups": "Groups", + "Select existing groups or add new.": "Select existing groups or add new.", + "Add groups": "Add groups", + "Bind with existing model": "Bind with existing model", + "Download options": "Download options", + "description_output_widget": "description_output_widget", + "Record removal": "Record removal", + "Remove record": "Remove record", + "Remove files": "Remove files", + "Downloads": "Downloads", + "Start Download": "Start Download", + "Layout Type:": "Layout Type:", + "Cards": "Cards", + "Table": "Table", + "Card width (default if 0):": "Card width (default if 0):", + "Card height (default if 0):": "Card height (default if 0):", + "Storage Type:": "Storage Type:", + "SQLite": "SQLite", + "Firebase": "Firebase", + "Download Preview": "Download Preview", + "sd-model-organizer": "sd-model-organizer", + "https://github.com/alexandersokol/sd-model-organizer": "https://github.com/alexandersokol/sd-model-organizer", + "652580de (Mon May 8 11:00:16 2023)": "652580de (Mon May 8 11:00:16 2023)", + "Sort By": "Sort By", + "Downloaded first": "Downloaded first", + "Search by name": "Search by name", + "Model types": "Model types", + "Show downloaded": "Show downloaded", + "Show not downloaded": "Show not 
downloaded", + "Time Added": "Time Added", + "Time Added Reversed": "Time Added Reversed", + "Name Reversed": "Name Reversed", + "Hyper Network": "Hyper Network", + "LyCORIS": "LyCORIS", + "Import .json file": "Import .json file", + "Add model": "Add model", + "Download Path:": "Download Path:", + "Path to the download dir, default if empty. (Required for \"Other\" model type)": "Path to the download dir, default if empty. (Required for \"Other\" model type)", + "Download File Name:": "Download File Name:", + "Downloaded file name. Default if empty (Optional)": "Downloaded file name. Default if empty (Optional)", + "Subdir": "Subdir", + "Download file into sub directory (Optional)": "Download file into sub directory (Optional)", + "Add new group": "Add new group", + "Type comma-separated group names": "Type comma-separated group names", + "Add Group": "Add Group", + "Positive prompts:": "Positive prompts:", + "Model positive prompts (Optional)": "Model positive prompts (Optional)", + "Negative prompts:": "Negative prompts:", + "Model negative prompts (Optional)": "Model negative prompts (Optional)", + "Aesthetic": "Aesthetic", + "Waifu": "Waifu", + "Start": "Start", + "stable-diffusion-webui-cafe-aesthetic": "stable-diffusion-webui-cafe-aesthetic", + "https://github.com/p1atdev/stable-diffusion-webui-cafe-aesthetic": "https://github.com/p1atdev/stable-diffusion-webui-cafe-aesthetic", + "stable-diffusion-webui-depthmap-script": "stable-diffusion-webui-depthmap-script", + "Compute on": "計算於", + "Match input size (size is ignored when using boost)": "符合輸入大小(當使用加速器時,大小將被忽略)", + "Combine into one image.": "Combine into one image.", + "Generate Stereo side-by-side image": "Generate Stereo side-by-side image", + "Generate Stereo anaglyph image (red/cyan)": "Generate Stereo anaglyph image (red/cyan)", + "Generate 3D inpainted mesh. (Sloooow)": "Generate 3D inpainted mesh. 
(Sloooow)", + "Save the foreground masks": "儲存前景遮罩", + "pre-depth background removal": "pre-depth background removal", + "Rembg Model": "Rembg Model", + "Input Mesh (.ply)": "Input Mesh (.ply)", + "Generate video from inpainted mesh.": "Generate video from inpainted mesh.", + "https://github.com/thygate/stable-diffusion-webui-depthmap-script.git": "https://github.com/thygate/stable-diffusion-webui-depthmap-script.git", + "Smart Preprocess": "智慧預處理", + "sd_smartprocess": "sd_smartprocess", + "Rename images": "重新命名圖像", + "Output Size": "輸出尺寸", + "Pad Images": "Pad Images", + "Generate Captions": "產生描述", + "Max Caption Length (0=unlimited)": "最大描述長度(設 0 為無限制)", + "Existing Caption Action": "既有描述處理", + "Add CLIP results to Caption": "將 CLIP 結果加入描述", + "Number of CLIP beams": "Number of CLIP beams", + "Use v2 CLIP Model": "使用 v2 CLIP 模型", + "Max flavors to append.": "Max flavors to append.", + "Append Movement tags from CLIP": "Append Movement tags from CLIP", + "Append Trending tags from CLIP": "Append Trending tags from CLIP", + "Add WD14 Tags to Caption": "Add WD14 Tags to Caption", + "Minimum Score for WD14 Tags": "Minimum Score for WD14 Tags", + "Minimum Score for DeepDanbooru Tags": "Minimum Score for DeepDanbooru Tags", + "Tags To Ignore": "要忽略的標記", + "Replace Class with Subject in Caption": "在描述中將類別取代為主題", + "Subject Class": "主題類別", + "Subject class to crop (leave blank to auto-detect)": "要裁切的主題類別(留空以自動偵測)", + "Subject Name": "主題名稱", + "Subject Name to replace class with in captions": "要在描述中將類別取代的主題名稱", + "Upscale and Resize": "放大並縮放比例", + "https://github.com/d8ahazard/sd_smartprocess.git": "https://github.com/d8ahazard/sd_smartprocess.git", + "Ultimate SD upscale": "終極 SD 放大", + "Will upscale the image depending on the selected target size type": "將根據選擇的目標尺寸類型對圖像進行放大。", + "Target size type": "圖像尺寸類型", + "From img2img2 settings": "依照圖生圖設定", + "Scale from image size": "從圖像大小縮放", + "Custom width": "自訂寬度", + "Custom height": "自訂高度", + "Redraw options:": "重繪選項:", + 
"Padding": "內距", + "Seams fix:": "儲存接縫修復圖像", + "Band pass": "帶通", + "Half tile offset pass": "半圖塊偏移過濾", + "Half tile offset pass + intersections": "半圖塊偏移過濾 + 交集", + "Save options:": "輸出到 output 的圖像:", + "Upscaled": "儲存 SD 放大的圖像", + "ultimate-upscale-for-automatic1111": "ultimate-upscale-for-automatic1111", + "https://github.com/Coyote-A/ultimate-upscale-for-automatic1111.git": "https://github.com/Coyote-A/ultimate-upscale-for-automatic1111.git", "Functions": "Functions", "Select function:": "Select function:", - "Options": "Options", "Example Function": "Example Function", "Enter a subject 🡢 subject": "Enter a subject 🡢 subject", "Add fluff terms? 🡢 use_fluff": "Add fluff terms? 🡢 use_fluff", @@ -4320,7 +8799,6 @@ "repeat: Returns the content an arbitrary number of times.": "repeat: Returns the content an arbitrary number of times.", "Number of times to repeat the content 🡢 int": "Number of times to repeat the content 🡢 int", "Delimiter string between outputs 🡢 _sep": "Delimiter string between outputs 🡢 _sep", - "replace": "replace", "replace: Updates a string using the arguments for replacement logic.": "replace: Updates a string using the arguments for replacement logic.", "Arbitrary replacement arguments in old=new format 🡢 verbatim": "Arbitrary replacement arguments in old=new format 🡢 verbatim", "Original value, with advanced expression support 🡢 _from": "Original value, with advanced expression support 🡢 _from", @@ -4360,7 +8838,6 @@ "Arbitrary conditional statement(s) to test against 🡢 verbatim": "Arbitrary conditional statement(s) to test against 🡢 verbatim", "Invert evaluation such that a false condition will end the loop 🡢 _not": "Invert evaluation such that a false condition will end the loop 🡢 _not", "controlnet": "controlnet", - "controlnet: A neural network structure to control diffusion models by adding extra conditions. Check manual for setup info.": "controlnet: A neural network structure to control diffusion models by adding extra conditions. 
Check manual for setup info.", "Model name (do not include extension) 🡢 model": "Model name (do not include extension) 🡢 model", "Resolution of the detection map 🡢 detect_resolution": "Resolution of the detection map 🡢 detect_resolution", "Use low VRAM mode? 🡢 save_memory": "Use low VRAM mode? 🡢 save_memory", @@ -4377,9 +8854,6 @@ "file2mask: Modify or replace your img2img mask with arbitrary files.": "file2mask: Modify or replace your img2img mask with arbitrary files.", "Path to image file 🡢 str": "Path to image file 🡢 str", "Mask blend mode 🡢 mode": "Mask blend mode 🡢 mode", - "add": "add", - "subtract": "subtract", - "discard": "discard", "Show mask in output 🡢 show": "Show mask in output 🡢 show", "img2img: Runs an img2img task inside of an [after] block.": "img2img: Runs an img2img task inside of an [after] block.", "img2img_autosize": "img2img_autosize", @@ -4403,7 +8877,6 @@ "Try freeing CLIP model from memory? 🡢 free_memory": "Try freeing CLIP model from memory? 🡢 free_memory", "init_image": "init_image", "init_image: Loads an image from the given path and sets it as the initial image for use with img2img.": "init_image: Loads an image from the given path and sets it as the initial image for use with img2img.", - "Image path": "Image path", "instance2mask": "instance2mask", "instance2mask: Creates an image mask from instances of types specified by the content for use with inpainting.": "instance2mask: Creates an image mask from instances of types specified by the content for use with inpainting.", "refine": "refine", @@ -4457,527 +8930,9 @@ "Upscale height 🡢 upscale_height": "Upscale height 🡢 upscale_height", "Include original image in output window 🡢 include_original": "Include original image in output window 🡢 include_original", "Save debug images to WebUI folder 🡢 save": "Save debug images to WebUI folder 🡢 save", - "Test prompt": "測試提示詞", - "Process Text": "Process Text", "Re-process extra networks after Unprompted is finished (WIP - this is not yet 
functional!)": "Re-process extra networks after Unprompted is finished (WIP - this is not yet functional!)", "unprompted": "unprompted", "https://github.com/ThereforeGames/unprompted.git": "https://github.com/ThereforeGames/unprompted.git", - "Conditioning Highres": "調整高解析度", - "Conditioning Highres.fix strength (for sd-v1-5-inpainting)": "高解析度修復原圖調節強度(專為 sd-v1-5-inpainting 設計)", - "Cond.fix: Disabled (none)": "條件修復:停用(無)", - "Cond.fix: Empty": "條件修復: 無", - "Cond.fix: Lowest": "條件修復: 最小", - "Cond.fix: Low": "條件修復: 小", - "Cond.fix: Medium": "條件修復: 中", - "Cond.fix: High (recommended)": "條件修復: 高(推薦)", - "Cond.fix: Highest": "條件修復: 最高", - "Cond.fix: Full": "條件修復: 完全", - "stable-diffusion-webui-conditioning-highres-fix": "stable-diffusion-webui-conditioning-highres-fix", - "https://github.com/klimaleksus/stable-diffusion-webui-conditioning-highres-fix.git": "https://github.com/klimaleksus/stable-diffusion-webui-conditioning-highres-fix.git", - "Embedding Editor": "嵌入編輯器", - "Vector": "向量", - "Refresh Embeddings": "重新整理多個嵌入", - "Save Embedding": "儲存嵌入", - "Enter words and color hexes to mark weights on the sliders for guidance. Hint: Use the txt2img prompt token counter or": "輸入文字和顏色十六進制代碼以在滑桿上標記權重作為引導。 提示:使用文生圖提示詞標記計數器或使用", - "webui-tokenizer": "標記解析器擴充功能", - "to see which words are constructed using multiple sub-words, e.g. 'computer' doesn't exist in stable diffusion's CLIP dictionary and instead 'compu' and 'ter' are used (1 word but 2 embedding vectors). Currently buggy and needs a moment to process before pressing the button. 
If it doesn't work after a moment, try adding a random space to refresh it.": "查看哪些詞是使用多個子詞構成的,例如 Stable Diffusion 的 CLIP 字典中不存在 'computer',而是使用 'compu' 以及 'ter'(一個單字但使用兩個嵌入向量)。目前這個擴充功能還有點問題,在按下按鈕之前需要一點時間來處理。如果過了一段時間還是不行,試試隨便加個空格重新整理一下", - "Sampling Steps": "採樣疊代步數", - "Generate Preview": "產生預覽", - "stable-diffusion-webui-embedding-editor": "stable-diffusion-webui-embedding-editor", - "https://github.com/CodeExplode/stable-diffusion-webui-embedding-editor.git": "https://github.com/CodeExplode/stable-diffusion-webui-embedding-editor.git", - "symbol:color-hex, symbol:color-hex, ...": "文字:顏色代碼, 文字:顏色代碼, ...", - "e.g. A portrait photo of embedding_name": "示例: A portrait photo of embedding_name", - "Create aesthetic embedding": "建立美學嵌入", - "Open for Clip Aesthetic!": "打開以調整 CLIP 美學!", - "Aesthetic weight": "美學權重", - "Aesthetic steps": "美術風格疊代步數", - "Aesthetic learning rate": "美學學習率", - "Slerp interpolation": "球面線性插值角度", - "Aesthetic imgs embedding": "美學圖集嵌入", - "Aesthetic text for imgs": "該圖集的美學描述", - "Slerp angle": "球面線性插值角度", - "Is negative text": "是反向提示詞", - "Create an aesthetic embedding out of any number of images": "從任意數量的圖像中建立美學嵌入", - "Create images embedding": "建立圖集嵌入", - "stable-diffusion-webui-aesthetic-gradients": "stable-diffusion-webui-aesthetic-gradients", - "This text is used to rotate the feature space of the imgs embs": "此文本用於旋轉圖集嵌入的特徵空間", - "https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients.git": "https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients.git", - "✕": "✕", - "[NPW] Weight": "[NPW] 權重", - "stable-diffusion-NPW": "stable-diffusion-NPW", - "https://github.com/muerrilla/stable-diffusion-NPW": "https://github.com/muerrilla/stable-diffusion-NPW", - "Generate video": "Generate video", - "Outpaint": "Outpaint", - "Post proccess": "Post proccess", - "Total Outpaint Steps": "Total Outpaint Steps", - "The more it is, the longer your videos will be": "The more it is, the longer your videos will be", - 
"outpaint steps": "outpaint steps", - "Huge spectacular Waterfall in a dense tropical forest,epic perspective,(vegetation overgrowth:1.3)(intricate, ornamentation:1.1),(baroque:1.1), fantasy, (realistic:1) digital painting , (magical,mystical:1.2) , (wide angle shot:1.4), (landscape composed:1.2)(medieval:1.1), divine,cinematic,(tropical forest:1.4),(river:1.3)mythology,india, volumetric lighting, Hindu ,epic, Alex Horley Wenjun Lin greg rutkowski Ruan Jia (Wayne Barlowe:1.2) ": "Huge spectacular Waterfall in a dense tropical forest,epic perspective,(vegetation overgrowth:1.3)(intricate, ornamentation:1.1),(baroque:1.1), fantasy, (realistic:1) digital painting , (magical,mystical:1.2) , (wide angle shot:1.4), (landscape composed:1.2)(medieval:1.1), divine,cinematic,(tropical forest:1.4),(river:1.3)mythology,india, volumetric lighting, Hindu ,epic, Alex Horley Wenjun Lin greg rutkowski Ruan Jia (Wayne Barlowe:1.2) ", - "New row": "New row", - "Negative Prompt": "Negative Prompt", - "Export prompts": "Export prompts", - "Import prompts": "Import prompts", - "Clear prompts": "Clear prompts", - "Output Width": "Output Width", - "Output Height": "Output Height", - "Guidance Scale": "Guidance Scale", - "Sampling Steps for each outpaint": "Sampling Steps for each outpaint", - "custom initial image": "custom initial image", - "custom exit image": "custom exit image", - "Batch Count": "Batch Count", - "Frames per second": "每秒多少幀", - "Zoom mode": "Zoom mode", - "Zoom-out": "Zoom-out", - "Zoom-in": "Zoom-in", - "number of start frame dupe": "number of start frame dupe", - "Frames to freeze at the start of the video": "Frames to freeze at the start of the video", - "number of last frame dupe": "number of last frame dupe", - "Frames to freeze at the end of the video": "Frames to freeze at the end of the video", - "Zoom Speed": "Zoom Speed", - "Zoom speed in seconds (higher values create slower zoom)": "Zoom speed in seconds (higher values create slower zoom)", - "Denoising 
Strength": "Denoising Strength", - "Mask Blur": "Mask Blur", - "Inpaint Full Resolution": "Inpaint Full Resolution", - "masked padding": "masked padding", - "Enable Upscale": "Enable Upscale", - "Upscale by factor": "Upscale by factor", - "Path where to store your infinite video. Default is Outputs": "Path where to store your infinite video. Default is Outputs", - "Which subfolder name to be created in the outpath. Default is 'infinite-zooms'": "Which subfolder name to be created in the outpath. Default is 'infinite-zooms'", - "Default width of your video": "Default width of your video", - "Default height your video": "Default height your video", - "Writing videos has dependency to an existing FFPROBE executable on your machine. D/L here (https://github.com/BtbN/FFmpeg-Builds/releases) your OS variant and point to your installation path": "Writing videos has dependency to an existing FFPROBE executable on your machine. D/L here (https://github.com/BtbN/FFmpeg-Builds/releases) your OS variant and point to your installation path", - "Name of your desired model to render keyframes (txt2img)": "Name of your desired model to render keyframes (txt2img)", - "Name of your desired inpaint model (img2img-inpaint). Default is vanilla sd-v1-5-inpainting.ckpt": "Name of your desired inpaint model (img2img-inpaint). 
Default is vanilla sd-v1-5-inpainting.ckpt", - "Default prompt-setup to start with'": "Default prompt-setup to start with'", - "infinite-zoom-automatic1111-webui": "infinite-zoom-automatic1111-webui", - "https://github.com/v8hid/infinite-zoom-automatic1111-webui": "https://github.com/v8hid/infinite-zoom-automatic1111-webui", - "28a1a0f4 (Sat Apr 29 11:45:36 2023)": "28a1a0f4 (Sat Apr 29 11:45:36 2023)", - "Cutoff": "Cutoff", - "Target tokens (comma separated)": "Target tokens (comma separated)", - "Details": "Details", - "Disable for Negative prompt.": "Disable for Negative prompt.", - "Cutoff strongly.": "Cutoff strongly.", - "Padding token (ID or single token)": "Padding token (ID or single token)", - "Interpolation method": "Interpolation method", - "Debug log": "Debug log", - "Cutoff Enabled": "Cutoff Enabled", - "Cutoff Targets": "Cutoff Targets", - "Cutoff Weight": "Cutoff Weight", - "Cutoff Disable for Negative Prompt": "Cutoff Disable for Negative Prompt", - "Cutoff Strong": "Cutoff Strong", - "Cutoff Padding": "Cutoff Padding", - "Cutoff Interpolation": "Cutoff Interpolation", - "sd-webui-cutoff": "sd-webui-cutoff", - "https://github.com/hnmr293/sd-webui-cutoff.git": "https://github.com/hnmr293/sd-webui-cutoff.git", - "red, blue": "red, blue", - "Server start time": "Server start time", - "Memory": "Memory", - "System data": "System data", - "Platform": "Platform", - "Torch": "Torch", - "Memory optimization": "Memory optimization", - "Cross-attention": "Cross-attention", - "Libs": "Libs", - "Repos": "Repos", - "Benchmarks...": "Benchmarks...", - "Benchmark Data": "Benchmark Data", - "timestamp": "timestamp", - "performance": "performance", - "version": "version", - "system": "system", - "libraries": "libraries", - "gpu": "gpu", - "optimizations": "optimizations", - "username": "username", - "note": "note", - "hash": "雜湊值", - "Username": "Username", - "Note": "Note", - "Console logging": "Console logging", - "Perform warmup": "Perform warmup", - "Extra 
steps": "Extra steps", - "Benchmark level": "Benchmark level", - "quick": "quick", - "normal": "普通", - "extensive": "extensive", - "Run benchmark": "執行效能評測", - "Submit results": "Submit results", - "Link to online results": "Link to online results", - "performance is measured in iterations per second (it/s) and reported for different batch sizes (e.g. 1, 2, 4, 8, 16...)": "performance is measured in iterations per second (it/s) and reported for different batch sizes (e.g. 1, 2, 4, 8, 16...)", - "running benchmark may take a while. extensive tests may result in gpu out-of-memory conditions.": "running benchmark may take a while. extensive tests may result in gpu out-of-memory conditions.", - "Refresh bench": "Refresh bench", - "Models...": "Models...", - "Info object": "Info object", - "Refresh state": "Refresh state", - "Refresh data": "Refresh data", - "Send interrupt": "Send interrupt", - "sd-extension-system-info": "sd-extension-system-info", - "https://github.com/vladmandic/sd-extension-system-info": "https://github.com/vladmandic/sd-extension-system-info", - "70ab5cf3 (Sat Apr 29 21:35:48 2023)": "70ab5cf3 (Sat Apr 29 21:35:48 2023)", - "enter username for submission": "enter username for submission", - "enter any additional notes": "enter any additional notes", - "Models": "Models", - "Embeddings: loaded": "Embeddings: loaded", - "Embeddings: skipped": "Embeddings: skipped", - "Available LORAs": "Available LORAs", - "Save to DB": "Save to DB", - "Database Name": "Database Name", - "Collection Name": "Collection Name", - "https://github.com/takoyaro/db-storage1111.git": "https://github.com/takoyaro/db-storage1111.git", - "Maximum width or height (whichever is higher)": "最大寬度或高度(無論哪個較高)", - "Scale to maximum width or height": "放大至最大寬度或高度", - "-75%": "-75%", - "-50%": "-50%", - "-25%": "-25%", - "+25%": "+25%", - "+50%": "+50%", - "+75%": "+75%", - "+100%": "+100%", - "Expand by default": "預設展開擴充功能", - "Show maximum width or height button": "顯示最大寬度或高度按鈕", - 
"Maximum width or height default": "最大寬度或高度的預設值", - "Show predefined percentage buttons": "顯示預設的百分比按鈕", - "Predefined percentage buttons, applied to dimensions (75, 125, 150)": "預設的百分比按鈕,套用於尺寸(75,125,150)", - "Predefined percentage display format": "預設百分比顯示格式", - "Incremental/decremental percentage (-50%, +50%)": "增減百分比(-50%,+50%)", - "Raw percentage (50%, 150%)": "原始百分比(50%,150%)", - "Multiplication (x0.5, x1.5)": "倍率 (x0.5, x1.5)", - "sd-webui-aspect-ratio-helper": "sd-webui-aspect-ratio-helper", - "https://github.com/thomasasfk/sd-webui-aspect-ratio-helper.git": "https://github.com/thomasasfk/sd-webui-aspect-ratio-helper.git", - "Metadeta": "Metadeta", - "Merge models and load it for generation": "Merge models and load it for generation", - "Model A": "Model A", - "Model B": "Model B", - "Model C": "Model C", - "Merge Mode": "Merge Mode", - "Weight sum:A*(1-alpha)+B*alpha": "Weight sum:A*(1-alpha)+B*alpha", - "Add difference:A+(B-C)*alpha": "Add difference:A+(B-C)*alpha", - "Triple sum:A*(1-alpha-beta)+B*alpha+C*beta": "Triple sum:A*(1-alpha-beta)+B*alpha+C*beta", - "sum Twice:(A*(1-alpha)+B*alpha)*(1-beta)+C*beta": "sum Twice:(A*(1-alpha)+B*alpha)*(1-beta)+C*beta", - "Calcutation Mode": "Calcutation Mode", - "cosineA": "cosineA", - "cosineB": "cosineB", - "smoothAdd": "smoothAdd", - "tensor": "tensor", - "use MBW": "use MBW", - "alpha": "alpha", - "beta": "beta", - "Merge!": "Merge!", - "Merge&Gen": "Merge&Gen", - "Gen": "Gen", - "save settings": "save settings", - "save model": "save model", - "overwrite": "overwrite", - "save metadata": "save metadata", - "write merged model ID to": "write merged model ID to", - "image": "image", - "merge from ID": "merge from ID", - "Set from ID(-1 for last)": "Set from ID(-1 for last)", - "Hires Fix , Batch size": "Hires Fix , Batch size", - "Elemental Merge": "Elemental Merge", - "Tensor Merge": "Tensor Merge", - "number of -1": "number of -1", - "Sequential Merge Parameters": "Sequential Merge Parameters", - "Y grid 
(Disabled if blank)": "Y grid (Disabled if blank)", - "Sequential XY Merge and Generation": "Sequential XY Merge and Generation", - "Stop XY": "Stop XY", - "Reserve XY Plot": "Reserve XY Plot", - "Current Model": "Current Model", - "Message": "Message", - "Add to Sequence X": "Add to Sequence X", - "Add to Sequence Y": "Add to Sequence Y", - "block IDs": "block IDs", - "BASE": "BASE", - "IN00": "IN00", - "IN01": "IN01", - "IN02": "IN02", - "IN03": "IN03", - "IN04": "IN04", - "IN05": "IN05", - "IN06": "IN06", - "IN07": "IN07", - "IN08": "IN08", - "IN09": "IN09", - "IN10": "IN10", - "IN11": "IN11", - "M00": "M00", - "OUT00": "OUT00", - "OUT01": "OUT01", - "OUT02": "OUT02", - "OUT03": "OUT03", - "OUT04": "OUT04", - "OUT05": "OUT05", - "OUT06": "OUT06", - "OUT07": "OUT07", - "OUT08": "OUT08", - "OUT09": "OUT09", - "OUT10": "OUT10", - "OUT11": "OUT11", - "calcmode": "calcmode", - "checkpoint": "checkpoint", - "effective chekcer settings": "effective chekcer settings", - "save csv": "save csv", - "save anime gif": "save anime gif", - "not save grid": "not save grid", - "print change": "print change", - "Weights Setting": "Weights Setting", - "Weights Presets": "Weights Presets", - "Reservation": "Reservation", - "set to alpha": "set to alpha", - "read from alpha": "read from alpha", - "set to beta": "set to beta", - "read from beta": "read from beta", - "set to X": "set to X", - "weights for alpha, base alpha,IN00,IN02,...IN11,M00,OUT00,...,OUT11": "weights for alpha, base alpha,IN00,IN02,...IN11,M00,OUT00,...,OUT11", - "weights,for beta, base beta,IN00,IN02,...IN11,M00,OUT00,...,OUT11": "weights,for beta, base beta,IN00,IN02,...IN11,M00,OUT00,...,OUT11", - "Base": "Base", - "Reload Presets": "Reload Presets", - "Reload Tags": "Reload Tags", - "Save Presets": "Save Presets", - "Open TextEditor": "Open TextEditor", - "available": "available", - "Reloat List": "Reloat List", - "Start XY plot": "Start XY plot", - "Delete list(-1 for all)": "Delete list(-1 for all)", - 
"Delete num :": "Delete num :", - "No.": "No.", - "status": "status", - "xtype": "xtype", - "xmenber": "xmenber", - "ytype": "ytype", - "ymenber": "ymenber", - "model A": "model A", - "model B": "model B", - "model C": "model C", - "mode": "mode", - "weights alpha": "weights alpha", - "weights beta": "weights beta", - "Current Cache": "Current Cache", - "Reload Cache List": "Reload Cache List", - "unload model": "unload model", - "Merge to Checkpoint": "Merge to Checkpoint", - "Make LoRA (alpha * A - beta * B)": "Make LoRA (alpha * A - beta * B)", - "Checkpoint A": "Checkpoint A", - "Checkpoint B": "Checkpoint B", - "Merge LoRAs": "Merge LoRAs", - "settings": "settings", - "same to Strength": "same to Strength", - "save precision": "save precision", - "float": "float", - "bf16": "bf16", - "remake dimension": "remake dimension", - "auto": "auto", - "filename(option)": "filename(option)", - "LoRAname1:ratio1:Blocks1,LoRAname2:ratio2:Blocks2,...(\":blocks\" is option, not necessary)": "LoRAname1:ratio1:Blocks1,LoRAname2:ratio2:Blocks2,...(\":blocks\" is option, not necessary)", - "limit dimension": "limit dimension", - "calculate dimension of LoRAs(It may take a few minutes if there are many LoRAs)": "calculate dimension of LoRAs(It may take a few minutes if there are many LoRAs)", - "update list": "update list", - "load_history": "load_history", - "search": "search", - "Search Mode": "Search Mode", - "ID": "ID", - "Time": "Time", - "Weights alpha": "Weights alpha", - "Weights beta": "Weights beta", - "Mode": "Mode", - "custum name": "custum name", - "save setting": "save setting", - "use ID": "use ID", - "load keys": "load keys", - "block": "block", - "key": "key", - "read metadata": "read metadata", - "sd-webui-supermerger": "sd-webui-supermerger", - "https://github.com/hako-mikan/sd-webui-supermerger.git": "https://github.com/hako-mikan/sd-webui-supermerger.git", - "847c8760 (Fri Apr 21 16:44:01 2023)": "847c8760 (Fri Apr 21 16:44:01 2023)", - "hiresfix": 
"hiresfix", - "Blocks:Element:Ratio,Blocks:Element:Ratio,...": "Blocks:Element:Ratio,Blocks:Element:Ratio,...", - "beta (if Triple or Twice is not selected,Twice automatically enable)": "beta (if Triple or Twice is not selected,Twice automatically enable)", - "alpha and beta": "alpha and beta", - "mbw alpha": "mbw alpha", - "mbw beta": "mbw beta", - "mbw alpha and beta": "mbw alpha and beta", - "model_A": "model_A", - "model_B": "model_B", - "model_C": "model_C", - "pinpoint blocks (alpha or beta must be selected for another axis)": "pinpoint blocks (alpha or beta must be selected for another axis)", - "elemental": "elemental", - "pinpoint element": "pinpoint element", - "effective elemental checker": "effective elemental checker", - "tensors": "tensors", - "Ultimate SD upscale": "終極 SD 放大", - "Will upscale the image depending on the selected target size type": "將根據選擇的目標尺寸類型對圖像進行放大。", - "Target size type": "圖像尺寸類型", - "From img2img2 settings": "依照圖生圖設定", - "Custom size": "自訂尺寸", - "Scale from image size": "從圖像大小縮放", - "Custom width": "自訂寬度", - "Custom height": "自訂高度", - "Redraw options:": "重繪選項:", - "Tile width": "圖塊寬度", - "Tile height": "圖塊高度", - "Padding": "內距", - "Seams fix:": "儲存接縫修復圖像", - "Denoise": "重繪幅度", - "Save options:": "輸出到 output 的圖像:", - "Upscaled": "儲存 SD 放大的圖像", - "ultimate-upscale-for-automatic1111": "ultimate-upscale-for-automatic1111", - "https://github.com/Coyote-A/ultimate-upscale-for-automatic1111.git": "https://github.com/Coyote-A/ultimate-upscale-for-automatic1111.git", - "HakuImg": "HakuImg", - "Send to Blend": ">> 混合器", - "Send to Layer5": ">> 圖層 5", - "Send to Layer4": ">> 圖層 4", - "Send to Layer3": ">> 圖層 3", - "Send to Layer2": ">> 圖層 2", - "Send to Layer1": ">> 圖層 1", - "Send to Effect": ">> 效果器", - "Blend": "Blend", - "Effect": "效果器", - "Image preview height": "圖像預覽高度", - "Layer5": "圖層 5", - "Layer4": "圖層 4", - "Layer3": "圖層 3", - "Layer2": "圖層 2", - "Layer1": "圖層 1", - "Layer5 opacity": "圖層 5 透明度", - "Layer5 mask blur": "圖層 5 遮罩模糊", 
- "Layer5 mask strength": "圖層 5 遮罩強度", - "Blend mode": "混合模式", - "darken": "變暗", - "multiply": "色彩增值", - "color_burn": "加深顏色", - "linear_burn": "線性加深", - "lighten": "變亮", - "screen": "濾色", - "color_dodge": "加亮顏色", - "linear_dodge": "線性加亮", - "overlay": "覆蓋", - "soft_light": "柔光", - "hard_light": "實光", - "vivid_light": "強烈光線", - "linear_light": "線性光線", - "pin_light": "小光源", - "difference": "差異化", - "exclusion": "排除", - "Layer4 opacity": "圖層 4 透明度", - "Layer4 mask blur": "圖層 4 遮罩模糊", - "Layer4 mask strength": "圖層 4 遮罩強度", - "Layer3 opacity": "圖層 3 透明度", - "Layer3 mask blur": "圖層 3 遮罩模糊", - "Layer3 mask strength": "圖層 3 遮罩強度", - "Layer2 opacity": "圖層 2 透明度", - "Layer2 mask blur": "圖層 2 遮罩模糊", - "Layer2 mask strength": "圖層 2 遮罩強度", - "Layer1 opacity": "圖層 1 透明度", - "Layer1 mask blur": "圖層 1 遮罩模糊", - "Layer1 mask strength": "圖層 1 遮罩強度", - "background color": "背景顏色", - "refresh": "重新整理", - "img": "img", - "Color": "色彩", - "Tone Curve": "Tone Curve", - "Blur": "Blur", - "Pixelize": "Pixelize", - "Glow": "Glow", - "temparature": "temparature", - "hue": "hue", - "brightness": "brightness", - "contrast": "contrast", - "saturation": "saturation", - "Gamma": "Gamma", - "reset": "reset", - "G": "G", - "point1 x": "point1 x", - "point1 y": "point1 y", - "point2 x": "point2 x", - "point2 y": "point2 y", - "point3 x": "point3 x", - "point3 y": "point3 y", - "blur": "模糊", - "kernel size": "kernel size", - "sigma": "sigma", - "k_sigma": "k_sigma", - "epsilon": "epsilon", - "phi": "phi", - "gamma": "gamma", - "color mode": "color mode", - "gray": "gray", - "rgb": "rgb", - "use scale": "use scale", - "colors": "colors", - "dot size": "dot size", - "outline inflating": "outline inflating", - "Smoothing": "Smoothing", - "Color reduce algo": "Color reduce algo", - "kmeans": "kmeans", - "dithering": "dithering", - "kmeans with dithering": "kmeans with dithering", - "Glow mode": "Glow mode", - "BS": "BS", - "BMBL": "BMBL", - "range": "range", - "strength": "strength", - "InOutPaint": 
"InOutPaint", - "fill up": "fill up", - "fill down": "fill down", - "fill left": "fill left", - "fill right": "fill right", - "Resolution": "解析度", - "haku_output": "haku_output", - "Send to inpaint upload": ">> 局部重繪", - "Total num of layers (reload required)": "Total num of layers (reload required)", - "Total num of point for curve (reload required)": "Total num of point for curve (reload required)", - "a1111-sd-webui-haku-img": "a1111-sd-webui-haku-img", - "https://github.com/KohakuBlueleaf/a1111-sd-webui-haku-img.git": "https://github.com/KohakuBlueleaf/a1111-sd-webui-haku-img.git", - "Mine Diffusion": "Mine Diffusion", - "Blocks Blacklist": "Blocks Blacklist", - "Schematic Settings": "Schematic Settings", - "Orientation": "Orientation", - "Flip": "Flip", - "Rotation": "Rotation", - "Schematic Size": "Schematic Size", - "Dithering": "Dithering", - "Save Schematic": "Save Schematic", - "litematic": "litematic", - "schematic(in development)": "schematic(in development)", - "Path to Schematics Folder": "Path to Schematics Folder", - "Schematic Name": "Schematic Name", - "exclude": "exclude", - "Blacklist Presets": "Blacklist Presets", - "Update Presets": "Update Presets", - "Blacklist Name": "Blacklist Name", - "Save Blacklist": "Save Blacklist", - "Bedrock": "Bedrock", - "Water (Unsupported!)": "Water (Unsupported!)", - "Last schemtatics dir path": "Last schemtatics dir path", - "https://github.com/fropych/mine-diffusion": "https://github.com/fropych/mine-diffusion", - "039062f0 (Fri Mar 31 14:38:41 2023)": "039062f0 (Fri Mar 31 14:38:41 2023)", - "Remove Bedrock": "Remove Bedrock", - "Remove Water (Unsupported!)": "Remove Water (Unsupported!)", - "All Blocks": "All Blocks", - "Shift attention": "轉移注意力", - "Show generated images in ui": "在用戶介面上顯示產生了的圖像", - "Save results as video": "儲存結果為影片", - "Number of frames for lead in/out": "導入/導出幀數", - "Upscale ratio": "放大比率", - "https://github.com/yownas/shift-attention.git": "https://github.com/yownas/shift-attention.git", 
- "sd-webui-regional-prompter": "sd-webui-regional-prompter", - "https://github.com/hako-mikan/sd-webui-regional-prompter.git": "https://github.com/hako-mikan/sd-webui-regional-prompter.git", - "Divide mode": "Divide mode", - "Generation mode": "Generation mode", - "Attention": "Attention", - "Divide Ratio": "Divide Ratio", - "Base Ratio": "Base Ratio", - "Use base prompt": "Use base prompt", - "Use common prompt": "Use common prompt", - "Use common negative prompt": "Use common negative prompt", - "visualize and make template": "visualize and make template", - "template": "template", - "disable convert 'AND' to 'BREAK'": "disable convert 'AND' to 'BREAK'", - "debug": "debug", - "Apply Presets": "Apply Presets", - "Preset Name": "Preset Name", - "Save to Presets": "Save to Presets", "auto-sd-paint-ext Guide/Panel": "auto-sd-paint-ext Guide/Panel", "Generate Krita Plugin Symlink Command": "Generate Krita Plugin Symlink Command", "Launch Krita.": "Launch Krita.", @@ -5001,13 +8956,11 @@ "folder location and (auto-detected) repository location.": "folder location and (auto-detected) repository location.", ": Ensure": ": Ensure", "webui-user.bat": "webui-user.bat", - "/": "/", "webui-user.sh": "webui-user.sh", "contains": "contains", "--api": "--api", "in": "in", "COMMANDLINE_ARGS": "COMMANDLINE_ARGS", - "!": "!", "Enabling the Krita Plugin": "Enabling the Krita Plugin", "Restart Krita.": "Restart Krita.", "Settings > Configure Krita...": "Settings > Configure Krita...", @@ -5017,7 +8970,6 @@ "Stable Diffusion Plugin": "Stable Diffusion Plugin", "and tick the checkbox.": "and tick the checkbox.", "Restart Krita again for changes to take effect.": "Restart Krita again for changes to take effect.", - "The": "The", "SD Plugin": "SD Plugin", "docked window should appear on the left of the Krita window. If it does not, look on the menubar under": "docked window should appear on the left of the Krita window. 
If it does not, look on the menubar under", "Settings > Dockers": "Settings > Dockers", @@ -5027,249 +8979,41 @@ "Usage Guide": "Usage Guide", "TODO: Control/status panel": "TODO: Control/status panel", "https://github.com/Interpause/auto-sd-paint-ext.git": "https://github.com/Interpause/auto-sd-paint-ext.git", - "BLIP2 Captioner": "BLIP2 Captioner", - "Single": "Single", - "Generated Caption": "已產生的描述", - "Image Directory": "圖像目錄", - "Output Directory": "輸出目錄", - "Output Caption Extension": "Output Caption Extension", - "Unload models": "Unload models", - "Status": "Status", - "Idle": "Idle", - "Nucleus": "Nucleus", - "Top-K": "Top-K", - "Number of beams (0 = no beam search)": "Number of beams (0 = no beam search)", - "Caption min length": "描述最小長度", - "Caption max length": "描述最大長度", - "Top p": "Top p", - "stable-diffusion-webui-blip2-captioner": "stable-diffusion-webui-blip2-captioner", - "https://github.com/p1atdev/stable-diffusion-webui-blip2-captioner": "https://github.com/p1atdev/stable-diffusion-webui-blip2-captioner", - "path/to/caption": "path/to/caption", - "path/to/output": "path/to/output", - "Attention Heatmap": "注意力熱度圖", - "Attention texts for visualization. 
(comma separated)": "視覺化的注意文字(以逗號分隔)", - "Hide heatmap images": "隱藏熱度圖", - "Do not save heatmap images": "不儲存熱度圖", - "Hide caption": "隱藏描述", - "Use grid (output to grid dir)": "使用網格(輸出到網格目錄)", - "Grid layout": "網格布局", - "Auto": "自動", - "Prevent Empty Spot": "防止空白區域", - "Batch Length As Row": "批次長度作為一列", - "Heatmap blend alpha": "熱度圖混合透明度", - "Heatmap image scale": "熱度圖縮放比例", - "Trace each layers": "追蹤每個層級", - "Use layers as row instead of Batch Length": "將圖層作為行而非批次長度使用", - "stable-diffusion-webui-daam": "stable-diffusion-webui-daam", - "https://github.com/toriato/stable-diffusion-webui-daam.git": "https://github.com/toriato/stable-diffusion-webui-daam.git", - "prompt-fusion-extension": "prompt-fusion-extension", - "https://github.com/ljleb/prompt-fusion-extension.git": "https://github.com/ljleb/prompt-fusion-extension.git", "Only save background free pictures": "Only save background free pictures", "Do not auto save": "Do not auto save", "Custom Background": "Custom Background", "Random Custom Background": "Random Custom Background", "https://github.com/KutsuyaYuki/ABG_extension.git": "https://github.com/KutsuyaYuki/ABG_extension.git", "ABG Remover": "ABG Remover", - "Seed travel": "種子變遷", - "Destination seed(s) (Comma separated)": "目標種子(逗號分割)", - "Only use Random seeds (Unless comparing paths)": "只用隨機種子(除非需要對比變遷軌跡)", - "Number of random seed(s)": "隨機種子數量", - "Compare paths (Separate travels from 1st seed to each destination)": "對比變遷軌跡(從第一個種子分別變遷到每一個目標種子)", - "Steps (Number of images between each seed)": "步數(每個種子之間的圖像數量)", - "Loop back to initial seed": "再變遷回初始種子", - "Bump seed (If > 0 do a Compare Paths but only one image. No video will be generated.)": "提高種子值(如果大於 0,則進行對比變遷軌跡,但僅產生一張圖像而非影片。)", - "Use cache": "使用快取", - "Interpolation rate": "插值速率", - "Hug-the-middle": "保留核心(Hug-the-middle)", - "Slow start": "緩慢開始(Slow start)", - "Quick start": "快速開始(Quick start)", - "Rate strength": "速率強度", - "Allow the default Euler a Sampling method. 
(Does not produce good results)": "允許使用默認的 Eular a 採樣方法.(通常不會產生好的結果)", - "seed_travel": "seed_travel", - "https://github.com/yownas/seed_travel.git": "https://github.com/yownas/seed_travel.git", - "Enable pixelization": "啟用像素化", - "Keep resolution": "保持解析度", - "Pixel size": "像素尺寸", - "stable-diffusion-webui-pixelization": "stable-diffusion-webui-pixelization", - "https://github.com/AUTOMATIC1111/stable-diffusion-webui-pixelization.git": "https://github.com/AUTOMATIC1111/stable-diffusion-webui-pixelization.git", - "Cafe Aesthetic": "Cafe Aesthetic", - "Judge": "Judge", - "Aesthetic": "Aesthetic", - "Style": "Style", - "Waifu": "Waifu", - "Classify type": "Classify type", - "Output style": "Output style", - "Copy": "複製", - "Move": "移動", - "Copy or move captions together": "Copy or move captions together", - "Basis": "Basis", - "Relative": "Relative", - "Absolute": "Absolute", - "Threshold (Use only when basis is absolute)": "Threshold (Use only when basis is absolute)", - "Start": "Start", - "stable-diffusion-webui-cafe-aesthetic": "stable-diffusion-webui-cafe-aesthetic", - "https://github.com/p1atdev/stable-diffusion-webui-cafe-aesthetic": "https://github.com/p1atdev/stable-diffusion-webui-cafe-aesthetic", - "path/to/classify": "path/to/classify", - "Enable Bilingual Localization": "啟用雙語翻譯對照", - "Localization file (Please leave `User interface` - `Localization` as None)": "本地化翻譯(請將使用者介面下的本地化翻譯設為無)", - "Translation display order": "翻譯顯示順序", - "Translation First": "翻譯優先", - "Original First": "原文優先", - "Localization dirs": "本地化路徑", - "sd-webui-bilingual-localization": "sd-webui-bilingual-localization", - "https://github.com/journey-ad/sd-webui-bilingual-localization.git": "https://github.com/journey-ad/sd-webui-bilingual-localization.git", - "Enqueue": "Enqueue", - "Task Queue": "Task Queue", - "Task History": "Task History", - "Pause": "Pause", - "Resume": "Resume", - "Task Id": "Task Id", - "Params": "Params", - "Size": "Size", - "No Rows To Show": "No Rows To Show", 
- "of": "of", - "Page": "Page", - "Disable queue auto-processing": "Disable queue auto-processing", - "Queue button placement": "Queue button placement", - "Under Generate button": "Under Generate button", - "Between Prompt and Generate button": "Between Prompt and Generate button", - "Hide the checkpoint dropdown": "Hide the checkpoint dropdown", - "Auto delete queue history (bookmarked tasks excluded)": "Auto delete queue history (bookmarked tasks excluded)", - "7 days": "7 days", - "14 days": "14 days", - "30 days": "30 days", - "90 days": "90 days", - "Keep forever": "Keep forever", - "Enqueue keyboard shortcut": "Enqueue keyboard shortcut", - "Key": "Key", - "Disable keyboard shortcut": "Disable keyboard shortcut", - "Task queue UI placement": "Task queue UI placement", - "As a tab": "As a tab", - "Append to main UI": "Append to main UI", - "sd-webui-agent-scheduler": "sd-webui-agent-scheduler", - "https://github.com/ArtVentureX/sd-webui-agent-scheduler": "https://github.com/ArtVentureX/sd-webui-agent-scheduler", - "LAB Tools": "LAB Tools", - "Guide": "Guide", - "Abysz LAB 0.1.9 Temporal coherence tools": "Abysz LAB 0.1.9 Temporal coherence tools", - "DFI Render": "DFI Render", - "Original frames folder": "Original frames folder", - "Generated frames folder": "Generated frames folder", - "Output folder": "Output folder", - "The new algorithm will adapt to DFI tolerance to choose the parameters for each frame. IMPORTANT: The algorithm is optimized to maintain a balance between deflicking and corruption, so that it is easier to use StableDiffusion at low denoising to reconstruct lost detail while preserving the stability gained.": "The new algorithm will adapt to DFI tolerance to choose the parameters for each frame. 
IMPORTANT: The algorithm is optimized to maintain a balance between deflicking and corruption, so that it is easier to use StableDiffusion at low denoising to reconstruct lost detail while preserving the stability gained.", - "Source denoise:": "Source denoise:", - "A noisy source can interfere with the accuracy of the scan. This will reduce noise, but also detail. However, this does not affect the original, and sometimes flatter images are not bad for the process, although you may need to balance by reducing the DFI tolerance.": "A noisy source can interfere with the accuracy of the scan. This will reduce noise, but also detail. However, this does not affect the original, and sometimes flatter images are not bad for the process, although you may need to balance by reducing the DFI tolerance.", - "(This is a demanding algorithm)": "(This is a demanding algorithm)", - "DFI Tolerance:": "DFI Tolerance:", - "Determines the movement tolerance of the scan. Low tolerance will detect even small changes in static areas. High values will detect less movements. Ideally, it should detect the movements that are important to you, and skip the static and useless areas, reducing the flick in those.": "Determines the movement tolerance of the scan. Low tolerance will detect even small changes in static areas. High values will detect less movements. Ideally, it should detect the movements that are important to you, and skip the static and useless areas, reducing the flick in those.", - "This parameter commands the new dynamic algorithm.": "This parameter commands the new dynamic algorithm.", - "DFI Expand:": "DFI Expand:", - "DFI expand fattens the edges of the areas detected by DFI. Note: DFI tolerance modifies the amount of movement detected. This only affects that result, be it big or small. Its a complementary parameter. 0=Off.": "DFI expand fattens the edges of the areas detected by DFI. Note: DFI tolerance modifies the amount of movement detected. 
This only affects that result, be it big or small. Its a complementary parameter. 0=Off.", - "Source Denoise": "Source Denoise", - "DFI Tolerance": "DFI Tolerance", - "DFI Expand": "DFI Expand", - "Here you can check examples of the motion map for those parameters. It is useful, for example, to adjust denoise if you see that it detects unnecessary graininess. Keep in mind that what you see represents movement between two frames.": "Here you can check examples of the motion map for those parameters. It is useful, for example, to adjust denoise if you see that it detects unnecessary graininess. Keep in mind that what you see represents movement between two frames.", - "The black is basically what it won't process (it will let it through to preserve the movement), and the white what it will try to keep stable in that frame interpolation. Try freely. Here you can also test how the manual smooth works (advanced section).": "The black is basically what it won't process (it will let it through to preserve the movement), and the white what it will try to keep stable in that frame interpolation. Try freely. Here you can also test how the manual smooth works (advanced section).", - "Preview DFI Map": "Preview DFI Map", - "Preview amount. 0 = Quick shoot": "Preview amount. 0 = Quick shoot", - "Inter Denoise:": "Inter Denoise:", - "Reduces render pixelation generated by corruption. However, be careful. It's resource hungry, and might remove excess detail. Not recommended to change size or FPD, but to use Stable Diffusion to remove the pixelation later.": "Reduces render pixelation generated by corruption. However, be careful. It's resource hungry, and might remove excess detail. Not recommended to change size or FPD, but to use Stable Diffusion to remove the pixelation later.", - "Inter Blur:": "Inter Blur:", - "Fine tunes the dynamic blur algorithm for DFI map. Lower = Stronger blur effects. Between 2-3 recommended.": "Fine tunes the dynamic blur algorithm for DFI map. 
Lower = Stronger blur effects. Between 2-3 recommended.", - "Corruption Refresh:": "Corruption Refresh:", - "To reduce the distortion generated by the process, you can recover original information every X number of frames. Lower number = faster refresh.": "To reduce the distortion generated by the process, you can recover original information every X number of frames. Lower number = faster refresh.", - "Corruption Preserve:": "Corruption Preserve:", - "Here you decide how much corruption keep in each corruption refresh. Low values will recover more of the original frame, with its changes and flickering, in exchange for reducing corruption. You must find the balance that works best for your goal.": "Here you decide how much corruption keep in each corruption refresh. Low values will recover more of the original frame, with its changes and flickering, in exchange for reducing corruption. You must find the balance that works best for your goal.", - "Smooth:": "Smooth:", - "This smoothes the edges of the interpolated areas. Low values are currently recommended until the algorithm is updated.": "This smoothes the edges of the interpolated areas. Low values are currently recommended until the algorithm is updated.", - "Inter Denoise": "Inter Denoise", - "Inter Denoise Size": "Inter Denoise Size", - "Inter Denoise FPD": "Inter Denoise FPD", - "Inter Blur": "Inter Blur", - "The new dynamic algorithm will handle these parameters. Activate them only for manual control.": "The new dynamic algorithm will handle these parameters. Activate them only for manual control.", - "Corruption Refresh (Lower = Faster)": "Corruption Refresh (Lower = Faster)", - "Corruption Preserve": "Corruption Preserve", - "Smooth": "Smooth", - "Frames to render. 0=ALL": "Frames to render. 
0=ALL", - "Run DFI": "Run DFI", - "Show output folder video": "Show output folder video", - "|": "|", - "Deflickers Playground": "Deflickers Playground", - "Frames folder": "Frames folder", - "I made this series of deflickers based on the standard that Vegas Pro includes. You can use them together or separately. Be careful when mixing them.": "I made this series of deflickers based on the standard that Vegas Pro includes. You can use them together or separately. Be careful when mixing them.", - "Blend:": "Blend:", - "Blends a percentage between frames. This can soften transitions and highlights. 50 is half of each frame. 80 or 20 are recommended values.": "Blends a percentage between frames. This can soften transitions and highlights. 50 is half of each frame. 80 or 20 are recommended values.", - "Overlay:": "Overlay:", - "Use the overlay image blending mode. Note that it works particularly good at mid-high values, wich will modify the overall contrast. You will have to decide what works for you.": "Use the overlay image blending mode. Note that it works particularly good at mid-high values, wich will modify the overall contrast. You will have to decide what works for you.", - "Normalize:": "Normalize:", - "Calculates the average between frames to merge them. It may be more practical if you don't have a specific Blend deflicker value in mind.": "Calculates the average between frames to merge them. It may be more practical if you don't have a specific Blend deflicker value in mind.", - "BLEND (0=Off)": "BLEND (0=Off)", - "OVERLAY (0=Off)": "OVERLAY (0=Off)", - "NORMALIZE (0=Off))": "NORMALIZE (0=Off))", - "Deflickers": "Deflickers", - "Style Fuse": "Style Fuse", - "With this you can merge two sets of frames with overlay technique. For example, you can take a style video that is just lights and/or colors, and overlay it on top of another video.": "With this you can merge two sets of frames with overlay technique. 
For example, you can take a style video that is just lights and/or colors, and overlay it on top of another video.", - "The resulting video will be useful for use in Img2Img Batch and that the AI render preserves these added color and lighting details, along with the details of the original video.": "The resulting video will be useful for use in Img2Img Batch and that the AI render preserves these added color and lighting details, along with the details of the original video.", - "Style frames": "Style frames", - "Video frames": "Video frames", - "Fuse Strength": "Fuse Strength", - "Fuse": "Fuse", - "Video extract": "Video extract", - "Video path": "Video path", - "Fps. 0=Original": "Fps. 0=Original", - "Extract": "Extract", - "What DFI does?": "What DFI does?", - "DFI processing analyzes the motion of the original video, and attempts to force that information into the generated video. Demo on https://github.com/AbyszOne/Abysz-LAB-Ext": "DFI processing analyzes the motion of the original video, and attempts to force that information into the generated video. Demo on https://github.com/AbyszOne/Abysz-LAB-Ext", - "In short, this will reduce flicker in areas of the video that don't need to change, but SD does. For example, for a man smoking, leaning against a pole, it will detect that the pole is static, and will try to prevent it from changing as much as possible.": "In short, this will reduce flicker in areas of the video that don't need to change, but SD does. For example, for a man smoking, leaning against a pole, it will detect that the pole is static, and will try to prevent it from changing as much as possible.", - "This is an aggressive process that requires a lot of control for each context. Read the recommended strategies.": "This is an aggressive process that requires a lot of control for each context. 
Read the recommended strategies.", - "Although Video to Video is the most efficient way, a DFI One Shot method is under experimental development as well.": "Although Video to Video is the most efficient way, a DFI One Shot method is under experimental development as well.", - "Usage strategies": "Usage strategies", - "If you get enough understanding of the tool, you can achieve a much more stable and clean enough rendering. However, this is quite demanding.": "If you get enough understanding of the tool, you can achieve a much more stable and clean enough rendering. However, this is quite demanding.", - "Instead, a much friendlier and faster way to use this tool is as an intermediate step. For this, you can allow a reasonable degree of corruption in exchange for more general stability.": "Instead, a much friendlier and faster way to use this tool is as an intermediate step. For this, you can allow a reasonable degree of corruption in exchange for more general stability.", - "You can then clean up the corruption and recover details with a second step in Stable Diffusion at low denoising (0.2-0.4), using the same parameters and seed.": "You can then clean up the corruption and recover details with a second step in Stable Diffusion at low denoising (0.2-0.4), using the same parameters and seed.", - "In this way, the final result will have the stability that we have gained, maintaining final detail. If you find a balanced workflow, you will get something at least much more coherent and stable than the raw AI render.": "In this way, the final result will have the stability that we have gained, maintaining final detail. 
If you find a balanced workflow, you will get something at least much more coherent and stable than the raw AI render.", - "Abysz-LAB-Ext": "Abysz-LAB-Ext", - "https://github.com/AbyszOne/Abysz-LAB-Ext": "https://github.com/AbyszOne/Abysz-LAB-Ext", - "The RAW frames you have used as base for IA generation.": "The RAW frames you have used as base for IA generation.", - "The frames of AI generated video": "The frames of AI generated video", - "Remember that each generation overwrites previous frames in the same folder.": "Remember that each generation overwrites previous frames in the same folder.", - "STAND BY...": "STAND BY...", - "Frames to process": "Frames to process", - "Processed frames": "Processed frames", - "Style to fuse": "Style to fuse", - "Remember to use same fps as generated video for DFI": "Remember to use same fps as generated video for DFI", - "Travel mode": "Travel mode", - "Linear interp method": "Linear interp method", - "lerp": "lerp", - "Replace dimension": "Replace dimension", - "token": "token", - "Replace order": "Replace order", - "Travel steps between stages": "Travel steps between stages", - "Frame genesis": "Frame genesis", - "fixed": "fixed", - "Denoise steps for embryo": "Denoise steps for embryo", - "Depth image file": "Depth image file", - "Upscale width": "Upscale width", - "Upscale height": "Upscale height", - "Video file format": "Video file format", - "Video FPS": "Video FPS", - "Pad begin/end frames": "Pad begin/end frames", - "Pick frame by slice": "Pick frame by slice", - "Ext. export video": "Ext. export video", - "Ext. upscale": "Ext. upscale", - "Ext. depth-image-io (for depth2img models)": "Ext. 
depth-image-io (for depth2img models)", - "stable-diffusion-webui-prompt-travel": "stable-diffusion-webui-prompt-travel", - "https://github.com/Kahsolt/stable-diffusion-webui-prompt-travel.git": "https://github.com/Kahsolt/stable-diffusion-webui-prompt-travel.git", - "Prompt Travel": "Prompt Travel", - "slerp": "slerp", - "successive": "successive", - "embryo": "embryo", - "The untranslated characters will be translated automatically and will not affect the old translations. Use the function in the lower right corner to easily check and quickly modify the current translation.1,Save the setting;2,Click start button;3,Reload your browser.": "未被翻譯的字句將會自動翻譯且不會影響原有的翻譯。使用右下角的功能來簡單的查看並快速編輯正確的翻譯:1. 儲存設定 2. 點選開始按鈕 3. 重新載入 UI", - "Translated Status": "翻譯狀態", - "Start Auto Translate": "開始自動翻譯", + "Create inspiration images": "建立靈感圖像", + "Artist or styles name list. '.txt' files with one name per line": "Artist or styles name list. '.txt' files with one name per line", + "Prompt Placeholder, which can be used at the top of prompt input": "Prompt Placeholder, which can be used at the top of prompt input", + "To activate inspiration function, you need get \"inspiration\" images first.": "To activate inspiration function, you need get \"inspiration\" images first.", + "You can create these images by run \"Create inspiration images\" script in txt2img page,": "You can create these images by run \"Create inspiration images\" script in txt2img page,", + "you can get the artists or art styles list from here": "you can get the artists or art styles list from here", + "download these files, and select these files in the \"Create inspiration images\" script UI": "download these files, and select these files in the \"Create inspiration images\" script UI", + "There about 6000 artists and art styles in these files.": "There about 6000 artists and art styles in these files.", + "This takes server hours depending on your GPU type and how many pictures you generate for each artist/style": 
"This takes server hours depending on your GPU type and how many pictures you generate for each artist/style", + "I suggest at least four images for each": "I suggest at least four images for each", + "You can also download generated pictures from here:": "You can also download generated pictures from here:", + "/extections/stable-diffusion-webui-inspiration": "/extections/stable-diffusion-webui-inspiration", + "Checkbox Group": "Checkbox Group", + "flavors": "flavors", + "mediums": "mediums", + "movements": "movements", + "Exclude abandoned": "Exclude abandoned", + "Abandoned": "Abandoned", + "Key word": "Key word", + "Get inspiration": "Get inspiration", + "to txt2img": "to txt2img", + "to img2img": "to img2img", + "Collect": "Collect", + "Don't show again": "Don't show again", + "Maximum number of samples, used to determine which folders to skip when continue running the create script": "Maximum number of samples, used to determine which folders to skip when continue running the create script", + "stable-diffusion-webui-inspiration": "stable-diffusion-webui-inspiration", + "https://github.com/yfszzx/stable-diffusion-webui-inspiration.git": "https://github.com/yfszzx/stable-diffusion-webui-inspiration.git", "-->": "→", "<--": "←", - "Translated Text": "翻譯文字", - "To Language": "翻譯為:", "zh-CN": "zh-CN", "af": "af", "sq": "sq", @@ -5375,7 +9119,6 @@ "yi": "yi", "yo": "yo", "zu": "zu", - "Select Translater": "翻譯引擎:", "free_google": "free_google", "free_youdao_zh": "free_youdao_zh", "tp_alibaba": "tp_alibaba", @@ -5430,9 +9173,6 @@ "tp__volcEngine": "tp__volcEngine", "tp__yandex": "tp__yandex", "tp__youdao": "tp__youdao", - "display both english and target language": "同時顯示原文與翻譯", - "Save Setting": "儲存設定", - "Remove Auto Trans": "移除自動翻譯的文字", "stable-diffusion-webui-auto-translate-language": "stable-diffusion-webui-auto-translate-language", "https://github.com/hyd998877/stable-diffusion-webui-auto-translate-language": 
"https://github.com/hyd998877/stable-diffusion-webui-auto-translate-language", "your select language": "選擇的語言", @@ -5441,97 +9181,93 @@ "en2": "en2", "translate negative prompt.": "翻譯反向提示詞。", "N2": "N2", - "ui text": "使用者介面文字", - "translated text": "翻譯", "load": "載入", - "translate": "翻譯", - "save": "儲存", - "Save score as EXIF or PNG Info Chunk": "將分數儲存為 EXIF 或 PNG Info Chunk", - "sd_model_hash": "SD模型雜湊值", - "Save tags (Windows only)": "儲存標籤(僅限Windows)", - "Save category (Windows only)": "儲存類別(僅限Windows)", - "Save generation params text": "儲存生成參數文本", - "Force CPU (Requires Custom Script Reload)": "強制使用 CPU(需要重新加載自定義腳本)", - "stable-diffusion-webui-aesthetic-image-scorer": "stable-diffusion-webui-aesthetic-image-scorer", - "https://github.com/tsngo/stable-diffusion-webui-aesthetic-image-scorer.git": "https://github.com/tsngo/stable-diffusion-webui-aesthetic-image-scorer.git", - "Tiled Diffusion": "Tiled Diffusion", - "Tiled VAE": "分塊 VAE", - "multidiffusion-upscaler-for-automatic1111": "multidiffusion-upscaler-for-automatic1111", - "https://github.com/pkuliyi2015/multidiffusion-upscaler-for-automatic1111.git": "https://github.com/pkuliyi2015/multidiffusion-upscaler-for-automatic1111.git", - "Overwrite image size": "覆寫圖像尺寸", - "Keep input image size": "維持輸入圖像尺寸", - "Image width": "圖像寬度", - "Image height": "圖像高度", - "Method": "Method", - "MultiDiffusion": "MultiDiffusion", - "Move ControlNet images to CPU (if applicable)": "將 ControlNet 圖像移至 CPU(如果可使用)", - "Free GPU": "Free GPU", - "Latent tile width": "潛在變數分塊寬度", - "Latent tile height": "潛在變數分塊高度", - "Latent tile overlap": "潛在變數分塊重疊", - "Latent tile batch size": "潛在變數分塊批次數量", - "Noise Inversion": "Noise Inversion", - "Enable Noise Inversion": "Enable Noise Inversion", - "Inversion steps": "Inversion steps", - "Please test on small images before actual upscale. Default params require denoise <= 0.6": "Please test on small images before actual upscale. 
Default params require denoise <= 0.6", - "Retouch": "Retouch", - "Renoise strength": "Renoise strength", - "Renoise kernel size": "Renoise kernel size", - "Region Prompt Control": "Region Prompt Control", - "Enable Control": "Enable Control", - "Draw full canvas background": "Draw full canvas background", - "Causalize layers": "Causalize layers", - "Create txt2img canvas": "Create txt2img canvas", - "Ref image (for conviently locate regions)": "Ref image (for conviently locate regions)", - "Custom Config File": "Custom Config File", - "Region 1": "Region 1", - "Region 2": "Region 2", - "Region 3": "Region 3", - "Region 4": "Region 4", - "Region 5": "Region 5", - "Region 6": "Region 6", - "Region 7": "Region 7", - "Region 8": "Region 8", - "Move VAE to GPU": "將 VAE 移至 GPU", - "Please use smaller tile size when see CUDA error: out of memory.": "如果看到 CUDA 錯誤:out of memory ,請降低分塊尺寸。", - "Encoder Tile Size": "編碼器分塊尺寸", - "Decoder Tile Size": "解碼器分塊尺寸", - "↻ Reset": "↻ Reset", - "Fast Encoder": "快速編碼器", - "Fast Decoder": "快速解碼器", - "Fast Encoder may change colors; Can fix it with more RAM and lower speed.": "快速編碼器會導致顏色變更;可以使用更多記憶體與時間來修復。", - "Encoder Color Fix": "編碼器顏色修復", - "Save intermediate images": "Save intermediate images", - "sd_save_intermediate_images": "sd_save_intermediate_images", - "https://github.com/AlUlkesh/sd_save_intermediate_images": "https://github.com/AlUlkesh/sd_save_intermediate_images", - "8115a847 (Mon Mar 27 13:58:26 2023)": "8115a847 (Mon Mar 27 13:58:26 2023)", - "Also save final image with intermediates": "Also save final image with intermediates", - "Save current settings as default": "Save current settings as default", - "Type of images to be saved": "Type of images to be saved", - "Denoised": "Denoised", - "Noisy": "Noisy", - "According to Live preview subject setting": "According to Live preview subject setting", - "Save every N images": "Save every N images", - "Start at N images (must be 0 = start at the beginning or a multiple of 
'Save every N images')": "Start at N images (must be 0 = start at the beginning or a multiple of 'Save every N images')", - "Stop at N images (must be 0 = don't stop early or a multiple of 'Save every N images')": "Stop at N images (must be 0 = don't stop early or a multiple of 'Save every N images')", - "Make a video file": "Make a video file", - "Save intermediates with image number as suffix": "Save intermediates with image number as suffix", - "Standard operation": "Standard operation", - "mp4 parameters": "mp4 parameters", - "h264": "h264", - "h265/hevc": "h265/hevc", - "av1": "av1", - "fps": "fps", - "Display last image for additional frames at the beginning": "Display last image for additional frames at the beginning", - "Display last image for additional frames at the end": "Display last image for additional frames at the end", - "Smoothing / Interpolate": "Smoothing / Interpolate", - "Approx. how many seconds should the video run?": "Approx. how many seconds should the video run?", - "fps >= 30 recommended, caution: generates large gif-files": "fps >= 30 recommended, caution: generates large gif-files", - "lores": "lores", - "hires": "hires", - "Create Windows ffmpeg bat-files for lores and hires. Changes numbering logic.": "Create Windows ffmpeg bat-files for lores and hires. 
Changes numbering logic.", - "Only bat-files, no video": "Only bat-files, no video", - "Debug": "Debug", + "sd-discord-rich_presence": "sd-discord-rich_presence", + "https://github.com/davehornik/sd-discord-rich_presence": "https://github.com/davehornik/sd-discord-rich_presence", + "b675bf93 (Sun Apr 30 00:56:51 2023)": "b675bf93 (Sun Apr 30 00:56:51 2023)", + "Save to DB": "Save to DB", + "Database Name": "Database Name", + "Collection Name": "Collection Name", + "https://github.com/takoyaro/db-storage1111.git": "https://github.com/takoyaro/db-storage1111.git", + "Sub directories": "子目錄", + "Nothing selected": "未選取", + "Next Image After Ranking (To be implemented)": "Next Image After Ranking (To be implemented)", + "steps": "疊代步數", + "size": "尺寸", + "filename keyword": "檔名關鍵字", + "Renew Page": "刷新頁面", + "set_index": "設定索引", + "load_switch": "載入開關", + "to_dir_load_switch": "to_dir_load_switch", + "turn_page_switch": "翻頁開關", + "Preload images at startup": "在啟動時預加載圖像", + "Scan Exif-/.txt-data (slower, but required for exif-keyword-search)": "Scan Exif-/.txt-data (slower, but required for exif-keyword-search)", + "stable-diffusion-webui-images-browser": "stable-diffusion-webui-images-browser", + "https://github.com/AlUlkesh/stable-diffusion-webui-images-browser.git": "https://github.com/AlUlkesh/stable-diffusion-webui-images-browser.git", + "Torch": "Torch", + "Libs": "Libs", + "Repos": "Repos", + "gpu": "gpu", + "optimizations": "optimizations", + "Refresh bench": "Refresh bench", + "Refresh state": "Refresh state", + "sd-extension-system-info": "sd-extension-system-info", + "https://github.com/vladmandic/sd-extension-system-info": "https://github.com/vladmandic/sd-extension-system-info", + "70ab5cf3 (Sat Apr 29 21:35:48 2023)": "70ab5cf3 (Sat Apr 29 21:35:48 2023)", + "Suggestion is to leave the prompt field empty, anything here will be added at the end of the generated prompt.": "Suggestion is to leave the prompt field empty, anything here will be added at the end 
of the generated prompt.", + "It doesn’t add anything to the negative prompt field, so feel free to add your favorite negative prompts here.": "It doesn’t add anything to the negative prompt field, so feel free to add your favorite negative prompts here.", + "You can turn it off. Add your own artists to the prompt, and they will be added to the end of the prompt.": "You can turn it off. Add your own artists to the prompt, and they will be added to the end of the prompt.", + "prompt 3": "prompt 3", + "Keep at 1 for normal behavior.": "Keep at 1 for normal behavior.", + "Set to different values to compound that many prompts together. My suggestion is to try 2 first.": "Set to different values to compound that many prompts together. My suggestion is to try 2 first.", + "This was originally a bug in the first release when using multiple batches, now brought back as a feature.": "This was originally a bug in the first release when using multiple batches, now brought back as a feature.", + "Raised by redditor drone2222, to bring this back as a toggle, since it did create interesting results. So here it is.": "Raised by redditor drone2222, to bring this back as a toggle, since it did create interesting results. 
So here it is.", + "OneButtonPrompt": "OneButtonPrompt", + "https://github.com/AIrjen/OneButtonPrompt": "https://github.com/AIrjen/OneButtonPrompt", + "8bd1129c (Sat May 13 18:36:12 2023)": "8bd1129c (Sat May 13 18:36:12 2023)", + "all - force multiple": "all - force multiple", + "only other types": "only other types", + "BREAK": "BREAK", + "Cutoff": "Cutoff", + "SLerp": "SLerp", + "Cutoff Enabled": "Cutoff Enabled", + "Cutoff Targets": "Cutoff Targets", + "Cutoff Weight": "Cutoff Weight", + "Cutoff Disable for Negative Prompt": "Cutoff Disable for Negative Prompt", + "Cutoff Strong": "Cutoff Strong", + "Cutoff Padding": "Cutoff Padding", + "Cutoff Interpolation": "Cutoff Interpolation", + "sd-webui-cutoff": "sd-webui-cutoff", + "https://github.com/hnmr293/sd-webui-cutoff.git": "https://github.com/hnmr293/sd-webui-cutoff.git", + "red, blue": "red, blue", + "Posex": "Posex", + "Target ControlNet number": "目標 ControlNet 號碼", + "https://github.com/hnmr293/posex.git": "https://github.com/hnmr293/posex.git", + "Training Picker": "訓練圖挑選器", + "Video to extract frames from:": "要從中提取幀的影片:", + "Only extract keyframes (recommended)": "只提取關鍵幀(推薦)", + "Extract every nth frame": "每第 n 幀提取一次", + "Extract Frames": "提取幀", + "Extracted Frame Set": "提取好的幀", + "Resize crops to 512x512": "縮放裁剪至 512x512", + "Outfill method:": "填充方法:", + "Stretch image": "拉伸圖像", + "Solid color": "純色", + "Average image color": "平均圖像顏色", + "Stretch pixels at border": "延伸邊緣的畫素", + "Reflect image around border": "從邊緣鏡像圖像內容", + "Blurred & stretched overlay": "模糊拉伸的疊加層", + "Reset Aspect Ratio": "重置縱橫比", + "Image border outfill method:": "圖像邊緣的填充方法:", + "Black outfill": "填黑", + "Outfill border color:": "填充顏色:", + "Number of clusters:": "簇數:", + "Save crops to:": "儲存裁剪好的成品到:", + "Fixed size to resize images to": "調整圖像大小到固定大小", + "Path to read videos from": "讀取影片的路徑", + "Path to store extracted frame sets in": "儲存截取幀的路徑", + "Default cropped image output directory": "裁切後的成品的默認輸出目錄", + 
"https://github.com/Maurdekye/training-picker.git": "https://github.com/Maurdekye/training-picker.git", "CLIP_test": "CLIP_test", "Create Beta hypernetwork": "Create Beta hypernetwork", "Train_Gamma": "Train_Gamma", @@ -5603,391 +9339,195 @@ "Value should be in (0-1]": "Value should be in (0-1]", ". filename cannot have ',' inside, and files should be splitted by ','.": ". filename cannot have ',' inside, and files should be splitted by ','.", "https://github.com/aria1th/Hypernetwork-MonkeyPatch-Extension.git": "https://github.com/aria1th/Hypernetwork-MonkeyPatch-Extension.git", - "Add LyCORIS to prompt": "Add LyCORIS to prompt", + "img": "img", + "G": "G", + "sigma": "sigma", + "k_sigma": "k_sigma", + "epsilon": "epsilon", + "phi": "phi", + "BS": "BS", + "BMBL": "BMBL", + "a1111-sd-webui-haku-img": "a1111-sd-webui-haku-img", + "https://github.com/KohakuBlueleaf/a1111-sd-webui-haku-img.git": "https://github.com/KohakuBlueleaf/a1111-sd-webui-haku-img.git", "https://github.com/KohakuBlueleaf/a1111-sd-webui-lycoris": "https://github.com/KohakuBlueleaf/a1111-sd-webui-lycoris", "4d74a7b8 (Mon May 1 15:03:03 2023)": "4d74a7b8 (Mon May 1 15:03:03 2023)", - "Plot": "圖表", - "Max Image Size": "最大圖像尺寸", - "Max Batch Count": "最大批次數量", - "Load results": "載入結果", - "a1111-stable-diffusion-webui-vram-estimator": "a1111-stable-diffusion-webui-vram-estimator", - "https://github.com/space-nuko/a1111-stable-diffusion-webui-vram-estimator.git": "https://github.com/space-nuko/a1111-stable-diffusion-webui-vram-estimator.git", - "Depth Library": "深度圖圖庫", - "Pages:": "頁碼:", - "Selected": "已選取", - "https://github.com/jexom/sd-webui-depth-lib.git": "https://github.com/jexom/sd-webui-depth-lib.git", - "https://github.com/Bing-su/sd-webui-tunnels.git": "https://github.com/Bing-su/sd-webui-tunnels.git", - "Info, Links and Help": "資訊、連結與幫助", - "Made by": "製作者:", - "deforum.github.io": "deforum.github.io", - ", port for AUTOMATIC1111's webui maintained by": ",由 AUTOMATIC1111 的 WebUI 維護的端口", - 
"kabachuha": "kabachuha", - "FOR HELP CLICK HERE": "需要幫助請點擊這裡", - "The code for this extension:": "此擴充功能的程式碼:", - "here": "這裡", - "Join the": "加入我們", - "official Deforum Discord": "官方 Deforum Discord", - "to share your creations and suggestions.": "來分享您的創作和建議。", - "Official Deforum Wiki:": "官方 Deforum Wiki:", - "Anime-inclined great guide (by FizzleDorf) with lots of examples:": "充滿動漫風格的詳細指南(由 FizzleDorf 製作),內含許多範例:", - "For advanced keyframing with Math functions, see": "若欲進階使用數學函數進行關鍵幀動畫,請參考:", - "Alternatively, use": "或者,您也可以使用", - "sd-parseq": "sd-parseq", - "as a UI to define your animation schedules (see the Parseq section in the Keyframes tab).": "作為一個用來定義動畫時間軸的使用者介面(UI),您可以使用 sd-parseq(請參考「關鍵幀(Keyframes)」標籤中的 Parseq 部分)", - "framesync.xyz": "framesync.xyz", - "is also a good option, it makes compact math formulae for Deforum keyframes by selecting various waveforms.": "是一個不錯的選擇,它透過選擇不同的波形為 Deforum 關鍵幀生成簡潔的數學公式。", - "The other site allows for making keyframes using": "另一個網站允許使用", - "interactive splines and Bezier curves": "互動式的曲線和貝茲曲線進行關鍵幀的製作。", - "(select Disco output format).": "(選擇Disco輸出格式)。", - "If you want to use Width/Height which are not multiples of 64, please change noise_type to 'Uniform', in Keyframes --> Noise.": "如果您想使用非 64 的倍數作為寬度/高度,請在關鍵幀 --> 噪音中將 噪音類型(noise_type) 設置為 '均勻分佈(Uniform)'。", - "If you liked this extension, please": "如果您喜歡這個擴展,請", - "give it a star on GitHub": "在 GitHub 上給它一顆星星", - "Keyframes": "關鍵格", - "Init": "初始化", - "Hybrid Video": "混合影片", - "Batch name": "批次名稱", - "Restore Faces, Tiling & more": "恢復臉部、平鋪和更多功能", - "Restore Faces": "面部修復", - "DDIM Eta": "DDIM Eta", - "Pix2Pix img CFG schedule": "Pix2Pix 圖像CFG排程", - "Resume & Run from file": "繼續執行並從檔案運行", - "Run from Settings file": "從設定檔運行", - "Resume Animation": "繼續動畫", - "Custom settings file": "自訂設定檔", - "Resume from timestring": "從時間字串繼續執行", - "Resume timestring": "從時間字串繼續執行", - "Animation mode": "動畫模式", - "2D": "2D", - "Interpolation": "插值", - "Video Input": "影片輸入", - 
"Border": "邊界", - "replicate": "複製", - "wrap": "包裹", - "Cadence": "節奏", - "Max frames": "最大幀數量", - "Guided Images": "Guided Images", - "*READ ME before you use this mode!*": "*READ ME before you use this mode!*", - "You can use this as a guided image tool or as a looper depending on your settings in the keyframe images field. \n Set the keyframes and the images that you want to show up. \n Note: the number of frames between each keyframe should be greater than the tweening frames.": "You can use this as a guided image tool or as a looper depending on your settings in the keyframe images field. \n Set the keyframes and the images that you want to show up. \n Note: the number of frames between each keyframe should be greater than the tweening frames.", - "Prerequisites and Important Info:": "Prerequisites and Important Info:", - "This mode works ONLY with 2D/3D animation modes. Interpolation and Video Input modes aren't supported.": "This mode works ONLY with 2D/3D animation modes. Interpolation and Video Input modes aren't supported.", - "Set Init tab's strength slider greater than 0. Recommended value (.65 - .80).": "Set Init tab's strength slider greater than 0. 
Recommended value (.65 - .80).", - "Set 'seed_behavior' to 'schedule' under the Seed Scheduling section below.": "Set 'seed_behavior' to 'schedule' under the Seed Scheduling section below.", - "Looping recommendations:": "Looping recommendations:", - "seed_schedule should start and end on the same seed.": "seed_schedule should start and end on the same seed.", - "Example: seed_schedule could use 0:(5), 1:(-1), 219:(-1), 220:(5)": "Example: seed_schedule could use 0:(5), 1:(-1), 219:(-1), 220:(5)", - "The 1st and last keyframe images should match.": "The 1st and last keyframe images should match.", - "Set your total number of keyframes to be 21 more than the last inserted keyframe image.": "Set your total number of keyframes to be 21 more than the last inserted keyframe image.", - "Example: Default args should use 221 as total keyframes.": "Example: Default args should use 221 as total keyframes.", - "Prompts are stored in JSON format. If you've got an error, check it in validator,": "Prompts are stored in JSON format. 
If you've got an error, check it in validator,", - "like here": "like here", - "Enable guided images mode": "Enable guided images mode", - "Images to use for keyframe guidance": "Images to use for keyframe guidance", - "Guided images schedules": "Guided images schedules", - "Image strength schedule": "Image strength schedule", - "Blend factor max": "Blend factor max", - "Blend factor slope": "Blend factor slope", - "Tweening frames schedule": "Tweening frames schedule", - "Color correction factor": "Color correction factor", - "SubSeed": "SubSeed", - "Step": "Step", - "CLIP Skip": "CLIP Skip", - "Strength schedule": "Strength schedule", - "CFG scale schedule": "CFG scale schedule", - "Seed behavior": "Seed behavior", - "iter": "iter", - "ladder": "ladder", - "alternate": "alternate", - "schedule": "schedule", - "Seed iter N": "Seed iter N", - "Seed schedule": "Seed schedule", - "Enable Subseed scheduling": "Enable Subseed scheduling", - "Subseed schedule": "Subseed schedule", - "Subseed strength schedule": "Subseed strength schedule", - "Enable steps scheduling": "Enable steps scheduling", - "Steps schedule": "Steps schedule", - "Enable sampler scheduling": "啟用取樣器排程", - "Sampler schedule": "取樣器排程", - "Enable checkpoint scheduling": "Enable checkpoint scheduling", - "Checkpoint schedule": "Checkpoint schedule", - "Enable CLIP skip scheduling": "Enable CLIP skip scheduling", - "CLIP skip schedule": "CLIP skip schedule", - "Motion": "Motion", - "Noise": "Noise", - "Coherence": "Coherence", - "Anti Blur": "Anti Blur", - "Angle": "Angle", - "Translation X": "平移 X", - "Translation Y": "平移 Y", - "Translation Z": "平移 Z", - "Rotation 3D X": "3D 旋轉 X", - "Rotation 3D Y": "3D 旋轉 Y", - "Rotation 3D Z": "3D 旋轉 Z", - "Depth Warping & FOV": "Depth Warping & FOV", - "Depth Warping": "Depth Warping", - "Field Of View": "Field Of View", - "Use depth warping": "Use depth warping", - "MiDaS weight": "MiDaS weight", - "Padding mode": "Padding mode", - "border": "border", - 
"reflection": "reflection", - "zeros": "zeros", - "Sampling mode": "Sampling mode", - "bicubic": "bicubic", - "bilinear": "bilinear", - "nearest": "nearest", - "FOV schedule": "FOV schedule", - "Near schedule": "Near schedule", - "Far schedule": "Far schedule", - "Perspective Flip": "Perspective Flip", - "Enable perspective flip": "Enable perspective flip", - "Perspective flip theta": "Perspective flip theta", - "Perspective flip phi": "Perspective flip phi", - "Perspective flip gamma": "Perspective flip gamma", - "Perspective flip fv": "Perspective flip fv", - "Noise type": "Noise type", - "perlin": "perlin", - "Noise schedule": "Noise schedule", - "Perlin octaves": "Perlin octaves", - "Perlin persistence": "Perlin persistence", - "Color coherence": "Color coherence", - "Match Frame 0 HSV": "Match Frame 0 HSV", - "Match Frame 0 LAB": "Match Frame 0 LAB", - "Match Frame 0 RGB": "Match Frame 0 RGB", - "Color force Grayscale": "Color force Grayscale", - "Color coherence video every N frames": "Color coherence video every N frames", - "Contrast schedule": "Contrast schedule", - "Reroll blank frames": "Reroll blank frames", - "reroll": "reroll", - "interrupt": "interrupt", - "Kernel schedule": "Kernel schedule", - "Sigma schedule": "Sigma schedule", - "Amount schedule": "Amount schedule", - "Threshold schedule": "Threshold schedule", - "*Important* notes on Prompts": "*Important* notes on Prompts", - "Please always keep values in math functions above 0.": "Please always keep values in math functions above 0.", - "There is *no* Batch mode like in vanilla deforum. Please Use the txt2img tab for that.": "There is *no* Batch mode like in vanilla deforum. Please Use the txt2img tab for that.", - "For negative prompts, please write your positive prompt, then --neg ugly, text, assymetric, or any other negative tokens of your choice. OR:": "For negative prompts, please write your positive prompt, then --neg ugly, text, assymetric, or any other negative tokens of your choice. 
OR:", - "Use the negative_prompts field to automatically append all words as a negative prompt. *Don't* add --neg in the negative_prompts field!": "Use the negative_prompts field to automatically append all words as a negative prompt. *Don't* add --neg in the negative_prompts field!", - "Prompts are stored in JSON format. If you've got an error, check it in a": "Prompts are stored in JSON format. If you've got an error, check it in a", - "JSON Validator": "JSON Validator", - "Prompts positive": "Prompts positive", - "Prompts negative": "Prompts negative", - "Composable Mask scheduling": "Composable Mask scheduling", - "To enable, check use_mask in the Init tab": "To enable, check use_mask in the Init tab", - "Supports boolean operations: (! - negation, & - and, | - or, ^ - xor, \\ - difference, () - nested operations)": "Supports boolean operations: (! - negation, & - and, | - or, ^ - xor, \\ - difference, () - nested operations)", - "default variables: in \\{\\}, like \\{init_mask\\}, \\{video_mask\\}, \\{everywhere\\}": "default variables: in \\{\\}, like \\{init_mask\\}, \\{video_mask\\}, \\{everywhere\\}", - "masks from files: in [], like [mask1.png]": "masks from files: in [], like [mask1.png]", - "description-based:": "description-based:", - "word masks": "word masks", - "in <>, like , ": "in <>, like , ", - "Mask schedule": "Mask schedule", - "Use noise mask": "Use noise mask", - "Noise mask schedule": "Noise mask schedule", - "Image Init": "Image Init", - "Video Init": "Video Init", - "Mask Init": "Mask Init", - "Use init": "Use init", - "Strength 0 no init": "Strength 0 no init", - "Init image": "Init image", - "Video init path": "Video init path", - "Extract from frame": "Extract from frame", - "Extract to frame": "Extract to frame", - "Extract nth frame": "Extract nth frame", - "Overwrite extracted frames": "Overwrite extracted frames", - "Use mask video": "Use mask video", - "Video mask path": "Video mask path", - "Use mask": "Use mask", - "Use alpha as 
mask": "Use alpha as mask", - "Invert mask": "Invert mask", - "Overlay mask": "Overlay mask", - "Mask file": "Mask file", - "Mask overlay blur": "Mask overlay blur", - "Mask fill": "Mask fill", - "Full res mask": "Full res mask", - "Full res mask padding": "Full res mask padding", - "Parseq": "Parseq", - "Use an": "Use an", - "sd-parseq manifest": "sd-parseq manifest", - "for your animation (leave blank to ignore).": "for your animation (leave blank to ignore).", - "Note that parseq overrides:": "Note that parseq overrides:", - "Run: seed, subseed, subseed strength.": "Run: seed, subseed, subseed strength.", - "Keyframes: generation settings (noise, strength, contrast, scale).": "Keyframes: generation settings (noise, strength, contrast, scale).", - "Keyframes: motion parameters for 2D and 3D (angle, zoom, translation, rotation, perspective flip).": "Keyframes: motion parameters for 2D and 3D (angle, zoom, translation, rotation, perspective flip).", - "Parseq does": "Parseq does", - "not": "not", - "override:": "override:", - "Run: Sampler, Width, Height, tiling, resize seed.": "Run: Sampler, Width, Height, tiling, resize seed.", - "Keyframes: animation settings (animation mode, max frames, border)": "Keyframes: animation settings (animation mode, max frames, border)", - "Keyframes: coherence (color coherence & cadence)": "Keyframes: coherence (color coherence & cadence)", - "Keyframes: depth warping": "Keyframes: depth warping", - "Output settings: all settings (including fps and max frames)": "Output settings: all settings (including fps and max frames)", - "Parseq Manifest (JSON or URL)": "Parseq Manifest (JSON or URL)", - "Use delta values for movement parameters": "Use delta values for movement parameters", - "Requires the": "Requires the", - "extension to be installed.": "extension to be installed.", - "Due to ControlNet base extension's inner works it needs its models to be located at 'extensions/deforum-for-automatic1111-webui/models'. 
So copy, symlink or move them there until a more elegant solution is found. And, as of now, it requires use_init checked for the first run. The ControlNet extension version used in the dev process is a24089a62e70a7fae44b7bf35b51fd584dd55e25, if even with all the other options above used it still breaks, upgrade/downgrade your CN version to this one.": "Due to ControlNet base extension's inner works it needs its models to be located at 'extensions/deforum-for-automatic1111-webui/models'. So copy, symlink or move them there until a more elegant solution is found. And, as of now, it requires use_init checked for the first run. The ControlNet extension version used in the dev process is a24089a62e70a7fae44b7bf35b51fd584dd55e25, if even with all the other options above used it still breaks, upgrade/downgrade your CN version to this one.", - "ControlNet not found. Please install it :)": "ControlNet not found. Please install it :)", - "Please, change animation mode to 2D or 3D to enable Hybrid Mode": "Please, change animation mode to 2D or 3D to enable Hybrid Mode", - "Info & Help": "Info & Help", - "Hybrid Video Compositing in 2D/3D Mode": "Hybrid Video Compositing in 2D/3D Mode", - "by": "by", - "reallybigname": "reallybigname", - "Composite video with previous frame init image in": "Composite video with previous frame init image in", - "2D or 3D animation_mode": "2D or 3D animation_mode", - "(not for Video Input mode)": "(not for Video Input mode)", - "Uses your": "Uses your", - "settings for": "settings for", - "video_init_path, extract_nth_frame, overwrite_extracted_frames": "video_init_path, extract_nth_frame, overwrite_extracted_frames", - "In Keyframes tab, you can also set": "In Keyframes tab, you can also set", - "color_coherence": "color_coherence", - "= '": "= '", - "'": "'", - "color_coherence_video_every_N_frames": "color_coherence_video_every_N_frames", - "lets you only match every N frames": "lets you only match every N frames", - "Color coherence may be 
used with hybrid composite off, to just use video color.": "Color coherence may be used with hybrid composite off, to just use video color.", - "Hybrid motion may be used with hybrid composite off, to just use video motion.": "Hybrid motion may be used with hybrid composite off, to just use video motion.", - "Hybrid Video Schedules": "Hybrid Video Schedules", - "The alpha schedule controls overall alpha for video mix, whether using a composite mask or not.": "The alpha schedule controls overall alpha for video mix, whether using a composite mask or not.", - "hybrid_comp_mask_blend_alpha_schedule": "hybrid_comp_mask_blend_alpha_schedule", - "only affects the 'Blend'": "only affects the 'Blend'", - "hybrid_comp_mask_type": "hybrid_comp_mask_type", - "Mask contrast schedule is from 0-255. Normal is 1. Affects all masks.": "Mask contrast schedule is from 0-255. Normal is 1. Affects all masks.", - "Autocontrast low/high cutoff schedules 0-100. Low 0 High 100 is full range.": "Autocontrast low/high cutoff schedules 0-100. 
Low 0 High 100 is full range.", - "(": "(", - "hybrid_comp_mask_auto_contrast": "hybrid_comp_mask_auto_contrast", - "must be enabled": "must be enabled", - ")": ")", - "Click Here": "Click Here", - "for more info/ a Guide.": "for more info/ a Guide.", - "Hybrid Settings": "Hybrid Settings", - "Generate inputframes": "Generate inputframes", - "Hybrid composite": "Hybrid composite", - "First frame as init image": "First frame as init image", - "Motion use prev img": "Motion use prev img", - "Hybrid motion": "Hybrid motion", - "Optical Flow": "Optical Flow", - "Perspective": "Perspective", - "Affine": "Affine", - "Flow method": "Flow method", - "DIS Medium": "DIS Medium", - "Farneback": "Farneback", - "Comp mask type": "Comp mask type", - "Video Depth": "Video Depth", - "Difference": "Difference", - "Comp mask equalize": "Comp mask equalize", - "Before": "Before", - "After": "After", - "Both": "Both", - "Comp mask auto contrast": "Comp mask auto contrast", - "Comp mask inverse": "Comp mask inverse", - "Comp save extra frames": "Comp save extra frames", - "Hybrid Schedules": "Hybrid Schedules", - "Comp alpha schedule": "Comp alpha schedule", - "Comp mask blend alpha schedule": "Comp mask blend alpha schedule", - "Comp mask contrast schedule": "Comp mask contrast schedule", - "Comp mask auto contrast cutoff high schedule": "Comp mask auto contrast cutoff high schedule", - "Comp mask auto contrast cutoff low schedule": "Comp mask auto contrast cutoff low schedule", - "Humans Masking": "Humans Masking", - "Generate human masks": "Generate human masks", - "PNGs": "PNGs", - "Video Output Settings": "Video Output Settings", - "FPS": "FPS", - "Output format": "Output format", - "FFMPEG mp4": "FFMPEG mp4", - "Add soundtrack": "Add soundtrack", - "Init Video": "Init Video", - "Soundtrack path": "Soundtrack path", - "Skip video for run all": "Skip video for run all", - "Store frames in ram": "Store frames in ram", - "Save depth maps": "Save depth maps", - "Make GIF": "Make GIF", 
- "Upscale": "Upscale", - "Upscale model": "Upscale model", - "realesr-animevideov3": "realesr-animevideov3", - "realesrgan-x4plus": "realesrgan-x4plus", - "realesrgan-x4plus-anime": "realesrgan-x4plus-anime", - "Upscale factor": "Upscale factor", - "x2": "x2", - "x3": "x3", - "x4": "x4", - "Keep Imgs": "Keep Imgs", - "FFmpeg settings": "FFmpeg settings", - "CRF": "CRF", - "veryslow": "veryslow", - "slower": "slower", - "slow": "slow", - "medium": "medium", - "fast": "fast", - "faster": "faster", - "veryfast": "veryfast", - "superfast": "superfast", - "ultrafast": "ultrafast", - "Location": "Location", - "Frame Interoplation": "Frame Interoplation", - "Video Upscaling": "Video Upscaling", - "Frames to Video": "Frames to Video", - "Important notes and Help": "Important notes and Help", - "Use": "Use", - "RIFE": "RIFE", - "FILM": "FILM", - "Frame Interpolation to smooth out, slow-mo (or both) any video.": "Frame Interpolation to smooth out, slow-mo (or both) any video.", - "Supported engines:": "Supported engines:", - "RIFE v4.6 and FILM.": "RIFE v4.6 and FILM.", - "Important notes:": "Important notes:", - "Frame Interpolation will *not* run if any of the following are enabled: 'Store frames in ram' / 'Skip video for run all'.": "Frame Interpolation will *not* run if any of the following are enabled: 'Store frames in ram' / 'Skip video for run all'.", - "Audio (if provided) will *not* be transferred to the interpolated video if Slow-Mo is enabled.": "Audio (if provided) will *not* be transferred to the interpolated video if Slow-Mo is enabled.", - "'add_soundtrack' and 'soundtrack_path' aren't being honoured in \"Interpolate an existing video\" mode. Original vid audio will be used instead with the same slow-mo rules above.": "'add_soundtrack' and 'soundtrack_path' aren't being honoured in \"Interpolate an existing video\" mode. 
Original vid audio will be used instead with the same slow-mo rules above.", - "Engine": "Engine", - "RIFE v4.6": "RIFE v4.6", - "Slow Mo": "Slow Mo", - "Interp X": "Interp X", - "Slow-Mo X": "Slow-Mo X", - "Interpolate an existing video": "Interpolate an existing video", - "Video to Interpolate": "Video to Interpolate", - "In Frame Count": "In Frame Count", - "In FPS": "In FPS", - "Interpolated Vid FPS": "Interpolated Vid FPS", - "*Interpolate uploaded video*": "*Interpolate uploaded video*", - "* check your CLI for outputs": "* check your CLI for outputs", - "Video to Upscale": "Video to Upscale", - "Upscale V2": "Upscale V2", - "Upscale V1": "Upscale V1", - "In Res": "In Res", - "Out Res": "Out Res", - "*Upscale uploaded video*": "*Upscale uploaded video*", - "Path name modifier": "Path name modifier", - "x0_pred": "x0_pred", - "x": "x", - "Important Notes:": "Important Notes:", - "Enter relative to webui folder or Full-Absolute path, and make sure it ends with something like this: '20230124234916_%05d.png', just replace 20230124234916 with your batch ID. The %05d is important, don't forget it!": "Enter relative to webui folder or Full-Absolute path, and make sure it ends with something like this: '20230124234916_%05d.png', just replace 20230124234916 with your batch ID. 
The %05d is important, don't forget it!", - "MP4 path": "MP4 path", - "Render steps": "Render steps", - "*Stitch frames to video*": "*Stitch frames to video*", - "INVISIBLE": "INVISIBLE", - "Mask contrast adjust": "Mask contrast adjust", - "Mask brightness adjust": "Mask brightness adjust", - "from_img2img_instead_of_link": "from_img2img_instead_of_link", - "Perlin W": "Perlin W", - "Perlin H": "Perlin H", - "Filename format": "Filename format", - "save_settings": "save_settings", - "save_samples": "save_samples", - "display_samples": "display_samples", - "Subseed controls & More": "Subseed controls & More", - "Enable subseed controls": "Enable subseed controls", - "N Batch": "N Batch", - "Save sample per step": "Save sample per step", - "Show sample per step": "Show sample per step", - "Click here after the generation to show the video": "Click here after the generation to show the video", - "Close the video": "Close the video", - "Deforum extension for auto1111 — version 2.2b": "Deforum extension for auto1111 — version 2.2b", - "* Paths can be relative to webui folder OR full - absolute": "* Paths can be relative to webui folder OR full - absolute", - "General Settings File": "General Settings File", - "Video Settings File": "Video Settings File", - "Save Video Settings": "Save Video Settings", - "Load Video Settings": "Load Video Settings", - "deforum-for-automatic1111-webui": "Deforum", - "https://github.com/deforum-art/deforum-for-automatic1111-webui.git": "https://github.com/deforum-art/deforum-for-automatic1111-webui.git", + "weight_gradient": "weight_gradient", + "https://github.com/DingoBite/weight_gradient": "https://github.com/DingoBite/weight_gradient", + "8aedef42 (Sun May 14 20:30:57 2023)": "8aedef42 (Sun May 14 20:30:57 2023)", + "Log in console": "Log in console", + "FigureBracesEXIF": "FigureBracesEXIF", + "Documentation": "Documentation", + "Modes Hint": "Modes Hint", + "Form": "Form", + "Required. Tokens": "Required. Tokens", + "Required. Weight start to end range": "Required. Weight start to end range", + "Steps where weight changes": "Steps where weight changes", + "Weight move to return weight": "Weight move to return weight", + "Gradient mode (e, ei, eo, c, ci, co)": "Gradient mode (e, ei, eo, c, ci, co)", + "Linear decreasing from 1 to 0 in 20% of steps (10% - 30%)": "Linear decreasing from 1 to 0 in 20% of steps (10% - 30%)", + "Exponencial decreasing from 1 to 0 in 8 steps then increasing from 0 to 1 in 7 steps": "Exponencial decreasing from 1 to 0 in 8 steps then increasing from 0 to 1 in 7 steps", + "Circle increasing from 0 to 1 at every step": "Circle increasing from 0 to 1 at every step", + "Circle": "Circle", + "CircleIn": "CircleIn", + "CircleOut": "CircleOut", + "Exponential": "Exponential", + "ExponentialIn": "ExponentialIn", + "ExponentialOut": "ExponentialOut", + "Click to view actions": "Click to view actions", + "Save as SVG": "Save as SVG", + "Save as PNG": "Save as PNG", + "View Source": "View Source", + "View Compiled Vega": "View Compiled Vega", + "Open in Vega Editor": "Open in Vega Editor", + "Save intermediate images": "Save intermediate images", + "sd_save_intermediate_images": "sd_save_intermediate_images", + "https://github.com/AlUlkesh/sd_save_intermediate_images": "https://github.com/AlUlkesh/sd_save_intermediate_images", + "8115a847 (Mon Mar 27 13:58:26 2023)": "8115a847 (Mon Mar 27 13:58:26 2023)", + "Also save final image with intermediates": "Also save final image with intermediates", + "Type of images to be saved": "Type of images to be saved", + "Denoised": "Denoised", + "Noisy": "Noisy", + "Save every N images": "Save every N images", + "Start at N images (must be 0 = start at the beginning or a multiple of 'Save every N images')": "Start at N images (must be 0 = start at the beginning or a multiple of 'Save every N images')", + "Stop at N images (must be 0 = don't stop early or a multiple of 'Save every N images')": "Stop at N images (must be 0 = don't stop early or a multiple of 'Save every N images')", + "Make a video file": "Make a video file", + "Save intermediates with image number as suffix": "Save intermediates with image number as suffix", + "Standard operation": "Standard operation", + "mp4 parameters": "mp4 parameters", + "h264": "h264", + "h265/hevc": "h265/hevc", + "av1": "av1", + "Display last image for additional frames at the beginning": "Display last image for additional frames at the beginning", + "Display last image for additional frames at the end": "Display last image for additional frames at the end", + "Smoothing / Interpolate": "Smoothing / Interpolate", + "Approx. how many seconds should the video run?": "Approx. how many seconds should the video run?", + "fps >= 30 recommended, caution: generates large gif-files": "fps >= 30 recommended, caution: generates large gif-files", + "lores": "lores", + "hires": "hires", + "Create Windows ffmpeg bat-files for lores and hires. Changes numbering logic.": "Create Windows ffmpeg bat-files for lores and hires. Changes numbering logic.", + "Only bat-files, no video": "Only bat-files, no video", + "Outpaint": "Outpaint", + "Post proccess": "Post proccess", + "Total Outpaint Steps": "Total Outpaint Steps", + "The more it is, the longer your videos will be": "The more it is, the longer your videos will be", + "outpaint steps": "outpaint steps", + "Huge spectacular Waterfall in a dense tropical forest,epic perspective,(vegetation overgrowth:1.3)(intricate, ornamentation:1.1),(baroque:1.1), fantasy, (realistic:1) digital painting , (magical,mystical:1.2) , (wide angle shot:1.4), (landscape composed:1.2)(medieval:1.1), divine,cinematic,(tropical forest:1.4),(river:1.3)mythology,india, volumetric lighting, Hindu ,epic, Alex Horley Wenjun Lin greg rutkowski Ruan Jia (Wayne Barlowe:1.2) ": "Huge spectacular Waterfall in a dense tropical forest,epic perspective,(vegetation overgrowth:1.3)(intricate, ornamentation:1.1),(baroque:1.1), fantasy, (realistic:1) digital painting , (magical,mystical:1.2) , (wide angle shot:1.4), (landscape composed:1.2)(medieval:1.1), divine,cinematic,(tropical forest:1.4),(river:1.3)mythology,india, volumetric lighting, Hindu ,epic, Alex Horley Wenjun Lin greg rutkowski Ruan Jia (Wayne Barlowe:1.2) ", + "Export prompts": "Export prompts", + "Import prompts": "Import prompts", + "Clear prompts": "Clear prompts", + "Output Width": "Output Width", + "Output Height": "Output Height", + "Guidance Scale": "Guidance Scale", + "Sampling Steps for each outpaint": "Sampling Steps for each outpaint", + "custom initial image": "custom initial image", + "custom exit image": "custom exit image", + "Batch Count": "Batch Count", + "Zoom mode": "Zoom mode", + "Zoom-out": "Zoom-out", + "Zoom-in": "Zoom-in", + "number of start frame dupe": "number of start frame dupe", + "Frames to freeze at the start of the video": "Frames to freeze at the start of the video", + "number of last frame dupe": "number of last frame dupe", + "Frames to freeze at the end of the video": "Frames to freeze at the end of the video", + "Zoom Speed": "Zoom Speed", + "Zoom speed in seconds (higher values create slower zoom)": "Zoom speed in seconds (higher values create slower zoom)", + "Denoising Strength": "Denoising Strength", + "Inpaint Full Resolution": "Inpaint Full Resolution", + "masked padding": "masked padding", + "Enable Upscale": "Enable Upscale", + "Upscale by factor": "Upscale by factor", + "Path where to store your infinite video. Default is Outputs": "Path where to store your infinite video. Default is Outputs", + "Which subfolder name to be created in the outpath. Default is 'infinite-zooms'": "Which subfolder name to be created in the outpath. Default is 'infinite-zooms'", + "Default width of your video": "Default width of your video", + "Default height your video": "Default height your video", + "Writing videos has dependency to an existing FFPROBE executable on your machine. D/L here (https://github.com/BtbN/FFmpeg-Builds/releases) your OS variant and point to your installation path": "Writing videos has dependency to an existing FFPROBE executable on your machine. D/L here (https://github.com/BtbN/FFmpeg-Builds/releases) your OS variant and point to your installation path", + "Name of your desired model to render keyframes (txt2img)": "Name of your desired model to render keyframes (txt2img)", + "Name of your desired inpaint model (img2img-inpaint). Default is vanilla sd-v1-5-inpainting.ckpt": "Name of your desired inpaint model (img2img-inpaint). Default is vanilla sd-v1-5-inpainting.ckpt", + "Default prompt-setup to start with'": "Default prompt-setup to start with'", + "infinite-zoom-automatic1111-webui": "infinite-zoom-automatic1111-webui", + "https://github.com/v8hid/infinite-zoom-automatic1111-webui": "https://github.com/v8hid/infinite-zoom-automatic1111-webui", + "28a1a0f4 (Sat Apr 29 11:45:36 2023)": "28a1a0f4 (Sat Apr 29 11:45:36 2023)", + "Resume": "Resume", + "No Rows To Show": "No Rows To Show", + "of": "of", + "Disable queue auto-processing": "Disable queue auto-processing", + "Queue button placement": "Queue button placement", + "Under Generate button": "Under Generate button", + "Between Prompt and Generate button": "Between Prompt and Generate button", + "Hide the checkpoint dropdown": "Hide the checkpoint dropdown", + "Auto delete queue history (bookmarked tasks excluded)": "Auto delete queue history (bookmarked tasks excluded)", + "7 days": "7 days", + "14 days": "14 days", + "30 days": "30 days", + "90 days": "90 days", + "Keep forever": "Keep forever", + "Enqueue keyboard shortcut": "Enqueue keyboard shortcut", + "Key": "Key", + "Disable keyboard shortcut": "Disable keyboard shortcut", + "Task queue UI placement": "Task queue UI placement", + "As a tab": "As a tab", + "Append to main UI": "Append to main UI", + "sd-webui-agent-scheduler": "sd-webui-agent-scheduler", + "https://github.com/ArtVentureX/sd-webui-agent-scheduler": "https://github.com/ArtVentureX/sd-webui-agent-scheduler", + "[name]": "[name]", + "[extension]": "[extension]", + "[hash:]": "[hash:]", + "sha1, blake2s, shake_256, sha256, md5-sha1, sha512_256, shake_128, mdc2, ripemd160, whirlpool, md5, sha3_384, sha512, sha3_512, blake2b, sha224, sm3, sha512_224, sha3_224, sha384, md4, sha3_256": "sha1, blake2s, shake_256, sha256, md5-sha1, sha512_256, shake_128, mdc2, ripemd160, whirlpool, md5, sha3_384, sha512, sha3_512, blake2b, sha224, sm3, sha512_224, sha3_224, sha384, md4, sha3_256", + "[output_extension]": "[output_extension]", + "[name].[output_extension]": "[name].[output_extension]", + "Original file's hash (good for deleting duplication)": "Original file's hash (good for deleting duplication)", + "[hash:sha1].[output_extension]": "[hash:sha1].[output_extension]", + "default.json": "default.json", + "wd14-convnext": "wd14-convnext", + "wd14-convnext-v2": "wd14-convnext-v2", + "wd14-convnext-v2-git": "wd14-convnext-v2-git", + "wd14-swinv2-v2": "wd14-swinv2-v2", + "wd14-swinv2-v2-git": "wd14-swinv2-v2-git", + "wd14-vit": "wd14-vit", + "wd14-vit-v2": "wd14-vit-v2", + "wd14-vit-v2-git": "wd14-vit-v2-git", + "stable-diffusion-webui-wd14-tagger": "stable-diffusion-webui-wd14-tagger", + "https://github.com/toriato/stable-diffusion-webui-wd14-tagger.git": "https://github.com/toriato/stable-diffusion-webui-wd14-tagger.git", + "Leave blank to use same filename as original.": "Leave blank to use same filename as original.", + "NAIConvert": "NAI轉換", + "https://github.com/animerl/novelai-2-local-prompt.git": "https://github.com/animerl/novelai-2-local-prompt.git", + "Resblock": "Resblock", + "Transformer": "Transformer", + "S. Attn.": "S. Attn.", + "X. Attn.": "X. Attn.", + "LLuL Enabled": "LLuL Enabled", + "LLuL Multiply": "LLuL Multiply", + "LLuL Weight": "LLuL Weight", + "LLuL Layers": "LLuL Layers", + "LLuL Apply to": "LLuL Apply to", + "LLuL Start steps": "LLuL Start steps", + "LLuL Max steps": "LLuL Max steps", + "LLuL Upscaler": "LLuL Upscaler", + "LLuL Upscaler AA": "LLuL Upscaler AA", + "LLuL Downscaler": "LLuL Downscaler", + "LLuL Downscaler AA": "LLuL Downscaler AA", + "LLuL Interpolation method": "LLuL Interpolation method", + "sd-webui-llul": "sd-webui-llul", + "https://github.com/hnmr293/sd-webui-llul.git": "https://github.com/hnmr293/sd-webui-llul.git", + "Localization dirs": "本地化路徑", + "sd-webui-bilingual-localization": "sd-webui-bilingual-localization", + "https://github.com/journey-ad/sd-webui-bilingual-localization.git": "https://github.com/journey-ad/sd-webui-bilingual-localization.git", + "Most frequent tags in captions": "訓練用描述最常用的標記", + "Repeats": "重複", + "Total Images": "圖像總數", + "sd-webui-additional-networks": "sd-webui-additional-networks", + "https://github.com/kohya-ss/sd-webui-additional-networks.git": "https://github.com/kohya-ss/sd-webui-additional-networks.git", + "Tokenizer": "標記解析器", + "Before your text is sent to the neural network, it gets turned into numbers in a process called tokenization. These tokens are how the neural network reads and interprets text. Thanks to our great friends at Shousetsu愛 for inspiration for this feature.": "在你的文本被發送到神經網路之前,它在一個稱為標記化的過程中被轉化為數字。這些標記是神經網路閱讀和解釋文本的方式。感謝我們的好朋友 Shousetsu愛 為這個功能帶來的靈感", + "ID input": "ID 輸入", + "Tokens": "標記", + "stable-diffusion-webui-tokenizer": "stable-diffusion-webui-tokenizer", + "Prompt for tokenization": "給標記化準備的提示詞", + "Ids for tokenization (example: 9061, 631, 736)": "用於標記化的 ID(例:9061,631,736)", + "https://github.com/AUTOMATIC1111/stable-diffusion-webui-tokenizer.git": "https://github.com/AUTOMATIC1111/stable-diffusion-webui-tokenizer.git", + "json": "json", + "openpose-editor": "openPose 編輯器", + "https://github.com/fkunn1326/openpose-editor.git": "https://github.com/fkunn1326/openpose-editor.git", "Artists To Study": "藝術家圖庫", "dog": "dog", "house": "house", @@ -6066,7 +9606,6 @@ "spaceship-c": "spaceship-c", "spaceship-n": "spaceship-n", "artists to study extension by camenduru |": "artists to study extension by camenduru |", - "github": "github", "twitter": "twitter", "youtube": "youtube", "hi-res images": "hi-res images", @@ -6075,13 +9614,5 @@ "| License: Attribution 4.0 International (CC BY 4.0)": "| License: Attribution 4.0 International (CC BY 4.0)", "stable-diffusion-webui-artists-to-study": "stable-diffusion-webui-artists-to-study", "https://github.com/camenduru/stable-diffusion-webui-artists-to-study.git": "https://github.com/camenduru/stable-diffusion-webui-artists-to-study.git", - "stable-diffusion-webui-two-shot": "stable-diffusion-webui-two-shot", - "Divisions": "分割", - "Positions": "位置", - "Weights": "權重", - "end at this step": "在此疊代步數停止", - "Visualize": "視覺化", - "Regions": "區域", - "Extra generation params": "附加生成參數", - "https://github.com/opparco/stable-diffusion-webui-two-shot.git": "https://github.com/opparco/stable-diffusion-webui-two-shot.git" + "https://github.com/Bing-su/sd-webui-tunnels.git": "https://github.com/Bing-su/sd-webui-tunnels.git" } \ No newline at end of file