Integrate ComfyUI support in txt2img mode

pull/369/head
Abdullah Alfaraj 2023-11-04 19:12:36 +03:00
parent fe42e8e29c
commit 19a0a6cfac
9 changed files with 1186 additions and 13 deletions

View File

@ -70,6 +70,7 @@ const {
logger,
toJS,
viewer,
viewer_util,
preview,
// session_ts,
session_store,
@ -95,7 +96,9 @@ const {
api_ts,
comfyui,
comfyui_util,
comfyui_main_ui,
diffusion_chain,
comfyapi,
} = require('./typescripts/dist/bundle')
const io = require('./utility/io')
@ -535,8 +538,6 @@ let g_selection = {}
let g_b_use_smart_object = true // true to keep layer as smart objects, false to rasterize them
let g_sd_options_obj = new sd_options.SdOptions()
g_sd_options_obj.getOptions()
let g_controlnet_max_models
let g_generation_session = new session.GenerationSession(0) //session manager

View File

@ -0,0 +1,238 @@
import txt2img from './txt2img_workflow_v0.0.2.json'
import txt2img_api from './txt2img_api_v0.0.2.json'
import vae_settings from '../settings/vae'
import sd_tab_util from '../sd_tab/util'
import comfyui_util from './util'
import util from './util'
import { store } from './util'
import { base64UrlToBase64, copyJson } from '../util/ts/general'
// Parse metadata embedded in a node title string.
// Titles look like: "original title | key1:value1, key2:value2" — the text
// after the last "|" is treated as a comma-separated list of key:value pairs.
// Returns an empty object for falsy titles or titles without a "|" section.
function parseMetadata(title: string): Record<string, string> {
    const result: Record<string, string> = {}
    if (!title) return result
    // Split the title on "|" and trim each part; the last part holds metadata
    const parts = title.split('|').map((part) => part.trim())
    // No "|" means no metadata section at all
    if (parts.length < 2) return result
    const metadataPart = parts[parts.length - 1]
    // Each comma-separated entry may be a "key:value" pair
    for (const pair of metadataPart.split(',').map((p) => p.trim())) {
        const sep = pair.indexOf(':')
        if (sep !== -1) {
            // Split on the FIRST ":" only, so values containing ":" survive
            // (the old split('.') style dropped everything past a second colon)
            const key = pair.slice(0, sep).trim()
            result[key] = pair.slice(sep + 1).trim()
        }
    }
    return result
}
// Example usage:
//   parseMetadata('original title | key1:value1, key2:value2')
//   -> { key1: 'value1', key2: 'value2' }
// (previously executed via console.log at module load; removed so importing
// this module has no debug side effects)
const nodes = txt2img.nodes
// Return the first input of `node` whose widget name matches `name`,
// or undefined when no such input exists.
function getInput(node: any, name: string) {
    return node.inputs.find(
        (candidate: any) => candidate?.widget?.name === name
    )
}
// Look up a workflow node by numeric id. Ids may arrive as numbers or
// numeric strings, so both sides are parsed before comparing.
function getNode(nodes: any[], node_id: string) {
    const target = parseInt(node_id)
    for (const candidate of nodes) {
        if (parseInt(candidate.id) === target) {
            return candidate
        }
    }
    return undefined
}
// Find a workflow node by the `id:` key embedded in its title metadata
// (see parseMetadata). Returns undefined when no title matches.
function getNodeByNameId(nodes: any[], node_name_id: string) {
    return nodes.find((candidate: any) => {
        const metadata = parseMetadata(candidate.title)
        return metadata.id === node_name_id
    })
}
// Resolve a prompt (api.json) node from a workflow title name-id:
// first find the workflow node by its `id:` metadata, then index into the
// prompt object by that node's numeric id. Returns {} when not found.
function getPromptNodeByNameId(
    nodes: any[], //nodes from workflow.json
    prompt: any, //prompt from api.json
    node_name_id: string // name id used to look nodes up by title metadata
) {
    const workflow_node = getNodeByNameId(nodes, node_name_id)
    if (workflow_node?.id) {
        return prompt[workflow_node.id]
    }
    return {}
}
// Set one input value on the prompt node identified by `node_name_id`.
// No-op (with a warning) when the node cannot be resolved — previously a
// missing node made prompt_node === {} and `.inputs[...]` threw a TypeError.
function setInputValue(
    nodes: any[],
    prompt: any,
    node_name_id: string,
    input_name: string,
    new_value: any
) {
    const prompt_node = getPromptNodeByNameId(nodes, prompt, node_name_id)
    if (!prompt_node?.inputs) {
        console.warn(
            `setInputValue: node "${node_name_id}" not found or has no inputs`
        )
        return
    }
    prompt_node.inputs[input_name] = new_value
}
// Find a link tuple by its link id (element 0 of each tuple).
// Returns undefined when no link matches.
function getLink(links: any[], link_id: number) {
    return links.find((candidate: any) => candidate[0] === link_id)
}
// Unpack a link tuple [link_id, origin_id, origin_slot, target_id, target_slot, type]
// into its endpoint descriptors. (Both slots are exposed under the existing
// `input_index` key to keep the original object shape for callers.)
function getNodesFromLink(link: any) {
    const from_node = { id: link[1], input_index: link[2] }
    const to_node = { id: link[3], input_index: link[4] }
    return { from_node, to_node }
}
// Maps plugin (Automatic1111-style) setting names to "<node_name_id>.<input_name>"
// destinations in the ComfyUI prompt. The node_name_id part matches the `id:`
// metadata embedded in workflow node titles (see parseMetadata).
const txt2img_map: Record<string, any> = {
    model: 'checkpoint.ckpt_name',
    vae: 'vae.vae_name',
    width: 'latent_image.width',
    height: 'latent_image.height',
    batch_size: 'latent_image.batch_size',
    prompt: 'positive_prompt.text',
    negative_prompt: 'negative_prompt.text',
    //sampler node
    seed: 'sampler.seed',
    steps: 'sampler.steps',
    cfg: 'sampler.cfg',
    sampler_index: 'sampler.sampler_name',
    // scheduler: 'normal',
    // denoise: 'sampler.denoise', // keep it at default value 1.0
    //hires_node node:
    hr_scale: 'scaler.scale_by',
    // NOTE(review): unlike every other active entry this is a bare value, not a
    // "<node>.<input>" path — if a plugin `upscale_method` setting is present,
    // split('.') yields no input name. Confirm whether this was meant to be
    // 'scaler.upscale_method'.
    upscale_method: 'nearest_exact',
    hr_seed: 'hires_sampler.seed',
    hr_second_pass_steps: 'hires_sampler.steps',
    // hr_cfg: 'hires_sampler.cfg', // keep at default value 0.5
    // hr_sampler_name: 'hires_sampler.sampler_name',
    // hr_scheduler: 'normal',
    hr_denoising_strength: 'hires_sampler.denoise',
}
// Fill in settings the plugin UI does not send directly (vae, model,
// hires denoise/sampler) and resolve the random seed. Mutates and returns
// the same plugin_settings object.
function addMissingSettings(plugin_settings: Record<string, any>) {
    plugin_settings['vae'] = vae_settings.store.data.current_vae
    plugin_settings['model'] = sd_tab_util.store.data.selected_model
    plugin_settings['hr_denoising_strength'] =
        sd_tab_util.store.data.hr_denoising_strength
    // use the same sampler for the first and second pass (hires) upscale sampling steps
    plugin_settings['hr_sampler_name'] = sd_tab_util.store.data.sampler_name
    // draw a positive random seed; used only when the incoming seed is -1
    const random_seed: bigint = util.getRandomBigIntApprox(
        0n,
        18446744073709552000n
    )
    if (parseInt(plugin_settings['seed']) === -1) {
        plugin_settings['seed'] = random_seed.toString()
    }
    // the hires pass shares the main seed
    plugin_settings['hr_seed'] = plugin_settings['seed']
    return plugin_settings
}
// Copy every plugin setting listed in txt2img_map into the corresponding
// ComfyUI prompt node input, after filling in missing settings. Mutates and
// returns `prompt`.
function mapPluginSettingsToComfyuiPrompt(
    nodes: any[],
    prompt: any,
    plugin_settings: any
) {
    plugin_settings = addMissingSettings(plugin_settings)
    // Apply one plugin setting to its mapped "<node>.<input>" destination.
    function mapPluginInputToComfyInput(plugin_param: string) {
        const value = plugin_settings[plugin_param]
        // != null (not truthiness) so legitimate 0 / '' values — e.g. an empty
        // negative prompt — are still applied; absent keys are skipped.
        if (value != null) {
            const [node_name_id, input_name] =
                txt2img_map[plugin_param].split('.')
            setInputValue(nodes, prompt, node_name_id, input_name, value)
        }
    }
    // forEach (not map): iterated purely for side effects
    Object.keys(txt2img_map).forEach((plugin_param: string) => {
        mapPluginInputToComfyInput(plugin_param)
    })
    return prompt
}
// comfyui_util.postPromptAndGetBase64JsonResult(
// comfyui_util.store.data.comfy_server,
// prompt
// )
// const plugin_settings: Record<string, any> = {}
// const prompt = txt2img_api
// const node_name_id = 'sampler'
// const node = getNodeByNameId(txt2img.nodes, node_name_id)
// console.log('node: ', node)
// prompt[3].inputs.steps = plugin_settings.steps
// setInputValue(txt2img.nodes, prompt, 'sampler', 'seed', 3)
// Run the txt2img workflow on the ComfyUI backend.
// Maps plugin settings onto the prompt template, optionally drops the hires
// output node, posts the prompt, and returns the resulting images both as
// data-URLs and as raw base64 strings.
async function generateComfyTxt2img(
    plugin_settings: any
): Promise<{ image_base64_list: string[]; image_url_list: string[] }> {
    const prompt = mapPluginSettingsToComfyuiPrompt(
        txt2img.nodes,
        txt2img_api,
        plugin_settings
    )
    // deep-copy so deleting nodes below doesn't mutate the shared template
    const final_prompt = copyJson(prompt)
    if (!plugin_settings['enable_hr']) {
        // drop the hires SaveImage node so only the first-pass image is produced
        const hires_output_node = getNodeByNameId(txt2img.nodes, 'hires_output')
        // guard: workflow edits may have removed/renamed the node; previously
        // a missing node made `hire_output_node.id` throw a TypeError
        if (hires_output_node?.id != null) {
            delete final_prompt[hires_output_node.id]
        }
    }
    const outputs = await comfyui_util.postPromptAndGetBase64JsonResult(
        store.data.comfy_server,
        final_prompt
    )
    let image_url_list: string[] = []
    let image_base64_list: string[] = []
    if (outputs) {
        // outputs is keyed by node id; each value is a list of data-URLs
        image_url_list = Object.values(outputs).flat()
        image_base64_list = image_url_list.map((image_url) =>
            base64UrlToBase64(image_url)
        )
    }
    return { image_base64_list, image_url_list }
}
// Public surface of the ComfyUI txt2img integration: workflow/prompt lookup
// helpers, the settings-to-prompt mapper, the generation entry point, and the
// raw workflow/api templates.
export default {
    parseMetadata,
    getNode,
    getInput,
    getLink,
    getNodesFromLink,
    getNodeByNameId,
    mapPluginSettingsToComfyuiPrompt,
    getPromptNodeByNameId,
    setInputValue,
    addMissingSettings,
    generateComfyTxt2img,
    txt2img,
    txt2img_api,
}

View File

@ -0,0 +1,153 @@
{
"3": {
"inputs": {
"seed": 8216905740393,
"steps": 12,
"cfg": 8,
"sampler_name": "dpmpp_sde",
"scheduler": "normal",
"denoise": 1,
"model": [
"16",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler"
},
"5": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage"
},
"6": {
"inputs": {
"text": "masterpiece HDR victorian portrait painting of woman, blonde hair, mountain nature, blue sky\n",
"clip": [
"16",
1
]
},
"class_type": "CLIPTextEncode"
},
"7": {
"inputs": {
"text": "bad hands, text, watermark\n",
"clip": [
"16",
1
]
},
"class_type": "CLIPTextEncode"
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"57",
0
]
},
"class_type": "VAEDecode"
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage"
},
"11": {
"inputs": {
"seed": 481858222412057,
"steps": 14,
"cfg": 8,
"sampler_name": "dpmpp_2m",
"scheduler": "simple",
"denoise": 0.5,
"model": [
"16",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"55",
0
]
},
"class_type": "KSampler"
},
"12": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"13",
0
]
},
"class_type": "SaveImage"
},
"13": {
"inputs": {
"samples": [
"11",
0
],
"vae": [
"57",
0
]
},
"class_type": "VAEDecode"
},
"16": {
"inputs": {
"ckpt_name": "cardosAnime_v20.safetensors"
},
"class_type": "CheckpointLoaderSimple"
},
"55": {
"inputs": {
"upscale_method": "nearest-exact",
"scale_by": 2,
"samples": [
"3",
0
]
},
"class_type": "LatentUpscaleBy"
},
"57": {
"inputs": {
"vae_name": "MoistMix.vae.pt"
},
"class_type": "VAELoader"
}
}

View File

@ -0,0 +1,737 @@
{
"last_node_id": 57,
"last_link_id": 83,
"nodes": [
{
"id": 7,
"type": "CLIPTextEncode",
"pos": [
638.8565730529789,
359.49742516677856
],
"size": {
"0": 425.27801513671875,
"1": 180.6060791015625
},
"flags": {},
"order": 5,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 58
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
6,
13
],
"slot_index": 0
}
],
"title": "CLIP Text Encode (Prompt) | id:negative_prompt",
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"bad hands, text, watermark\n"
]
},
{
"id": 8,
"type": "VAEDecode",
"pos": [
1238.8565730529783,
603.497425166778
],
"size": {
"0": 210,
"1": 46
},
"flags": {},
"order": 7,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 36
},
{
"name": "vae",
"type": "VAE",
"link": 82
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
9
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAEDecode"
}
},
{
"id": 5,
"type": "EmptyLatentImage",
"pos": [
391,
-284
],
"size": {
"0": 315,
"1": 106
},
"flags": {},
"order": 0,
"mode": 0,
"inputs": [],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
39
],
"slot_index": 0
}
],
"title": "Empty Latent Image | id:latent_image",
"properties": {
"Node name for S&R": "EmptyLatentImage"
},
"widgets_values": [
512,
512,
1
]
},
{
"id": 56,
"type": "Reroute",
"pos": [
-23,
541
],
"size": [
75,
26
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"name": "",
"type": "*",
"link": 81
}
],
"outputs": [
{
"name": "VAE",
"type": "VAE",
"links": [
82,
83
],
"slot_index": 0
}
],
"properties": {
"showOutputText": true,
"horizontal": false
}
},
{
"id": 57,
"type": "VAELoader",
"pos": [
-455,
583
],
"size": {
"0": 315,
"1": 58
},
"flags": {},
"order": 1,
"mode": 0,
"outputs": [
{
"name": "VAE",
"type": "VAE",
"links": [
81
],
"shape": 3,
"slot_index": 0
}
],
"title": "Load VAE | id:vae",
"properties": {
"Node name for S&R": "VAELoader"
},
"widgets_values": [
"MoistMix.vae.pt"
]
},
{
"id": 16,
"type": "CheckpointLoaderSimple",
"pos": [
-443,
369
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 2,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
59,
60
],
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": [
57,
58
],
"slot_index": 1
},
{
"name": "VAE",
"type": "VAE",
"links": [],
"slot_index": 2
}
],
"title": "Load Checkpoint | id:checkpoint",
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"cardosAnime_v20.safetensors"
]
},
{
"id": 6,
"type": "CLIPTextEncode",
"pos": [
637,
123
],
"size": {
"0": 422.84503173828125,
"1": 164.31304931640625
},
"flags": {},
"order": 4,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 57
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
4,
12
],
"slot_index": 0
}
],
"title": "CLIP Text Encode (Prompt) | id:positive_prompt",
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"masterpiece HDR victorian portrait painting of woman, blonde hair, mountain nature, blue sky\n"
]
},
{
"id": 3,
"type": "KSampler",
"pos": [
1057,
-262
],
"size": {
"0": 315,
"1": 262
},
"flags": {},
"order": 6,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 59
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 4
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 6
},
{
"name": "latent_image",
"type": "LATENT",
"link": 39
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
36,
78
],
"slot_index": 0
}
],
"title": "KSampler | id:sampler",
"properties": {
"Node name for S&R": "KSampler"
},
"widgets_values": [
8216905740393,
"randomize",
12,
8,
"dpmpp_sde",
"normal",
1
]
},
{
"id": 55,
"type": "LatentUpscaleBy",
"pos": [
1465,
-133
],
"size": {
"0": 315,
"1": 82
},
"flags": {},
"order": 8,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 78
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
79
],
"shape": 3,
"slot_index": 0
}
],
"title": "Upscale Latent By | id:scaler",
"properties": {
"Node name for S&R": "LatentUpscaleBy"
},
"widgets_values": [
"nearest-exact",
2
]
},
{
"id": 13,
"type": "VAEDecode",
"pos": [
1961,
125
],
"size": {
"0": 210,
"1": 46
},
"flags": {},
"order": 11,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 15
},
{
"name": "vae",
"type": "VAE",
"link": 83
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
17
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAEDecode"
}
},
{
"id": 11,
"type": "KSampler",
"pos": [
1570,
130
],
"size": {
"0": 315,
"1": 262
},
"flags": {},
"order": 10,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 60,
"slot_index": 0
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 12,
"slot_index": 1
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 13,
"slot_index": 2
},
{
"name": "latent_image",
"type": "LATENT",
"link": 79,
"slot_index": 3
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
15
],
"slot_index": 0
}
],
"title": "KSampler | id:hires_sampler",
"properties": {
"Node name for S&R": "KSampler"
},
"widgets_values": [
481858222412057,
"randomize",
14,
8,
"dpmpp_2m",
"simple",
0.5
]
},
{
"id": 12,
"type": "SaveImage",
"pos": [
2240,
123
],
"size": {
"0": 407.53717041015625,
"1": 468.13226318359375
},
"flags": {},
"order": 12,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 17
}
],
"title": "Save Image | id:hires_output",
"properties": {},
"widgets_values": [
"ComfyUI"
]
},
{
"id": 9,
"type": "SaveImage",
"pos": [
1499,
603
],
"size": {
"0": 232.94032287597656,
"1": 282.4336242675781
},
"flags": {},
"order": 9,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 9
}
],
"properties": {},
"widgets_values": [
"ComfyUI"
]
}
],
"links": [
[
4,
6,
0,
3,
1,
"CONDITIONING"
],
[
6,
7,
0,
3,
2,
"CONDITIONING"
],
[
9,
8,
0,
9,
0,
"IMAGE"
],
[
12,
6,
0,
11,
1,
"CONDITIONING"
],
[
13,
7,
0,
11,
2,
"CONDITIONING"
],
[
15,
11,
0,
13,
0,
"LATENT"
],
[
17,
13,
0,
12,
0,
"IMAGE"
],
[
36,
3,
0,
8,
0,
"LATENT"
],
[
39,
5,
0,
3,
3,
"LATENT"
],
[
57,
16,
1,
6,
0,
"CLIP"
],
[
58,
16,
1,
7,
0,
"CLIP"
],
[
59,
16,
0,
3,
0,
"MODEL"
],
[
60,
16,
0,
11,
0,
"MODEL"
],
[
78,
3,
0,
55,
0,
"LATENT"
],
[
79,
55,
0,
11,
3,
"LATENT"
],
[
81,
57,
0,
56,
0,
"*"
],
[
82,
56,
0,
8,
1,
"VAE"
],
[
83,
56,
0,
13,
1,
"VAE"
]
],
"groups": [
{
"title": "Txt2Img",
"bounding": [
442,
-7,
1211,
708
],
"color": "#a1309b",
"font_size": 24,
"locked": false
},
{
"title": "Save Intermediate Image",
"bounding": [
1225,
500,
516,
196
],
"color": "#3f789e",
"font_size": 24,
"locked": false
},
{
"title": "Hires Fix",
"bounding": [
1210,
21,
710,
464
],
"color": "#b58b2a",
"font_size": 24,
"locked": false
},
{
"title": "Save Final Image",
"bounding": [
1949,
31,
483,
199
],
"color": "#3f789e",
"font_size": 24,
"locked": false
}
],
"config": {},
"extra": {},
"version": 0.4
}

View File

@ -6,12 +6,13 @@ export * as control_net from './controlnet/entry'
export * as after_detailer_script from './after_detailer/after_detailer'
export * as ultimate_sd_upscaler from './ultimate_sd_upscaler/ultimate_sd_upscaler'
export * as scripts from './ultimate_sd_upscaler/scripts'
export * as main from './main/main'
export * as controlnet_main from './controlnet/main'
export * as logger from './util/logger'
export * as image_search from './image_search/image_search'
export * as history from './history/history'
export * as viewer from './viewer/viewer'
export { default as viewer_util } from './viewer/viewer_util'
export * as session_ts from './session/session'
export { store as session_store } from './session/session_store'
export { store as sd_tab_store } from './sd_tab/util'
@ -41,5 +42,6 @@ export * as comfyui from './comfyui/comfyui'
export { toJS } from 'mobx'
export { default as node_fs } from 'fs'
export { default as comfyui_util } from './comfyui/util'
export { default as comfyui_main_ui } from './comfyui/main_ui'
export * as diffusion_chain from 'diffusion-chain'
export { default as comfyapi } from './comfyui/comfyapi'

View File

@ -4,7 +4,8 @@ import * as scripts from '../ultimate_sd_upscaler/scripts'
import * as control_net from '../controlnet/entry'
import { store as session_store } from '../session/session_store'
import sd_tab_util from '../sd_tab/util'
import settings_tab from '../settings/settings'
import comfyui_main_ui from '../comfyui/main_ui'
import {
html_manip,
io,
@ -268,13 +269,28 @@ export class Txt2ImgMode extends Mode {
response_json = await this.requestControlNetTxt2Img(settings)
} else {
response_json = await this.requestTxt2Img(settings)
if (
settings_tab.store.data.selected_backend === 'Automatic1111'
) {
response_json = await this.requestTxt2Img(settings) //this is automatic1111 txt2img
} else if (
settings_tab.store.data.selected_backend === 'ComfyUI'
) {
//request Txt2Img from comfyui
const { image_base64_list, image_url_list } =
await comfyui_main_ui.generateComfyTxt2img(settings)
output_images = image_base64_list
}
}
output_images = await this.processOutput(
response_json.images_info,
settings
)
if (settings_tab.store.data.selected_backend === 'Automatic1111') {
output_images = await this.processOutput(
response_json.images_info,
settings
)
} else if (settings_tab.store.data.selected_backend === 'ComfyUI') {
// output_images = image_base64_list
}
} catch (e) {
console.warn(e)
console.warn('output_images: ', output_images)

View File

@ -26,7 +26,7 @@ import {
updateViewerStoreImageAndThumbnail,
} from '../viewer/viewer_util'
import { sd_tab_store } from '../stores'
import settings_tab from '../settings/settings'
declare let g_inpaint_mask_layer: any
declare const g_image_not_found_url: string
declare let g_current_batch_index: number
@ -395,7 +395,11 @@ export class Session {
}
static async getProgress() {
// Progress.startSudoProgress()
progress.Progress.startTimer(async () => {
const comfyProgress = async () => {
progress.store.data.progress_value += 1
}
const auto1111Progress = async () => {
try {
let json = await progress.requestProgress()
const can_update = progress.store.data.can_update
@ -422,7 +426,12 @@ export class Session {
} catch (e) {
console.warn(e)
}
}, 2000)
}
const [callback, timer] =
settings_tab.store.data.selected_backend === 'Automatic1111'
? [auto1111Progress, 2000]
: [comfyProgress, 1000]
progress.Progress.startTimer(callback, timer)
}
static async endProgress() {
await progress.Progress.endTimer(async () => {

View File

@ -49,3 +49,14 @@ export function autoResize(textarea: any, text_content: string, delay = 300) {
// Load the image at `url` into the document via the IO helper.
// Thin async wrapper; resolves when io.IO.urlToLayer completes.
export async function urlToCanvas(url: string, image_name = 'image.png') {
    await io.IO.urlToLayer(url, image_name)
}
// Deep-copy a plain-data object via a JSON round-trip.
// (Drops functions/undefined fields — fine for the JSON prompts used here.)
export const copyJson = (originalObject: any) => {
    const serialized = JSON.stringify(originalObject)
    return JSON.parse(serialized)
}
// Wrap a raw base64 string in a PNG data-URL header.
export function base64ToBase64Url(base64_image: string) {
    return `data:image/png;base64,${base64_image}`
}
// Strip a leading image data-URL header, returning the raw base64 payload.
// Generalized beyond the original png-only prefix to any image MIME type
// (e.g. "data:image/jpeg;base64,AAA" -> "AAA"); anchored at the start so
// base64 payload content is never touched. Non-prefixed input is returned
// unchanged, matching the original behavior.
export function base64UrlToBase64(base64_url: string) {
    return base64_url.replace(/^data:image\/[a-zA-Z0-9.+-]+;base64,/, '')
}

View File

@ -136,3 +136,9 @@ export const resetViewer = () => {
init_store.data.images = []
init_store.data.thumbnails = []
}
// Aggregate default export so consumers can import the viewer stores as a
// single `viewer_util` object.
export default {
    store,
    init_store,
    mask_store,
}