Merge pull request #59 from v8hid/commonPrompt

Common prompt (incl. JSON schema refactoring)
pull/75/head
GeorgLegato 2023-04-27 01:19:26 +02:00 committed by GitHub
commit 83a21b5155
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 304 additions and 143 deletions

View File

@ -1,12 +1,11 @@
import math
import os
import json
from jsonschema import validate
import modules.shared as shared
import modules.sd_models
import gradio as gr
from scripts import postprocessing_upscale
from .static_variables import jsonprompt_schemafile
from .prompt_util import readJsonPrompt
import asyncio
def fix_env_Path_ffprobe():
@ -90,36 +89,38 @@ def do_upscaleImg(curImg, upscale_do, upscaler_name, upscale_by):
)
return pp.image
def validatePromptJson_throws(data):
    """Validate *data* against the prompt JSON schema file; raises on mismatch."""
    with open(jsonprompt_schemafile, "r") as schema_file:
        prompt_schema = json.load(schema_file)
    validate(instance=data, schema=prompt_schema)
async def showGradioErrorAsync(txt, delay=1):
    """Wait *delay* seconds, then raise ``gr.Error(txt)``.

    Raising is the only way gradio surfaces an error popup, so this is meant
    to be scheduled as a fire-and-forget task (see the commented-out call in
    ``putPrompts``).
    """
    await asyncio.sleep(delay)  # let the UI settle before raising
    raise gr.Error(txt)
def putPrompts(files):
    """Load an uploaded prompt-JSON file into the UI widgets.

    *files* is the gradio upload object (``files.name`` is a temp-file path).
    Returns gradio updates in the order [common prefix, prompts table,
    common suffix, negative prompt].

    The rendered diff left stale pre-merge lines in this function (a direct
    ``validatePromptJson_throws`` call and duplicated return entries); this is
    the coherent post-merge version using ``readJsonPrompt``.
    """
    try:
        with open(files.name, "r") as f:
            file_contents = f.read()

        # readJsonPrompt parses + schema-validates; False => raise on failure
        data = readJsonPrompt(file_contents, False)
        return [
            gr.Textbox.update(data["commonPromptPrefix"]),
            gr.DataFrame.update(data["prompts"]),
            gr.Textbox.update(data["commonPromptSuffix"]),
            gr.Textbox.update(data["negPrompt"]),
        ]
    except Exception:
        print(
            "[InfiniteZoom:] Loading your prompt failed. It seems to be invalid. Your prompt table is preserved."
        )
        # An error popup could only be shown with `raise`, which breaks the ui:
        # asyncio.run(showGradioErrorAsync("Loading your prompts failed. It seems to be invalid. Your prompt table has been preserved.",5))
        # Empty updates leave every widget (incl. the table) unchanged.
        return [gr.Textbox.update(), gr.DataFrame.update(), gr.Textbox.update(), gr.Textbox.update()]
def clearPrompts():
    """Reset the prompt table and blank the prefix/suffix/negative boxes.

    Output order matches the click handler's outputs:
    [prompts table, negative prompt, common prefix, common suffix].
    """
    cleared_boxes = [gr.Textbox.update("") for _ in range(3)]
    return [gr.DataFrame.update(value=[[0, "Infinite Zoom. Start over"]])] + cleared_boxes

77
iz_helpers/prompt_util.py Normal file
View File

@ -0,0 +1,77 @@
import json
from jsonschema import validate
from .static_variables import (
empty_prompt,
invalid_prompt,
jsonprompt_schemafile
)
"""
json is valid, but not our current schema.
lets try something.
does it look like something usable?
def fixJson(j):
fixedJ = empty_prompt
try:
if isinstance(j, dict):
if "prompts" in j:
if "data" in j["prompts"]:
if isinstance (j["prompts"]["data"],list):
fixedJ["prompts"]["data"] = j["prompts"]["data"]
if not isinstance (fixedJ["prompts"]["data"][0].
if "headers" not in j["prompts"]:
fixedJ["prompts"]["headers"] = ["outpaint steps","prompt"]
else:
fixedJ["prompts"]["headers"] = j["prompts"]["headers"]
if "negPrompt" in j:
fixedJ["prompts"]["headers"]
if "commonPrompt" in j:
return j
except Exception:
raise "JsonFix: Failed on recovering json prompt"
return j
"""
def fixHeaders(j):
    """Backfill the default prompt-table headers on a parsed prompt dict.

    Mutates *j* in place when it is a dict whose "prompts" entry lacks a
    "headers" key; anything else is passed through untouched.
    """
    if isinstance(j, dict) and "prompts" in j and "headers" not in j["prompts"]:
        j["prompts"]["headers"] = ["outpaint steps", "prompt"]
    return j
def validatePromptJson_throws(data):
    """Schema-validate a parsed prompt dict and normalize its headers.

    Raises ValueError when *data* does not match the prompt schema; on
    success returns *data* with default headers backfilled via fixHeaders.
    """
    with open(jsonprompt_schemafile, "r") as s:
        schema = json.load(s)
    try:
        validate(instance=data, schema=schema)
    except Exception as err:
        # Original code did `raise "Your prompts are not schema valid."` —
        # raising a plain string is itself a TypeError in Python 3. Raise a
        # real exception and keep the jsonschema error as the cause.
        raise ValueError("Your prompts are not schema valid.") from err
    # fixJson(data)  # (experimental recovery, kept disabled)
    return fixHeaders(data)
def readJsonPrompt(txt, returnFailPrompt=False):
    """Parse and schema-validate a prompt-JSON string.

    Returns the validated prompt dict.  With ``returnFailPrompt=True`` any
    parse/validation failure yields the canned ``invalid_prompt`` instead of
    raising (used by the UI so a corrupt saved prompt cannot break startup).

    Fixes over the original: the two `raise "<string>"` statements (which
    are TypeErrors in Python 3) now raise ValueError, and a validation
    failure with ``returnFailPrompt=False`` propagates instead of silently
    returning None.
    """
    if not txt:
        # NOTE(review): empty_prompt is a JSON *string* constant, while the
        # success path returns a dict — confirm callers handle both.
        return empty_prompt

    try:
        jpr = json.loads(txt)
    except Exception as err:
        msg = f"Infinite Zoom: Corrupted Json structure: {txt[:24]} ..."
        if returnFailPrompt:
            print(msg)
            return invalid_prompt
        raise ValueError(msg) from err

    try:
        return validatePromptJson_throws(jpr)
    except Exception:
        if returnFailPrompt:
            return invalid_prompt
        raise  # propagate instead of silently returning None

View File

@ -1,49 +1,63 @@
{
    "$schema": "http://json-schema.org/draft-07/schema#",
    "$id": "1.1",
    "type": "object",
    "properties": {
        "prompts": {
            "type": "object",
            "properties": {
                "data": {
                    "type": "array",
                    "items": {
                        "type": "array",
                        "items": [
                            {
                                "oneOf": [
                                    {
                                        "type": "integer",
                                        "minimum": 0
                                    },
                                    {
                                        "type": "string"
                                    }
                                ]
                            },
                            {
                                "type": "string"
                            }
                        ],
                        "minItems": 0,
                        "maxItems": 999,
                        "uniqueItems": false
                    },
                    "minItems": 0
                },
                "headers": {
                    "type": "array",
                    "items": {
                        "type": "string"
                    },
                    "minItems": 2
                }
            },
            "required": [
                "data"
            ]
        },
        "negPrompt": {
            "type": "string"
        },
        "commonPromptPrefix": {
            "type": "string"
        },
        "commonPromptSuffix": {
            "type": "string"
        }
    },
    "required": [
        "prompts",
        "negPrompt",
        "commonPromptPrefix",
        "commonPromptSuffix"
    ]
}

View File

@ -16,7 +16,9 @@ from .video import write_video
def create_zoom(
common_prompt_pre,
prompts_array,
common_prompt_suf,
negative_prompt,
num_outpainting_steps,
guidance_scale,
@ -46,7 +48,9 @@ def create_zoom(
for i in range(batchcount):
print(f"Batch {i+1}/{batchcount}")
result = create_zoom_single(
common_prompt_pre,
prompts_array,
common_prompt_suf,
negative_prompt,
num_outpainting_steps,
guidance_scale,
@ -76,7 +80,9 @@ def create_zoom(
def create_zoom_single(
common_prompt_pre,
prompts_array,
common_prompt_suf,
negative_prompt,
num_outpainting_steps,
guidance_scale,
@ -139,8 +145,9 @@ def create_zoom_single(
"infzoom_txt2img_model", progress, "Loading Model for txt2img: "
)
pr = prompts[min(k for k in prompts.keys() if k >= 0)]
processed, newseed = renderTxt2Img(
prompts[min(k for k in prompts.keys() if k >= 0)],
f"{common_prompt_pre}\n{pr}\n{common_prompt_suf}".strip(),
negative_prompt,
sampler,
num_inference_steps,
@ -203,8 +210,9 @@ def create_zoom_single(
)
print("using Custom Exit Image")
else:
pr = prompts[max(k for k in prompts.keys() if k <= i)]
processed, newseed = renderImg2Img(
prompts[max(k for k in prompts.keys() if k <= i)],
f"{common_prompt_pre}\n{pr}\n{common_prompt_suf}".strip(),
negative_prompt,
sampler,
num_inference_steps,

View File

@ -4,21 +4,20 @@ import modules.sd_samplers
# Default prompt JSON shown in the UI on first start.  Must stay parseable by
# readJsonPrompt and match iz_helpers/promptschema.json.  The rendered diff
# had merged old+new lines (duplicate "headers"/"data"/"negPrompt" rows)
# making the embedded JSON unparseable; this is the coherent new version
# with the lora/artist fragments moved into prefix/suffix.
default_prompt = """
{
    "commonPromptPrefix":"<lora:epiNoiseoffset_v2:0.6> ",
    "prompts":{
        "headers":["outpaint steps","prompt"],
        "data":[
            [0,"Huge spectacular Waterfall in a dense tropical forest,epic perspective,(vegetation overgrowth:1.3)(intricate, ornamentation:1.1),(baroque:1.1), fantasy, (realistic:1) digital painting , (magical,mystical:1.2) , (wide angle shot:1.4), (landscape composed:1.2)(medieval:1.1), divine,cinematic,(tropical forest:1.4),(river:1.3)mythology,india, volumetric lighting, Hindu ,epic"]
        ]
    },
    "commonPromptSuffix":"style by Alex Horley Wenjun Lin greg rutkowski Ruan Jia (Wayne Barlowe:1.2)",
    "negPrompt":"frames, border, edges, borderline, text, character, duplicate, error, out of frame, watermark, low quality, ugly, deformed, blur, bad-artist"
}
"""
available_samplers = [
s.name for s in modules.sd_samplers.samplers if "UniPc" not in s.name
]
# Minimal valid prompt JSON used when no prompt is configured.  The merged
# version was invalid JSON (unquoted keys, `commonPromptSuffix` with no
# value), so json.loads(empty_prompt) raised; fixed to valid JSON covering
# all four schema-required keys.
empty_prompt = (
    '{"prompts":{"data":[],"headers":["outpaint steps","prompt"]},'
    '"negPrompt":"","commonPromptPrefix":"","commonPromptSuffix":""}'
)
invalid_prompt = {
@ -27,7 +26,14 @@ invalid_prompt = {
"headers": ["outpaint steps", "prompt"],
},
"negPrompt": "Invalid prompt-json",
"commonPromptPrefix": "Invalid prompt",
"commonPromptSuffix": "Invalid prompt"
}
available_samplers = [
s.name for s in modules.sd_samplers.samplers if "UniPc" not in s.name
]
current_script_dir = scripts.basedir().split(os.sep)[
-2:
] # contains install and our extension foldername

View File

@ -1,4 +1,3 @@
import json
import gradio as gr
from .run import create_zoom
import modules.shared as shared
@ -6,11 +5,10 @@ from webui import wrap_gradio_gpu_call
from modules.ui import create_output_panel
from .static_variables import (
default_prompt,
empty_prompt,
invalid_prompt,
available_samplers,
)
from .helpers import validatePromptJson_throws, putPrompts, clearPrompts
from .helpers import putPrompts, clearPrompts
from .prompt_util import readJsonPrompt
def on_ui_tabs():
@ -30,24 +28,30 @@ def on_ui_tabs():
with gr.Row():
with gr.Column(scale=1, variant="panel"):
with gr.Tab("Main"):
main_outpaint_steps = gr.Slider(
minimum=2,
maximum=100,
step=1,
value=8,
label="Total Outpaint Steps",
info="The more it is, the longer your videos will be",
)
with gr.Row():
batchcount_slider = gr.Slider(
minimum=1,
maximum=25,
value=shared.opts.data.get("infzoom_batchcount", 1),
step=1,
label="Batch Count",
)
main_outpaint_steps = gr.Slider(
minimum=2,
maximum=100,
step=1,
value=8,
label="Total Outpaint Steps"
)
# safe reading json prompt
pr = shared.opts.data.get("infzoom_defPrompt", default_prompt)
if not pr:
pr = empty_prompt
try:
jpr = json.loads(pr)
validatePromptJson_throws(jpr)
except Exception:
jpr = invalid_prompt
jpr = readJsonPrompt(pr, True)
main_common_prompt_pre = gr.Textbox(
value=jpr["commonPromptPrefix"], label="Common Prompt Prefix"
)
main_prompts = gr.Dataframe(
type="array",
@ -59,6 +63,10 @@ def on_ui_tabs():
wrap=True,
)
main_common_prompt_suf = gr.Textbox(
value=jpr["commonPromptSuffix"], label="Common Prompt Suffix"
)
main_negative_prompt = gr.Textbox(
value=jpr["negPrompt"], label="Negative Prompt"
)
@ -79,12 +87,12 @@ def on_ui_tabs():
exportPrompts_button.click(
None,
_js="exportPrompts",
inputs=[main_prompts, main_negative_prompt],
outputs=None,
inputs=[main_common_prompt_pre, main_prompts, main_common_prompt_suf, main_negative_prompt],
outputs=None
)
importPrompts_button.upload(
fn=putPrompts,
outputs=[main_prompts, main_negative_prompt],
outputs=[main_common_prompt_pre, main_prompts,main_common_prompt_suf , main_negative_prompt],
inputs=[importPrompts_button],
)
@ -97,59 +105,54 @@ def on_ui_tabs():
clearPrompts_button.click(
fn=clearPrompts,
inputs=[],
outputs=[main_prompts, main_negative_prompt],
outputs=[main_prompts, main_negative_prompt, main_common_prompt_pre, main_common_prompt_suf],
)
with gr.Row():
seed = gr.Number(
label="Seed", value=-1, precision=0, interactive=True
)
main_sampler = gr.Dropdown(
label="Sampler",
choices=available_samplers,
value="Euler a",
type="value",
)
with gr.Row():
main_width = gr.Slider(
minimum=16,
maximum=2048,
value=shared.opts.data.get("infzoom_outsizeW", 512),
step=16,
label="Output Width",
)
main_height = gr.Slider(
minimum=16,
maximum=2048,
value=shared.opts.data.get("infzoom_outsizeH", 512),
step=16,
label="Output Height",
)
with gr.Row():
main_guidance_scale = gr.Slider(
minimum=0.1,
maximum=15,
step=0.1,
value=7,
label="Guidance Scale",
)
sampling_step = gr.Slider(
minimum=1,
maximum=100,
step=1,
value=50,
label="Sampling Steps for each outpaint",
)
with gr.Row():
init_image = gr.Image(type="pil", label="custom initial image")
exit_image = gr.Image(type="pil", label="custom exit image")
batchcount_slider = gr.Slider(
minimum=1,
maximum=25,
value=shared.opts.data.get("infzoom_batchcount", 1),
step=1,
label="Batch Count",
)
with gr.Accordion("Render settings"):
with gr.Row():
seed = gr.Number(
label="Seed", value=-1, precision=0, interactive=True
)
main_sampler = gr.Dropdown(
label="Sampler",
choices=available_samplers,
value="Euler a",
type="value",
)
with gr.Row():
main_width = gr.Slider(
minimum=16,
maximum=2048,
value=shared.opts.data.get("infzoom_outsizeW", 512),
step=16,
label="Output Width",
)
main_height = gr.Slider(
minimum=16,
maximum=2048,
value=shared.opts.data.get("infzoom_outsizeH", 512),
step=16,
label="Output Height",
)
with gr.Row():
main_guidance_scale = gr.Slider(
minimum=0.1,
maximum=15,
step=0.1,
value=7,
label="Guidance Scale",
)
sampling_step = gr.Slider(
minimum=1,
maximum=100,
step=1,
value=50,
label="Sampling Steps for each outpaint",
)
with gr.Row():
init_image = gr.Image(type="pil", label="Custom initial image")
exit_image = gr.Image(type="pil", label="Custom exit image")
with gr.Tab("Video"):
video_frame_rate = gr.Slider(
label="Frames per second",
@ -237,7 +240,9 @@ Our best experience and trade-off is the R-ERSGAn4x upscaler.
generate_btn.click(
fn=wrap_gradio_gpu_call(create_zoom, extra_outputs=[None, "", ""]),
inputs=[
main_common_prompt_pre,
main_prompts,
main_common_prompt_suf,
main_negative_prompt,
main_outpaint_steps,
main_guidance_scale,

View File

@ -0,0 +1,50 @@
// mouseover tooltips for various UI elements
// Declared with `var` (and `tooltip` below with `let`) so nothing leaks as
// an accidental implicit global.
var infzoom_titles = {
    "Batch Count":"How many separate videos to create",
    "Total Outpaint Steps":"The more it is, the longer your videos will be. Each step generates frame for 1 second at your FPS, while cycling through your array of prompts",
    "Common Prompt Prefix":"Prompt inserted before each step",
    "Common Prompt Suffix":"Prompt inserted after each step",
    "Negative Prompt":"What your model shall avoid",
    "Export prompts": "Downloads a JSON file to save all prompts",
    "Import prompts": "Restore Prompts table from a specific JSON file",
    "Clear prompts": "Start over, remove all entries from prompt table, prefix, suffix, negative",
    "Custom initial image":"An image at the end resp. begin of your movie, depending or ZoomIn or Out",
    "Custom exit image":"An image at the end resp. begin of your movie, depending or ZoomIn or Out",
    "Zoom Speed":"Varies additional frames per second",
};

onUiUpdate(function () {
    // Attach a title to any element whose text, value or one of its CSS
    // classes matches a key of infzoom_titles.
    gradioApp().querySelectorAll('span, button, select, p').forEach(function (span) {
        let tooltip = infzoom_titles[span.textContent] || infzoom_titles[span.value];
        if (!tooltip) {
            for (const c of span.classList) {
                if (c in infzoom_titles) {
                    tooltip = infzoom_titles[c];
                    break;
                }
            }
        }
        if (tooltip) {
            span.title = tooltip;
        }
    });

    // Keep <select> tooltips in sync with the currently chosen option.
    gradioApp().querySelectorAll('select').forEach(function (select) {
        if (select.onchange != null) return; // already wired up
        select.onchange = function () {
            select.title = infzoom_titles[select.value] || "";
        };
    });
});

View File

@ -1,7 +1,7 @@
// Function to download data to a file
function exportPrompts(p, np, filename = "infinite-zoom-prompts.json") {
function exportPrompts(cppre,p, cpsuf,np, filename = "infinite-zoom-prompts.json") {
let J = { prompts: p, negPrompt: np }
let J = { prompts: p, negPrompt: np, commonPromptPrefix: cppre, commonPromptSuffix: cpsuf }
var file = new Blob([JSON.stringify(J)], { type: "text/csv" });
if (window.navigator.msSaveOrOpenBlob) // IE10+