Update for Merged Changes

- key frames not yet integrated
exit_image
Charles Fettinger 2023-05-02 13:49:06 -07:00
parent 02571a52cd
commit acb7fc3feb
7 changed files with 82 additions and 91 deletions

View File

@ -117,6 +117,7 @@ def putPrompts(files):
)
asyncio.run(
showGradioErrorAsync("Loading your prompts failed. It seems to be invalid. Your prompt table has been preserved.",5)
)
return [gr.Textbox.update(), gr.DataFrame.update(), gr.Textbox.update(),gr.Textbox.update()]

View File

@ -23,7 +23,7 @@ def shrink_and_paste_on_blank(current_image, mask_width, mask_height):
# resize and paste onto blank image
prev_image = current_image.resize((new_width, new_height))
blank_image = Image.new("RGBA", (width, height), (0, 0, 0, 1))
blank_image = Image.new("RGBA", (width, height), (0, 0, 0, 0))
blank_image.paste(prev_image, (mask_width, mask_height))
return blank_image
@ -66,12 +66,22 @@ def apply_alpha_mask(image, mask_image):
Image: A PIL Image object of the input image with the applied alpha mask.
"""
# Resize the mask to match the current image size
mask_image = mask_image.resize(image.size)
mask_image = resize_and_crop_image(mask_image, image.width, image.height)
# Apply the mask as the alpha layer of the current image
result_image = image.copy()
result_image.putalpha(mask_image.convert('L')) # convert to grayscale
return result_image
def convert_to_rgba(images):
    """Return a new list in which every RGB image is converted to RGBA.

    Images whose mode is already something other than 'RGB' (e.g. 'RGBA',
    'L', 'P') are passed through unchanged — the same objects, not copies.

    Args:
        images: iterable of PIL Image objects.

    Returns:
        list: images with mode 'RGB' replaced by their RGBA conversion.
    """
    # Comprehension replaces the original append loop; behavior is identical:
    # only mode 'RGB' is converted, everything else is forwarded as-is.
    return [img.convert("RGBA") if img.mode == "RGB" else img for img in images]
def resize_image_with_aspect_ratio(image: Image, basewidth: int = 512, baseheight: int = 512) -> Image:
"""
Resizes an image while maintaining its aspect ratio. This may not fill the entire image height.
@ -99,7 +109,7 @@ def resize_image_with_aspect_ratio(image: Image, basewidth: int = 512, baseheigh
hsize = int((float(orig_height) * float(wpercent)))
# Resize the image with Lanczos resampling filter
resized_image = image.resize((basewidth, hsize), resample=Image.LANCZOS)
resized_image = image.resize((basewidth, hsize), resample=Image.Resampling.LANCZOS)
# If the height of the resized image is still larger than the given baseheight,
# then crop the image from the top and bottom to match the baseheight
@ -153,7 +163,7 @@ def resize_and_crop_image(image: Image, new_width: int = 512, new_height: int =
top_offset = (resized_height - new_height) // 2
# Resize the image with Lanczos resampling filter
resized_image = image.resize((resized_width, resized_height), resample=Image.LANCZOS)
resized_image = image.resize((resized_width, resized_height), resample=Image.Resampling.LANCZOS)
# Crop the image to fill the entire height and width of the new image
cropped_image = resized_image.crop((left_offset, top_offset, left_offset + new_width, top_offset + new_height))
@ -437,6 +447,6 @@ def crop_inner_image(image: Image, width_offset: int, height_offset: int) -> Ima
)
# Resize the cropped image to the original image size using Lanczos resampling
resized_image = cropped_image.resize((width, height), resample=Image.LANCZOS)
resized_image = cropped_image.resize((width, height), resample=Image.Resampling.LANCZOS)
return resized_image

View File

@ -14,7 +14,7 @@ def completeOptionals(j):
if "prompts" in j:
if "headers" in j["prompts"]:
del j["prompts"]["headers"]
j["prompts"]["headers"]=promptTableHeaders
j["prompts"]["headers"]=promptTableHeaders[0]
if "negPrompt" not in j:
j["negPrompt"]=""

View File

@ -56,40 +56,39 @@ def outpaint_steps(
print(print_out)
current_image = main_frames[-1]
# apply available alpha mask of previous image
#if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 0))] != "":
# current_image = apply_alpha_mask(current_image, open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 0))]))
#else:
# #generate automatic alpha mask
# current_image_gradient_ratio = (inpainting_mask_blur / 100) if inpainting_mask_blur > 0 else 0.615 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.89),0.1)
# current_image = apply_alpha_mask(current_image, draw_gradient_ellipse(current_image.width, current_image.height, current_image_gradient_ratio, 0.0, 2.0))
# shrink image to mask size
current_image = shrink_and_paste_on_blank(
current_image, mask_width, mask_height
)
# apply available alpha mask of previous image
if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))] != "":
current_image = apply_alpha_mask(current_image, open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))]))
else:
#generate automatic alpha mask
current_image_gradient_ratio = 0.615 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.89),0.1)
current_image = apply_alpha_mask(current_image, draw_gradient_ellipse(current_image.width, current_image.height, current_image_gradient_ratio, 0.0, 3.0).convert("RGB"))
prev_image = shrink_and_paste_on_blank(
current_image, mask_width, mask_height
)
current_image = prev_image
mask_image = np.array(current_image)[:, :, 3]
mask_image = Image.fromarray(255 - mask_image).convert("RGB")
# create mask (black image with white mask_width width edges)
#prev_image = current_image
# inpainting step
current_image = current_image.convert("RGB")
#current_image = current_image.convert("RGB")
#keyframes are not inpainted
paste_previous_image = not prompt_image_is_keyframe[max(k for k in prompt_image_is_keyframe.keys() if k <= (i + 1))]
paste_previous_image = not prompt_image_is_keyframe[max(k for k in prompt_image_is_keyframe.keys() if k <= (i + 0))]
if custom_exit_image and ((i + 1) == outpaint_steps):
current_image = resize_and_crop_image(custom_exit_image, width, height)
main_frames.append(current_image.convert("RGB"))
print("using Custom Exit Image")
save2Collect(current_image, out_config, f"exit_img.png")
save2Collect(main_frames[i], out_config, f"exit_img.png")
else:
if prompt_images[max(k for k in prompt_images.keys() if k <= (i + 1))] == "":
if prompt_images[max(k for k in prompt_images.keys() if k <= (i + 0))] == "":
pr = prompts[max(k for k in prompts.keys() if k <= i)]
processed, seed = renderImg2Img(
f"{common_prompt_pre}\n{pr}\n{common_prompt_suf}".strip(),
@ -109,22 +108,21 @@ def outpaint_steps(
inpainting_padding,
)
if len(processed.images) > 0:
main_frames.append(processed.images[0].convert("RGB"))
main_frames.append(processed.images[0])
save2Collect(processed.images[0], out_config, f"outpain_step_{i}.png")
paste_previous_image = True
else:
# use prerendered image, known as keyframe. Resize to target size
print(f"image {i} is a keyframe")
current_image = open_image(prompt_images[max(k for k in prompt_images.keys() if k <= (i + 1))])
main_frames.append(resize_and_crop_image(current_image, width, height).convert("RGB"))
save2Collect(current_image, out_config, f"key_frame_{i}.png")
current_image = open_image(prompt_images[max(k for k in prompt_images.keys() if k <= (i + 0))])
main_frames.append(resize_and_crop_image(current_image, width, height))
save2Collect(main_frames[i], out_config, f"key_frame_{i}.png")
#seed = newseed
# TODO: seed behavior
# TODO: seed behavior
# paste previous image on top of current image
if frame_correction and inpainting_mask_blur > 0:
corrected_frame = crop_inner_image(
main_frames[i + 1], mask_width, mask_height
@ -142,17 +140,18 @@ def outpaint_steps(
main_frames[i] = corrected_frame
else: #TEST
# apply available alpha mask of previous image
#if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))] != "":
# current_image = apply_alpha_mask(current_image, open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))]))
#else:
# current_image_gradient_ratio = 0.65 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.925),0.1)
# current_image = draw_gradient_ellipse(current_image.width, current_image.height, current_image_gradient_ratio, 0.0, 1.8).convert("RGB")
if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 0))] != "":
current_image = apply_alpha_mask(main_frames[i], open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 0))]))
else:
current_image_gradient_ratio = (inpainting_mask_blur / 100) if inpainting_mask_blur > 0 else 0.615 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.925),0.1)
current_image = apply_alpha_mask(main_frames[i], draw_gradient_ellipse(main_frames[i].width, main_frames[i].height, current_image_gradient_ratio, 0.0, 2.0))
save2Collect(current_image, out_config, f"main_frame_gradient_{i}")
main_frames[i] = current_image
# paste previous image on current image
if paste_previous_image:
current_image.paste(prev_image, mask=prev_image)
#if paste_previous_image:
#current_image.paste(prev_image, mask=prev_image)
return main_frames
return main_frames, processed
def create_zoom(
@ -219,31 +218,6 @@ def create_zoom(
return result
def prepare_output_path():
    """Build (and create on disk) the output directory for this run.

    Reads the extension's settings from ``shared.opts``:
      - ``infzoom_collectAllResources``: when True, every intermediate
        resource is collected into a per-run ``iz_collect<timestamp>`` folder.
      - ``infzoom_outpath`` / ``infzoom_outSUBpath``: base output location.

    Returns:
        dict with keys:
            "isCollect" (bool): whether resource collection is enabled,
            "save_path" (str): directory that is guaranteed to exist,
            "video_filename" (str): full path of the output .mp4 file.
    """
    isCollect = shared.opts.data.get("infzoom_collectAllResources", False)
    output_path = shared.opts.data.get("infzoom_outpath", "outputs")
    save_path = os.path.join(
        output_path, shared.opts.data.get("infzoom_outSUBpath", "infinite-zooms")
    )

    # Sample the clock once so the collect folder and the video filename
    # share the same timestamp (the original called time.time() twice and
    # could produce mismatched names across a second boundary).
    timestamp = str(int(time.time()))

    if isCollect:
        save_path = os.path.join(save_path, "iz_collect" + timestamp)

    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` sequence.
    os.makedirs(save_path, exist_ok=True)

    video_filename = os.path.join(
        save_path, "infinite_zoom_" + timestamp + ".mp4"
    )

    return {
        "isCollect": isCollect,
        "save_path": save_path,
        "video_filename": video_filename,
    }
def save2Collect(img, out_config, name):
    """Persist *img* as a PNG into the collect folder, if collecting is on.

    Does nothing when ``out_config["isCollect"]`` is falsy.

    Args:
        img: PIL Image (anything with a ``save(path)`` method).
        out_config: dict with "isCollect" (bool) and "save_path" (str).
        name: base filename; ".png" is appended.
    """
    # Guard clause: collection disabled means no disk write at all.
    if not out_config["isCollect"]:
        return
    target = f'{out_config["save_path"]}/{name}.png'
    img.save(target)
@ -254,7 +228,7 @@ def frame2Collect(all_frames, out_config):
def prepare_output_path():
isCollect = shared.opts.data.get("infzoom_collectAllResources", False)
output_path = shared.opts.data.get("infzoom_outpath", "output")
output_path = shared.opts.data.get("infzoom_outpath", "outputs")
save_path = os.path.join(
output_path, shared.opts.data.get("infzoom_outSUBpath", "infinite-zooms")
@ -356,9 +330,9 @@ def create_zoom_single(
height = closest_upper_divisible_by_eight(outputsizeH)
current_image = Image.new(mode="RGBA", size=(width, height))
mask_image = np.array(current_image)[:, :, 3]
mask_image = Image.fromarray(255 - mask_image).convert("RGB")
current_image = current_image.convert("RGB")
#mask_image = np.array(current_image)[:, :, 3]
#mask_image = Image.fromarray(255 - mask_image).convert("RGB")
#current_image = current_image.convert("RGB")
current_seed = seed
extra_frames = 0
@ -366,7 +340,7 @@ def create_zoom_single(
current_image = resize_and_crop_image(custom_init_image, width, height)
print("using Custom Initial Image")
save2Collect(current_image, out_config, f"init_custom.png")
processed = Processed(StableDiffusionProcessing(),images_list=[current_image], seed=current_seed, info="init_custom image")
#processed = Processed(StableDiffusionProcessing(),images_list=[current_image], seed=current_seed, info="init_custom image")
else:
if prompt_images[min(k for k in prompt_images.keys() if k >= 0)] == "":
load_model_from_setting(
@ -391,7 +365,7 @@ def create_zoom_single(
current_image = open_image(prompt_images[min(k for k in prompt_images.keys() if k >= 0)])
current_image = resize_and_crop_image(current_image, width, height)
save2Collect(current_image, out_config, f"init_custom.png")
processed = Processed(StableDiffusionProcessing(),images_list=[current_image], seed=current_seed, info="prompt image")
#processed = Processed(StableDiffusionProcessing(),images_list=[current_image], seed=current_seed, info="prompt_0 image")
mask_width = math.trunc(width / 4) # was initially 512px => 128px
mask_height = math.trunc(height / 4) # was initially 512px => 128px
@ -410,7 +384,7 @@ def create_zoom_single(
if custom_exit_image:
extra_frames += 2
main_frames = outpaint_steps(
main_frames, processed = outpaint_steps(
width,
height,
common_prompt_pre,
@ -435,7 +409,12 @@ def create_zoom_single(
mask_width,
mask_height,
custom_exit_image,
False
)
#for k in range(len(main_frames)):
# resize_and_crop_image(main_frames[k], width, height)
all_frames.append(
do_upscaleImg(main_frames[0], upscale_do, upscaler_name, upscale_by)
if upscale_do

View File

@ -7,33 +7,33 @@ default_sampler = "DDIM"
default_cfg_scale = 8
default_mask_blur = 48
default_total_outpaints = 5
promptTableHeaders=["Start at second [0,1,...]", "prompt", "image location", "blend mask", "is keyframe"],
promptTableHeaders = ["Outpaint Steps", "Prompt", "image location", "blend mask", "is keyframe"], ["number", "str", "str", "str", "bool"]
default_prompt = """
{
"prePrompt":"<lora:epiNoiseoffset_v2:0.6> ",
"prePrompt":"(((Best quality))), ((masterpiece)), ",
"prompts":{
"headers":["outpaint steps","prompt","image location","blend mask location", "is keyframe"],
"headers":["Start at second [0,1,...]","prompt","image location","blend mask location", "is keyframe"],
"data":[
[0, "Huge spectacular Waterfall in a dense tropical forest,epic perspective,(vegetation overgrowth:1.3)(intricate, ornamentation:1.1),(baroque:1.1), fantasy, (realistic:1) digital painting , (magical,mystical:1.2) , (wide angle shot:1.4), (landscape composed:1.2)(medieval:1.1), divine,cinematic,(tropical forest:1.4),(river:1.3)mythology,india, volumetric lighting, Hindu ,epic, Alex Horley Wenjun Lin greg rutkowski Ruan Jia (Wayne Barlowe:1.2) <lora:epiNoiseoffset_v2:0.6> ","C:\\path\\to\\image.png", "C:\\path\\to\\mask_image.png", false],
[2, "a Lush jungle","","",false],
[3, "a Thick rainforest","","",false],
[5, "a Verdant canopy","","",false]
[0, "Huge spectacular Waterfall in a dense tropical forest,epic perspective,(vegetation overgrowth:1.3)(intricate, ornamentation:1.1),(baroque:1.1), fantasy, (realistic:1) digital painting , (magical,mystical:1.2) , (wide angle shot:1.4), (landscape composed:1.2)(medieval:1.1), divine,cinematic,(tropical forest:1.4),(river:1.3)mythology,india, volumetric lighting, Hindu ,epic, Alex Horley Wenjun Lin greg rutkowski Ruan Jia (Wayne Barlowe:1.2) <lora:epiNoiseoffset_v2:0.6> ","C:\\\\path\\\\to\\\\image.png", "extensions\\\\infinite-zoom-automatic1111-webui\\\\blends\\\\sun-square.png", true],
[1, "a Lush jungle","","",false],
[2, "a Thick rainforest","","",false],
[4, "a Verdant canopy","","",false]
]
},
"postPrompt": "style by Alex Horley Wenjun Lin greg rutkowski Ruan Jia (Wayne Barlowe:1.2)",
"postPrompt": "style by Alex Horley Wenjun Lin greg rutkowski Ruan Jia (Wayne Barlowe:1.2), <lora:epiNoiseoffset_v2:0.6>",
"negPrompt": "frames, border, edges, borderline, text, character, duplicate, error, out of frame, watermark, low quality, ugly, deformed, blur, bad-artist"
}
"""
empty_prompt = (
'{"prompts":{"data":[],"headers":["outpaint steps","prompt","image location", "blend mask location", "is keyframe"]},"negPrompt":"", prePrompt:"", postPrompt:""}'
'{"prompts":{"data":[0,"","","",false],"headers":["Outpaintg Steps","prompt","image location", "blend mask location", "is keyframe"]},"negPrompt":"", prePrompt:"", postPrompt:""}'
)
invalid_prompt = {
"prompts": {
"data": [[0, "Your prompt-json is invalid, please check Settings","", "", False]],
"headers": ["outpaint steps", "prompt","image location","blend mask location", "is keyframe"],
"headers": ["Start at second [0,1,...]", "prompt","image location","blend mask location", "is keyframe"],
},
"negPrompt": "Invalid prompt-json",
"prePrompt": "Invalid prompt",

View File

@ -51,7 +51,6 @@ def on_ui_tabs():
minimum=2,
maximum=100,
step=1,
value=8,
label="Total video length [s]",
value=default_total_outpaints,
precision=0,
@ -67,12 +66,13 @@ def on_ui_tabs():
)
main_prompts = gr.Dataframe(
type="array",
headers= promptTableHeaders,
datatype=["number", "str", "str", "str", "bool"],
headers=promptTableHeaders[0],
datatype=promptTableHeaders[1],
row_count=1,
col_count=(5, "fixed"),
value=jpr["prompts"],
wrap=True,
elem_id = "infzoom_prompt_table",
)
main_common_prompt_suf = gr.Textbox(
@ -242,8 +242,8 @@ def on_ui_tabs():
label="Upscale by factor",
minimum=1,
maximum=8,
step=0.5,
value=2,
step=0.25,
value=1,
)
with gr.Accordion("Help", open=False):
gr.Markdown(
@ -299,7 +299,7 @@ Our best experience and trade-off is the R-ERSGAn4x upscaler.
fn=checkPrompts, inputs=[main_prompts], outputs=[generate_btn]
)
interrupt.click(fn=shared.state.interrupt(), inputs=[], outputs=[])
interrupt.click(fn=lambda: shared.state.interrupt(), inputs=[], outputs=[])
infinite_zoom_interface.queue()
return [(infinite_zoom_interface, "Infinite Zoom", "iz_interface")]
@ -338,7 +338,8 @@ def check_create_zoom(
if 0 not in keys:
raise gr.Error("Ensure your prompt table has a step 9 (zero) prompt")
return create_zoom( main_common_prompt_pre,
return create_zoom(
main_common_prompt_pre,
main_prompts,
main_common_prompt_suf,
main_negative_prompt,

View File

@ -8,11 +8,11 @@
flex: 0 0 0%;
width: 0;
}
.gradio-container-3-23-0 .gradio-dataframe input {
height: 90%;
width: 90%;
.gradio-container-3-23-0 .gradio-dataframe input, #infzoom_prompt_table input {
height: 95%;
width: 95%;
}
.gradio-container-3-23-0 .gradio-dataframe .scroll-hide {
.gradio-container-3-23-0 .gradio-dataframe .scroll-hide, #infzoom_prompt_table .scroll-hide {
scrollbar-width: unset;
}
#component-2115, #component-2065 .gradio-column:nth-child(1) {