Add blend, work out more issues
Load Total Video Length from max frames in JSON
parent
60778dde98
commit
7c7e684a7e
|
|
@ -4,7 +4,7 @@ import modules.shared as shared
|
|||
import modules.sd_models
|
||||
import gradio as gr
|
||||
from scripts import postprocessing_upscale
|
||||
from .prompt_util import readJsonPrompt
|
||||
from .prompt_util import readJsonPrompt, process_keys
|
||||
from .static_variables import jsonprompt_schemafile
|
||||
import asyncio
|
||||
|
||||
|
|
@ -98,17 +98,19 @@ def validatePromptJson_throws(data):
|
|||
schema = json.load(s)
|
||||
validate(instance=data, schema=schema)
|
||||
|
||||
def putPrompts(files):
|
||||
def putPrompts(files):
|
||||
try:
|
||||
with open(files.name, "r") as f:
|
||||
file_contents = f.read()
|
||||
|
||||
data = readJsonPrompt(file_contents,False)
|
||||
prompts_keys = process_keys(data["prompts"]["data"])
|
||||
return [
|
||||
gr.Textbox.update(data["prePrompt"]),
|
||||
gr.DataFrame.update(data["prompts"]),
|
||||
gr.Textbox.update(data["postPrompt"]),
|
||||
gr.Textbox.update(data["negPrompt"])
|
||||
gr.Textbox.update(data["negPrompt"]),
|
||||
gr.Slider.update(value=prompts_keys[0]),
|
||||
]
|
||||
|
||||
except Exception:
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageDraw, ImageFont
|
||||
from PIL import Image, ImageDraw, ImageEnhance, ImageFilter, ImageDraw, ImageFont, ImageOps
|
||||
import requests
|
||||
import base64
|
||||
import numpy as np
|
||||
|
|
@ -451,4 +451,36 @@ def crop_inner_image(image: Image, width_offset: int, height_offset: int) -> Ima
|
|||
# Resize the cropped image to the original image size using Lanczos resampling
|
||||
resized_image = cropped_image.resize((width, height), resample=Image.Resampling.LANCZOS)
|
||||
|
||||
return resized_image
|
||||
return resized_image
|
||||
|
||||
def blend_images(start_image: Image, stop_image: Image, gray_image: Image, num_frames: int) -> list:
    """
    Blend two images together, using the gray image as a per-pixel alpha mask.

    Parameters:
    - start_image: the starting PIL image in RGBA mode
    - stop_image: the target PIL image in RGBA mode
    - gray_image: a grayscale PIL image of the same size as start_image and
      stop_image (assumed same size — TODO confirm at call sites)
    - num_frames: the number of frames to generate in the blending animation

    Returns a list of PIL images representing the blending animation
    (empty when num_frames is 0).
    """
    # Set the alpha layers of the two endpoints: the mask is applied to the
    # start image and its inverse to the stop image, so the crossfade reveals
    # the target through the mask's gradient.
    start_image = apply_alpha_mask(start_image, gray_image)
    stop_image = apply_alpha_mask(stop_image, gray_image, invert=True)

    # Guard: a single requested frame would make the interpolation below
    # divide by zero (num_frames - 1 == 0). Emit the alpha=0.0 frame, which
    # is what the first loop iteration produces for any larger num_frames.
    if num_frames == 1:
        return [Image.blend(start_image, stop_image, 0.0)]

    # Initialize the list of blended frames.
    blended_frames = []

    # Generate each frame of the blending animation with a linear alpha
    # ramp from 0.0 (start) to 1.0 (stop).
    for i in range(num_frames):
        alpha = i / float(num_frames - 1)
        blended_frames.append(Image.blend(start_image, stop_image, alpha))

    return blended_frames
|
||||
|
|
@ -5,8 +5,17 @@ from .static_variables import (
|
|||
empty_prompt,
|
||||
invalid_prompt,
|
||||
jsonprompt_schemafile,
|
||||
promptTableHeaders
|
||||
promptTableHeaders,
|
||||
default_total_outpaints,
|
||||
)
|
||||
prompts_keys = (default_total_outpaints, default_total_outpaints)
|
||||
|
||||
def process_keys(data):
    """Summarize the key column of a prompt table.

    The first column of every row holds an outpaint-step key that is
    convertible to int. Returns a ``(max_key, num_keys)`` tuple: the
    largest key present and the total number of rows.
    """
    step_keys = [int(row[0]) for row in data]
    return (max(step_keys), len(step_keys))
|
||||
|
||||
def completeOptionals(j):
|
||||
if isinstance(j, dict):
|
||||
|
|
@ -33,7 +42,6 @@ def completeOptionals(j):
|
|||
|
||||
return j
|
||||
|
||||
|
||||
def validatePromptJson_throws(data):
|
||||
with open(jsonprompt_schemafile, "r") as s:
|
||||
schema = json.load(s)
|
||||
|
|
|
|||
|
|
@ -118,7 +118,7 @@ def outpaint_steps(
|
|||
paste_previous_image = True
|
||||
else:
|
||||
# use prerendered image, known as keyframe. Resize to target size
|
||||
print(f"image {i + 1} is a keyframe: Full:{not paste_previous_image}")
|
||||
print(f"image {i + 1} is a keyframe: Paste Previous:{not paste_previous_image}")
|
||||
current_image = open_image(prompt_images[max(k for k in prompt_images.keys() if k <= (i + 1))])
|
||||
current_image = resize_and_crop_image(current_image, width, height).convert("RGBA")
|
||||
main_frames.append(current_image)
|
||||
|
|
@ -145,7 +145,7 @@ def outpaint_steps(
|
|||
main_frames[i] = corrected_frame
|
||||
else: #TEST
|
||||
# paste current image with alpha layer on previous image to merge
|
||||
if paste_previous_image and i > 0: #and not prompt_image_is_keyframe[max(k for k in prompt_image_is_keyframe.keys() if k <= (i + 0))]:
|
||||
if paste_previous_image and i > 0:
|
||||
# apply predefined or generated alpha mask to current image
|
||||
if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))] != "":
|
||||
current_image = apply_alpha_mask(main_frames[i + 1], open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))]))
|
||||
|
|
@ -180,6 +180,9 @@ def outpaint_steps(
|
|||
save2Collect(current_image, out_config, f"main_frame_gradient_{i + 1}")
|
||||
|
||||
if (not paste_previous_image) and ((i + 1) == outpaint_steps):
|
||||
# fix initial image by adding alpha layer
|
||||
|
||||
# fix exit image and frames
|
||||
backward_image = shrink_and_paste_on_blank(
|
||||
current_image, mask_width, mask_height
|
||||
)
|
||||
|
|
@ -201,7 +204,7 @@ def outpaint_steps(
|
|||
#input("Press Enter to continue...")
|
||||
|
||||
# Remove extra frames
|
||||
main_frames = main_frames[:(outpaint_steps + 1)]
|
||||
main_frames = main_frames[:(outpaint_steps)]
|
||||
return main_frames, processed
|
||||
|
||||
|
||||
|
|
@ -426,7 +429,7 @@ def create_zoom_single(
|
|||
)
|
||||
|
||||
if custom_exit_image:
|
||||
extra_frames += 2
|
||||
extra_frames += 1
|
||||
|
||||
main_frames, processed = outpaint_steps(
|
||||
width,
|
||||
|
|
@ -553,6 +556,8 @@ def create_zoom_single(
|
|||
video_zoom_mode,
|
||||
int(video_start_frame_dupe_amount),
|
||||
int(video_last_frame_dupe_amount),
|
||||
num_interpol_frames,
|
||||
True
|
||||
)
|
||||
print("Video saved in: " + os.path.join(script_path, out_config["video_filename"]))
|
||||
return (
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ import modules.sd_samplers
|
|||
default_sampling_steps = 35
|
||||
default_sampler = "DDIM"
|
||||
default_cfg_scale = 8
|
||||
default_mask_blur = 48
|
||||
default_mask_blur = 60
|
||||
default_total_outpaints = 5
|
||||
promptTableHeaders = ["Outpaint Steps", "Prompt", "image location", "blend mask", "is keyframe"], ["number", "str", "str", "str", "bool"]
|
||||
|
||||
|
|
|
|||
|
|
@ -114,6 +114,7 @@ def on_ui_tabs():
|
|||
main_prompts,
|
||||
main_common_prompt_suf,
|
||||
main_negative_prompt,
|
||||
main_outpaint_steps,
|
||||
],
|
||||
inputs=[importPrompts_button],
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,8 +1,9 @@
|
|||
import numpy as np
|
||||
import imageio
|
||||
from PIL import Image
|
||||
from .image import blend_images, draw_gradient_ellipse
|
||||
import math
|
||||
|
||||
def write_video(file_path, frames, fps, reversed=True, start_frame_dupe_amount=15, last_frame_dupe_amount=30):
|
||||
def write_video(file_path, frames, fps, reversed=True, start_frame_dupe_amount=15, last_frame_dupe_amount=30, num_interpol_frames=2, blend=False, blend_image= None):
|
||||
"""
|
||||
Writes frames to an mp4 video file
|
||||
:param file_path: Path to output video, must end with .mp4
|
||||
|
|
@ -20,8 +21,18 @@ def write_video(file_path, frames, fps, reversed=True, start_frame_dupe_amount=1
|
|||
writer = imageio.get_writer(file_path, fps=fps, macro_block_size=None)
|
||||
|
||||
# Duplicate the start and end frames
|
||||
start_frames = [frames[0]] * start_frame_dupe_amount
|
||||
end_frames = [frames[-1]] * last_frame_dupe_amount
|
||||
if blend:
|
||||
if blend_image is None:
|
||||
blend_image = draw_gradient_ellipse(*frames[0].size, 0.63)
|
||||
next_frame = frames[num_interpol_frames]
|
||||
next_to_last_frame = frames[-num_interpol_frames]
|
||||
print(f"Blending start: {math.ceil(start_frame_dupe_amount)}")
|
||||
start_frames = blend_images(frames[0], next_frame, blend_image, math.ceil(start_frame_dupe_amount))
|
||||
print(f"Blending end: {math.ceil(last_frame_dupe_amount)}")
|
||||
end_frames = blend_images(next_to_last_frame, frames[-1], blend_image, math.ceil(last_frame_dupe_amount))
|
||||
else:
|
||||
start_frames = [frames[0]] * start_frame_dupe_amount
|
||||
end_frames = [frames[-1]] * last_frame_dupe_amount
|
||||
|
||||
# Write the duplicated frames to the video writer
|
||||
for frame in start_frames:
|
||||
|
|
|
|||
Loading…
Reference in New Issue