Working Image level Luma Wipe

Still has debug code
exit_image
Charles Fettinger 2023-05-12 23:23:08 -07:00
parent f3c4902b56
commit 12bed602f0
4 changed files with 120 additions and 83 deletions

View File

@ -195,7 +195,7 @@ def lerp_imagemath_RGBA(img1, img2, alphaimg, factor:int = 50):
def CMYKInvert(img) :
    """Invert an image channel by channel (intended for CMYK images).

    Each band of *img* is first converted to 8-bit grayscale ('L'),
    inverted with ImageOps.invert, and the inverted bands are then
    re-merged using the image's original mode.

    :param img: PIL Image whose bands are to be inverted.
    :return: new PIL Image in the same mode with every band inverted.
    """
    inverted_bands = []
    for band in img.split():
        # invert each band independently; convert('L') guarantees an
        # 8-bit single-channel image that ImageOps.invert accepts
        inverted_bands.append(ImageOps.invert(band.convert('L')))
    return Image.merge(img.mode, inverted_bands)
def clip_gradient_image(gradient_image, min_value:int = 50, max_value:int =75, invert= False):
def clip_gradient_image(gradient_image, min_value:int = 50, max_value:int =75, invert= False, mask = False):
"""
Return only the values of a gradient grayscale image between a minimum and maximum value.
@ -218,8 +218,9 @@ def clip_gradient_image(gradient_image, min_value:int = 50, max_value:int =75, i
#adjusted_image = enhancer.enhance(1.0 + max_value / 255)
adjusted_image = normalized_image
# Clip the values outside the desired range
adjusted_image_mask = ImageMath.eval("im <= 0 ", im=adjusted_image)
adjusted_image = ImageMath.eval("mask * (255 - im)", im=adjusted_image, mask=adjusted_image_mask).convert("L")
if mask:
adjusted_image_mask = ImageMath.eval("im <= 0 ", im=adjusted_image)
adjusted_image = ImageMath.eval("mask * (255 - im)", im=adjusted_image, mask=adjusted_image_mask).convert("L")
# Map the brightness to the desired range
#mapped_image = ImageMath.eval("im * (max_value - min_value) + min_value", im=adjusted_image, min_value=min_value, max_value=max_value)
#mapped_image = ImageMath.eval("float(im) * ((max_value + min_value)/(max_value - min_value)) - min_value", im=adjusted_image, min_value=min_value, max_value=max_value)
@ -663,11 +664,9 @@ def blend_images(start_image: Image, stop_image: Image, gray_image: Image, num_f
"""
# Initialize the list of blended frames
blended_frames = []
#set alpha layers of images to be blended
#set alpha layers of images to be blended - does nothing!
#start_image = apply_alpha_mask(start_image, gray_image)
#stop_image = apply_alpha_mask(stop_image, gray_image, invert = True)
# Generate each frame of the blending animation
for i in range(num_frames):
start = timer()
@ -679,9 +678,10 @@ def blend_images(start_image: Image, stop_image: Image, gray_image: Image, num_f
# Append the blended frame to the list
blended_frames.append(blended_image)
end = timer()
print(f"blend:{end - start}")
blended_frames.append(stop_image)
# Return the list of blended frames
return blended_frames
@ -700,16 +700,16 @@ def alpha_composite_images(start_image: Image, stop_image: Image, gray_image: Im
ac_frames = []
#set alpha layers of images to be blended
start_image = apply_alpha_mask(start_image, gray_image)
stop_image = apply_alpha_mask(stop_image, gray_image, invert = False)
start_image_c = apply_alpha_mask(start_image.copy(), gray_image)
stop_image_c = apply_alpha_mask(stop_image.copy(), gray_image, invert = False)
# Generate each frame of the blending animation
for i in range(num_frames):
start = timer()
# Calculate the alpha amount for this frame
alpha = i / float(num_frames - 1)
start_adj_image = multiply_alpha(start_image.copy(), 1 - alpha)
stop_adj_image = multiply_alpha(stop_image.copy(), alpha)
start_adj_image = multiply_alpha(start_image_c, 1 - alpha)
stop_adj_image = multiply_alpha(stop_image_c, alpha)
# Blend the two images using the alpha amount
ac_image = Image.alpha_composite(start_adj_image, stop_adj_image)
@ -718,7 +718,7 @@ def alpha_composite_images(start_image: Image, stop_image: Image, gray_image: Im
ac_frames.append(ac_image)
end = timer()
print(f"alpha_composited:{end - start}")
ac_frames.append(stop_image)
# Return the list of blended frames
return ac_frames
@ -842,6 +842,8 @@ def PSLumaWipe2(a_color, b_color, luma, l_color=(255, 255, 0, 255), progress=0.0
elif (progress >= (1 - stop_adjust)):
final_image = b_color
else:
if luma.mode != "L":
luma = luma.convert("L")
# invert luma if invert is true
if (invert):
luma = ImageOps.invert(luma)
@ -858,7 +860,7 @@ def PSLumaWipe2(a_color, b_color, luma, l_color=(255, 255, 0, 255), progress=0.0
# lerp_imagemath_RGBA works reasonably fast, but minimal visual difference, softness increases visibility
if softness >= 0.1:
a_out_color = lerp_imagemath_RGBA(a_color, b_color, out_color_alpha, max_time)
a_out_color = lerp_imagemath_RGBA(a_color, b_color, out_color_alpha, int(np.ceil((max_time * 100)/255)))
else:
a_out_color = a_color.copy()
a_out_color.putalpha(out_color_alpha)
@ -866,16 +868,22 @@ def PSLumaWipe2(a_color, b_color, luma, l_color=(255, 255, 0, 255), progress=0.0
# build the colorized out_color image, b_color is the rgb value
# out_color_alpha should provide transparency to see b_color
# we need the alpha channel to be reversed so that the color is transparent
b_color_alpha = clip_gradient_image(ImageOps.invert(luma.convert("L")), 255 - max_time, 255, False)
b_color_alpha = clip_gradient_image(ImageOps.invert(luma), 255 - time, 255, False)
b_out_color = b_color.copy()
b_out_color.putalpha(b_color_alpha)
out_color_comp = Image.alpha_composite(a_out_color, b_out_color)
# experiment - only faded the colors - not needed
#out_color_comp = lerp_imagemath_RGBA(a_out_color, b_out_color, None, int(np.ceil((max_time * 100)/255)))
#out_color_comp.show("out_color_comp")
# ensure that the composited images are transparent
a_color.putalpha(ImageOps.invert(b_color_alpha))
#a_color.show("a_color b_color_alpha")
final_image = Image.alpha_composite(a_color, out_color_comp)
final_image.show()
#final_image.show("final image")
#print(f"time:{time} maxtime:{max_time} inv-max-time:{255 - max_time} softness: {softness}")
#input("Press Enter to continue...")
end = timer()
print(f"PSLumaWipe2:{end - start}")
print(f"PSLumaWipe2:{end - start} ")
return final_image.convert("RGBA")
@ -894,9 +902,9 @@ def PSLumaWipe_images2(start_image: Image, stop_image: Image, luma_wipe_image: I
# Compute the luma value for this frame
luma_progress = i / (num_frames - 1)
# initialize the transition image
transition = Image.new("RGBA", (width, height))
#transition = Image.new("RGBA", (width, height))
# call PSLumaWipe for frame
transition = PSLumaWipe2(start_image, stop_image, luma_wipe_image, transition_color, luma_progress, False, softness, 0.02, 0.01)
transition = PSLumaWipe2(start_image.copy(), stop_image.copy(), luma_wipe_image.copy(), transition_color, luma_progress, False, softness, 0.02, 0.01)
lw_frames.append(transition)
print(f"Luma Wipe frame:{len(lw_frames)} {transition.mode} {transition.size} {luma_progress}")
#lw_frames[-1].show()

View File

@ -43,7 +43,6 @@ def outpaint_steps(
frame_correction=True, # TODO: add frame_Correction in UI
):
main_frames = [init_img.convert("RGBA")]
main_frames[0] = init_img.convert("RGBA")
prev_image = init_img.convert("RGBA")
exit_img = custom_exit_image.convert("RGBA") if custom_exit_image else None
@ -60,21 +59,22 @@ def outpaint_steps(
current_image = main_frames[-1]
# apply available alpha mask of previous image
#if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 0))] != "":
# current_image = apply_alpha_mask(current_image, open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 0))]))
## apply available alpha mask of previous image
#if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i))] != "":
# current_image_amask = open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i))])
#else:
# #generate automatic alpha mask
# current_image_gradient_ratio = (inpainting_mask_blur / 100) if inpainting_mask_blur > 0 else 0.615 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.89),0.1)
# current_image = apply_alpha_mask(current_image, draw_gradient_ellipse(current_image.width, current_image.height, current_image_gradient_ratio, 0.0, 2.0))
# current_image_gradient_ratio = (inpainting_mask_blur / 100) if inpainting_mask_blur > 0 else 0.6175 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.89),0.1)
# current_image_amask = draw_gradient_ellipse(current_image.width, current_image.height, current_image_gradient_ratio, 0.0, 2.5)
#current_image = apply_alpha_mask(current_image, current_image_amask)
# shrink image to mask size
current_image = shrink_and_paste_on_blank(
current_image, mask_width, mask_height
)
mask_image = np.array(current_image)[:, :, 3]
mask_image = Image.fromarray(255 - mask_image)
mask_image = Image.fromarray(255 - mask_image)
# create mask (black image with white mask_width width edges)
#prev_image = current_image
@ -83,11 +83,12 @@ def outpaint_steps(
#current_image = current_image.convert("RGB")
#keyframes are not inpainted
paste_previous_image = not prompt_image_is_keyframe[max(k for k in prompt_image_is_keyframe.keys() if k <= (i + 1))]
paste_previous_image = not prompt_image_is_keyframe[(i + 1)]
print(f"paste_prev_image: {paste_previous_image} {i} {i + 1}")
if custom_exit_image and ((i + 1) == outpaint_steps):
current_image = resize_and_crop_image(custom_exit_image, width, height).convert("RGBA")
main_frames.append(current_image)
exit_img = current_image
print("using Custom Exit Image")
save2Collect(current_image, out_config, f"exit_img.png")
@ -116,88 +117,110 @@ def outpaint_steps(
main_frames.append(processed.images[0].convert("RGBA"))
save2Collect(processed.images[0], out_config, f"outpain_step_{i}.png")
paste_previous_image = True
#paste_previous_image = True
else:
# use prerendered image, known as keyframe. Resize to target size
print(f"image {i + 1} is a keyframe: {not paste_previous_image}")
current_image = open_image(prompt_images[max(k for k in prompt_images.keys() if k <= (i + 1))])
current_image = open_image(prompt_images[(i + 1)])
current_image = resize_and_crop_image(current_image, width, height).convert("RGBA")
# if keyframe is last frame, use it as exit image
if (not paste_previous_image) and ((i + 1) == outpaint_steps):
exit_img = current_image
print("using keyframe as exit image")
main_frames.append(current_image)
else:
main_frames.append(current_image)
save2Collect(current_image, out_config, f"key_frame_{i + 1}.png")
#seed = newseed
# TODO: seed behavior
# paste previous image on top of current image
if frame_correction and inpainting_mask_blur > 0:
corrected_frame = crop_inner_image(
main_frames[i + 1], mask_width, mask_height
)
enhanced_img = crop_fethear_ellipse(
main_frames[i],
30,
inpainting_mask_blur / 3 // 2,
inpainting_mask_blur / 3 // 2,
)
save2Collect(main_frames[i], out_config, f"main_frame_{i}")
save2Collect(enhanced_img, out_config, f"main_frame_enhanced_{i}")
corrected_frame.paste(enhanced_img, mask=enhanced_img)
main_frames[i] = corrected_frame
if frame_correction and (inpainting_mask_blur > 0):
#if 0 <= (i + 1) < len(main_frames):
if paste_previous_image and i > 0:
corrected_frame = crop_inner_image(
main_frames[i + 1], mask_width, mask_height
)
enhanced_img = crop_fethear_ellipse(
main_frames[i],
30,
inpainting_mask_blur / 3 // 2,
inpainting_mask_blur / 3 // 2,
)
save2Collect(main_frames[i], out_config, f"main_frame_{i}")
save2Collect(enhanced_img, out_config, f"main_frame_enhanced_{i}")
corrected_frame.paste(enhanced_img, mask=enhanced_img)
main_frames[i] = corrected_frame
else: #TEST
# paste current image with alpha layer on previous image to merge
if paste_previous_image:
# apply predefined or generated alpha mask to current image
# paste current image with alpha layer on previous image to merge : paste on i
if paste_previous_image and i > 0:
# apply predefined or generated alpha mask to current image:
# current image must be redefined as most current image in frame stack
# use previous image alpha mask if available
if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))] != "":
current_image = apply_alpha_mask(main_frames[i + 1], open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))]))
current_image_amask = open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))])
else:
current_image_gradient_ratio = (inpainting_mask_blur / 100) if inpainting_mask_blur > 0 else 0.615 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.925),0.1)
current_image = apply_alpha_mask(main_frames[i + 1], draw_gradient_ellipse(main_frames[i + 1].width, main_frames[i + 1].height, current_image_gradient_ratio, 0.0, 2.0))
current_image_gradient_ratio = (inpainting_mask_blur / 100) if inpainting_mask_blur > 0 else 0.6175 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.925),0.1)
current_image_amask = draw_gradient_ellipse(main_frames[i + 1].width, main_frames[i + 1].height, current_image_gradient_ratio, 0.0, 2.5)
current_image = apply_alpha_mask(main_frames[i + 1], current_image_amask)
# handle previous image alpha layer
#handle previous image alpha layer
prev_image = (main_frames[i] if main_frames[i] else main_frames[0])
# apply available alpha mask of previous image (inverted)
## apply available alpha mask of previous image (inverted)
if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i))] != "":
prev_image = apply_alpha_mask(prev_image, open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i))]), invert = True)
prev_image_amask = open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i))])
else:
prev_image_gradient_ratio = (inpainting_mask_blur / 100) if inpainting_mask_blur > 0 else 0.615 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.925),0.1)
prev_image = apply_alpha_mask(prev_image, draw_gradient_ellipse(prev_image.width, prev_image.height, prev_image_gradient_ratio, 0.0, 2.0), invert = True )
prev_image_gradient_ratio = (inpainting_mask_blur / 100) if inpainting_mask_blur > 0 else 0.6175 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.925),0.1)
prev_image_amask = draw_gradient_ellipse(prev_image.width, prev_image.height, prev_image_gradient_ratio, 0.0, 2.5)
prev_image = apply_alpha_mask(prev_image, prev_image_amask, invert = True)
# merge previous image with current image
corrected_frame = crop_inner_image(
current_image, mask_width, mask_height
)
current_image.alpha_composite(prev_image)
res = Image.new(current_image.mode, (width, height))
paste_pos = (
int((width - current_image.width) / 2),
int((height - current_image.height) / 2),
)
res.paste(current_image, paste_pos)
corrected_frame.paste(res, mask=res)
current_image = corrected_frame
#enhanced_img = crop_fethear_ellipse(
# main_frames[i],
# 30,
# inpainting_mask_blur / 3 // 2,
# inpainting_mask_blur / 3 // 2,
#)
#enhanced_img.show()
#input("Press Enter to continue...")
#test_enh_img = apply_alpha_mask(main_frames[i], prev_image_amask)
#test_enh_img.show()
#input("Press Enter to continue...")
prev = Image.new(prev_image.mode, (width, height), (255,255,255,255))
prev.paste(apply_alpha_mask(main_frames[i], prev_image_amask))
#prev.show()
corrected_frame.paste(prev, mask=prev)
#print(f"corrected_frame pasted")
#corrected_frame.show()
#input("Press Enter to continue...")
#current_image = corrected_frame
main_frames[i] = current_image
save2Collect(current_image, out_config, f"main_frame_gradient_{i + 1}")
main_frames[i] = corrected_frame
save2Collect(corrected_frame, out_config, f"main_frame_gradient_{i + 0}")
#if (not paste_previous_image) and ((i + 1) == outpaint_steps):
# # fix initial image by adding alpha layer
# # fix exit image and frames
# backward_image = shrink_and_paste_on_blank(
# current_image, mask_width, mask_height
# )
# backward_image.show()
# input("Press Enter to continue...")
# #handle previous image alpha layer
# prev_image = (main_frames[i] if main_frames[i] else main_frames[0])
# prev_image.show()
# input("Press Enter to continue...")
# prev_image.alpha_composite(backward_image)
# print(f"no previous image - prev_image with backward Image")
# prev_image.show()
# input("Press Enter to continue...")
# main_frames[i - 1] = prev_image
#print(str(f"Frames: {len(main_frames)}"))
#print(str(f"Frame previous : {prev_image} {prev_image.mode} ({prev_image.width}, {prev_image.height})"))
#print(str(f"Frame current : {current_image} {current_image.mode} ({current_image.width}, {current_image.height})"))
@ -211,10 +234,10 @@ def outpaint_steps(
#input("Press Enter to continue...")
# Remove extra frames
main_frames = main_frames[:(outpaint_steps)]
#main_frames = main_frames[:(outpaint_steps)]
#handle first and last frames, this ensures blends work properly
if init_img is not None:
main_frames.insert(0, init_img)
#if init_img is not None:
#main_frames.insert(0, init_img)
if exit_img is not None:
main_frames.append(exit_img)

View File

@ -5,7 +5,7 @@ import modules.sd_samplers
default_sampling_steps = 35
default_sampler = "DDIM"
default_cfg_scale = 8
default_mask_blur = 60
default_mask_blur = 62
default_overmask = 8
default_total_outpaints = 5
promptTableHeaders = ["Outpaint Steps", "Prompt", "image location", "blend mask", "is keyframe"], ["number", "str", "str", "str", "bool"]

View File

@ -3,7 +3,7 @@ import imageio
from .image import blend_images, draw_gradient_ellipse, alpha_composite_images, luma_wipe_images, PSLumaWipe_images2
import math
def write_video(file_path, frames, fps, reversed=True, start_frame_dupe_amount=15, last_frame_dupe_amount=30, num_interpol_frames=2, blend=False, blend_image= None):
def write_video(file_path, frames, fps, reversed=True, start_frame_dupe_amount=15, last_frame_dupe_amount=30, num_interpol_frames=2, blend=False, blend_image= None, blend_type:int = 0):
"""
Writes frames to an mp4 video file
:param file_path: Path to output video, must end with .mp4
@ -22,22 +22,28 @@ def write_video(file_path, frames, fps, reversed=True, start_frame_dupe_amount=1
# Duplicate the start and end frames
if blend:
num_frames_replaced = num_interpol_frames + 2
num_frames_replaced = num_interpol_frames
if blend_image is None:
blend_image = draw_gradient_ellipse(*frames[0].size, 0.63)
next_frame = frames[num_frames_replaced]
next_to_last_frame = frames[(-1 * num_frames_replaced)]
print(f"Blending start: {math.ceil(start_frame_dupe_amount)} next frame:{(num_frames_replaced)}")
#start_frames = alpha_composite_images(frames[0], next_frame, blend_image, math.ceil(start_frame_dupe_amount))
#start_frames = luma_wipe_images(frames[0], next_frame, blend_image, math.ceil(start_frame_dupe_amount))
start_frames = PSLumaWipe_images2(frames[0], next_frame, blend_image, math.ceil(start_frame_dupe_amount),(255,255,0,225))
if blend_type == 1:
start_frames = alpha_composite_images(frames[0], next_frame, blend_image, math.ceil(start_frame_dupe_amount))
elif blend_type == 2:
start_frames = luma_wipe_images(frames[0], next_frame, blend_image, math.ceil(start_frame_dupe_amount))
else:
start_frames = PSLumaWipe_images2(frames[0], next_frame, blend_image, math.ceil(start_frame_dupe_amount),(255,255,0,225))
del frames[:num_frames_replaced]
print(f"Blending end: {math.ceil(last_frame_dupe_amount)} next to last frame:{-1 * (num_frames_replaced)}")
end_frames = alpha_composite_images(next_to_last_frame, frames[-1], blend_image, math.ceil(last_frame_dupe_amount))
#end_frames = luma_wipe_images(next_to_last_frame, frames[-1], blend_image, math.ceil(last_frame_dupe_amount))
#end_frames = PSLumaWipe_images2(next_to_last_frame, frames[-1], blend_image, math.ceil(last_frame_dupe_amount),(255,255,0,225))
if blend_type == 1:
end_frames = alpha_composite_images(next_to_last_frame, frames[-1], blend_image, math.ceil(last_frame_dupe_amount))
elif blend_type == 2:
end_frames = luma_wipe_images(next_to_last_frame, frames[-1], blend_image, math.ceil(last_frame_dupe_amount))
else:
end_frames = PSLumaWipe_images2(next_to_last_frame, frames[-1], blend_image, math.ceil(last_frame_dupe_amount),(255,255,0,225))
frames = frames[:(-1 * num_frames_replaced)]
else:
start_frames = [frames[0]] * start_frame_dupe_amount