Code Integrated to UI

Needs to be rebased onto the latest codebase
exit_image
Charles Fettinger 2023-05-16 00:04:19 -07:00
parent 12bed602f0
commit 3c54697ba0
6 changed files with 188 additions and 222 deletions

View File

@ -106,6 +106,9 @@ It works on free colab plan
<img src="https://img.shields.io/static/v1?label=github&message=repository&color=blue&style=flat&logo=github&logoColor=white" alt="GitHub Repo"/>
</a>
## Additional Resources
Luma Wipe ideas from [Videezy](https://www.videezy.com/free-video/luma-wipe), and free Luma Wipes ready to use immediately from [GitHub](https://github.com/Oncorporation/obs-studio/tree/master/plugins/obs-transitions/data/luma_wipes)
## Contributing
Contributions are welcome! Please follow these guidelines:

View File

@ -164,7 +164,7 @@ def lerp_imagemath_RGBA(img1, img2, alphaimg, factor:int = 50):
Returns:
A PIL.Image object representing the resulting interpolated image.
"""
start = timer()
#start = timer()
# create alpha and alpha inverst from luma wipe image
# multiply the time factor
if img1.mode != "RGBA":
@ -186,8 +186,8 @@ def lerp_imagemath_RGBA(img1, img2, alphaimg, factor:int = 50):
#r1 = ImageMath.eval("convert(int(a*b), 'L')", a=r1, b=factor)
# Merge the color bands back into an RGBA image
rebuilt_image = Image.merge("RGBA", (rl, gl, bl, alphaimg.convert('L')))
end = timer()
print(f"lerp_imagemath_rgba: {end - start}")
#end = timer()
#print(f"lerp_imagemath_rgba: {end - start}")
return rebuilt_image
@ -206,7 +206,7 @@ def clip_gradient_image(gradient_image, min_value:int = 50, max_value:int =75, i
Returns:
A PIL.Image object representing the adjusted gradient image.
"""
start = timer()
#start = timer()
# Convert the image to grayscale if needed
if gradient_image.mode != "L":
gradient_image = gradient_image.convert("L")
@ -237,8 +237,8 @@ def clip_gradient_image(gradient_image, min_value:int = 50, max_value:int =75, i
final_image = ImageOps.grayscale(mapped_image)
if invert:
final_image = ImageOps.invert(final_image)
end = timer()
print(end - start)
#end = timer()
#print(end - start)
return final_image
def resize_image_with_aspect_ratio(image: Image, basewidth: int = 512, baseheight: int = 512) -> Image:
@ -492,16 +492,7 @@ def draw_gradient_ellipse(width=512, height=512, white_amount=1.0, rotation = 0.
# Apply brightness method of ImageEnhance class
image = ImageEnhance.Contrast(image).enhance(contrast).convert('RGBA')
# Apply the alpha mask to the image
image = apply_alpha_mask(image, image)
# Define the radial gradient parameters
#ellipse_width, ellipse_height = (int((width * white_amount) // 1.5), int((height * white_amount) // 1.5))
#ellipse_colors = [(255, 255, 255, 255), (0, 0, 0, 0)]
# Create a new image for inner ellipse
#inner_ellipse = Image.new("L", size, 0)
#inner_ellipse = make_gradient_v2(width, height, center[0], center[1], ellipse_width, ellipse_height, theta)
#inner_ellipse = apply_alpha_mask(inner_ellipse, inner_ellipse)
#image.paste(inner_ellipse, center, mask=inner_ellipse)
# Creating object of Brightness class
image = apply_alpha_mask(image, image)
# Return the result image
return image
@ -651,22 +642,20 @@ def multiply_alpha(image, factor):
print(f"multiply_alpha:{end - start}")
return result_image
def blend_images(start_image: Image, stop_image: Image, gray_image: Image, num_frames: int) -> list:
def blend_images(start_image: Image, stop_image: Image, num_frames: int, invert:bool = False) -> list:
"""
Blend two images together by using the gray image as the alpha amount of each frame.
Blend two images together via the alpha amount of each frame.
This function takes in three parameters:
- start_image: the starting PIL image in RGBA mode
- stop_image: the target PIL image in RGBA mode
- gray_image: a gray scale PIL image of the same size as start_image and stop_image
- num_frames: the number of frames to generate in the blending animation
The function returns a list of PIL images representing the blending animation.
"""
# Initialize the list of blended frames
blended_frames = []
#set alpha layers of images to be blended - does nothing!
#start_image = apply_alpha_mask(start_image, gray_image)
#stop_image = apply_alpha_mask(stop_image, gray_image, invert = True)
if (invert):
start_image, stop_image = stop_image, start_image
# Generate each frame of the blending animation
for i in range(num_frames):
start = timer()
@ -685,7 +674,7 @@ def blend_images(start_image: Image, stop_image: Image, gray_image: Image, num_f
# Return the list of blended frames
return blended_frames
def alpha_composite_images(start_image: Image, stop_image: Image, gray_image: Image, num_frames: int) -> list:
def alpha_composite_images(start_image: Image, stop_image: Image, gray_image: Image, num_frames: int, invert:bool = False) -> list:
"""
Blend two images together by using the gray image as the alpha amount of each frame.
This function takes in three parameters:
@ -698,7 +687,8 @@ def alpha_composite_images(start_image: Image, stop_image: Image, gray_image: Im
"""
# Initialize the list of blended frames
ac_frames = []
if (invert):
gray_image = ImageOps.invert(gray_image)
#set alpha layers of images to be blended
start_image_c = apply_alpha_mask(start_image.copy(), gray_image)
stop_image_c = apply_alpha_mask(stop_image.copy(), gray_image, invert = False)
@ -722,104 +712,104 @@ def alpha_composite_images(start_image: Image, stop_image: Image, gray_image: Im
# Return the list of blended frames
return ac_frames
def luma_wipe_images(start_image: Image, stop_image: Image, alpha: Image, num_frames: int) -> list:
#progress(0, status='Generating luma wipe...')
lw_frames = []
for i in range(num_frames):
start = timer()
# Compute the luma value for this frame
luma_progress = i / (num_frames - 1)
# Create a new image for the transition
transition = Image.new("RGBA", start_image.size)
# Loop over each pixel in the alpha layer
for x in range(alpha.width):
for y in range(alpha.height):
# Compute the luma value for this pixel
luma = alpha.getpixel((x, y))[0] / 255.0
if luma_progress >= luma:
# Interpolate between the two images based on the luma value
pixel = (
int(start_image.getpixel((x, y))[0] * (1 - luma) + stop_image.getpixel((x, y))[0] * luma),
int(start_image.getpixel((x, y))[1] * (1 - luma) + stop_image.getpixel((x, y))[1] * luma),
int(start_image.getpixel((x, y))[2] * (1 - luma) + stop_image.getpixel((x, y))[2] * luma),
int(255 * luma_progress) # Set the alpha value based on the luma value
)
# Set the new pixel in the transition image
transition.putpixel((x, y), pixel)
else:
# Set the start pixel in the transition image
transition.putpixel((x, y), start_image.getpixel((x, y)))
# Append the transition image to the list
lw_frames.append(transition)
#progress((x + 1) / num_frames)
end = timer()
print(f"luma_wipe:{end - start}")
return lw_frames
###def luma_wipe_images(start_image: Image, stop_image: Image, alpha: Image, num_frames: int) -> list:
### #progress(0, status='Generating luma wipe...')
### lw_frames = []
### for i in range(num_frames):
### start = timer()
### # Compute the luma value for this frame
### luma_progress = i / (num_frames - 1)
### # Create a new image for the transition
### transition = Image.new("RGBA", start_image.size)
### # Loop over each pixel in the alpha layer
### for x in range(alpha.width):
### for y in range(alpha.height):
### # Compute the luma value for this pixel
### luma = alpha.getpixel((x, y))[0] / 255.0
### if luma_progress >= luma:
### # Interpolate between the two images based on the luma value
### pixel = (
### int(start_image.getpixel((x, y))[0] * (1 - luma) + stop_image.getpixel((x, y))[0] * luma),
### int(start_image.getpixel((x, y))[1] * (1 - luma) + stop_image.getpixel((x, y))[1] * luma),
### int(start_image.getpixel((x, y))[2] * (1 - luma) + stop_image.getpixel((x, y))[2] * luma),
### int(255 * luma_progress) # Set the alpha value based on the luma value
### )
### # Set the new pixel in the transition image
### transition.putpixel((x, y), pixel)
### else:
### # Set the start pixel in the transition image
### transition.putpixel((x, y), start_image.getpixel((x, y)))
### # Append the transition image to the list
### lw_frames.append(transition)
### #progress((x + 1) / num_frames)
### end = timer()
### print(f"luma_wipe:{end - start}")
### return lw_frames
def srgb_nonlinear_to_linear_channel(u):
return (u / 12.92) if (u <= 0.04045) else pow((u + 0.055) / 1.055, 2.4)
###def srgb_nonlinear_to_linear_channel(u):
### return (u / 12.92) if (u <= 0.04045) else pow((u + 0.055) / 1.055, 2.4)
def srgb_nonlinear_to_linear(v):
return [srgb_nonlinear_to_linear_channel(x) for x in v]
###def srgb_nonlinear_to_linear(v):
### return [srgb_nonlinear_to_linear_channel(x) for x in v]
#result_img = eval("convert('RGBA')", lambda x, y: PSLumaWipe(img_a.getpixel((x,y)), img_b.getpixel((x,y)), test_g_image.getpixel((x,y))[0]/255,(1,0,0,.5), 0.25, False, 0.1, 0.01, 0.01))
#list(np.divide((255,255,245,225),255))
def PSLumaWipe(a_color, b_color, luma, l_color=(255, 255, 255, 255), progress=0.0, invert=False, softness=0.01, start_adjust = 0.01, stop_adjust = 0.0):
# - adjust for min and max. Do not process if luma value is outside min or max
if ((luma >= (start_adjust)) and (luma <= (1 - stop_adjust))):
if (invert):
luma = 1.0 - luma
# user color with luma
out_color = np.array([l_color[0], l_color[1], l_color[2], luma * 255])
time = lerp(0.0, 1.0 + softness, progress)
#print(f"softness: {str(softness)} out_color: {str(out_color)} a_color: {str(a_color)} b_color: {str(b_color)} time: {str(time)} luma: {str(luma)} progress: {str(progress)}")
# if luma less than time, do not blend color
if (luma <= time - softness):
alpha_behind = np.clip(1.0 - (time - softness - luma) / softness, 0.0, 1.0)
return tuple(np.round(lerp(b_color, out_color, alpha_behind)).astype(int))
# if luma greater than time, show original color
if (luma >= time):
return a_color
alpha = (time - luma) / softness
out_color = lerp(a_color, b_color + out_color, alpha)
#print(f"alpha: {str(alpha)} out_color: {str(out_color)} time: {str(time)} luma: {str(luma)}")
out_color = srgb_nonlinear_to_linear(out_color)
return tuple(np.round(out_color).astype(int))
else:
# return original pixel color
return a_color
####result_img = eval("convert('RGBA')", lambda x, y: PSLumaWipe(img_a.getpixel((x,y)), img_b.getpixel((x,y)), test_g_image.getpixel((x,y))[0]/255,(1,0,0,.5), 0.25, False, 0.1, 0.01, 0.01))
####list(np.divide((255,255,245,225),255))
###def PSLumaWipe(a_color, b_color, luma, l_color=(255, 255, 255, 255), progress=0.0, invert=False, softness=0.01, start_adjust = 0.01, stop_adjust = 0.0):
### # - adjust for min and max. Do not process if luma value is outside min or max
### if ((luma >= (start_adjust)) and (luma <= (1 - stop_adjust))):
### if (invert):
### luma = 1.0 - luma
### # user color with luma
### out_color = np.array([l_color[0], l_color[1], l_color[2], luma * 255])
### time = lerp(0.0, 1.0 + softness, progress)
### #print(f"softness: {str(softness)} out_color: {str(out_color)} a_color: {str(a_color)} b_color: {str(b_color)} time: {str(time)} luma: {str(luma)} progress: {str(progress)}")
### # if luma less than time, do not blend color
### if (luma <= time - softness):
### alpha_behind = np.clip(1.0 - (time - softness - luma) / softness, 0.0, 1.0)
### return tuple(np.round(lerp(b_color, out_color, alpha_behind)).astype(int))
### # if luma greater than time, show original color
### if (luma >= time):
### return a_color
### alpha = (time - luma) / softness
### out_color = lerp(a_color, b_color + out_color, alpha)
### #print(f"alpha: {str(alpha)} out_color: {str(out_color)} time: {str(time)} luma: {str(luma)}")
### out_color = srgb_nonlinear_to_linear(out_color)
### return tuple(np.round(out_color).astype(int))
### else:
### # return original pixel color
### return a_color
def PSLumaWipe_images(start_image: Image, stop_image: Image, luma_wipe_image: Image, num_frames: int, transition_color: tuple[int, int, int, int] = (255,255,255,255)) -> list:
#progress(0, status='Generating luma wipe...')
# fix transition_color to relative 0.0 - 1.0
#luma_color = list(np.divide(transition_color,255))
###def PSLumaWipe_images(start_image: Image, stop_image: Image, luma_wipe_image: Image, num_frames: int, transition_color: tuple[int, int, int, int] = (255,255,255,255)) -> list:
### #progress(0, status='Generating luma wipe...')
### # fix transition_color to relative 0.0 - 1.0
### #luma_color = list(np.divide(transition_color,255))
softness = 0.03
lw_frames = []
lw_frames.append(start_image)
width, height = start_image.size
#compensate for different image sizes for LumaWipe
if (start_image.size != luma_wipe_image.size):
luma_wipe_image = resize_and_crop_image(luma_wipe_image,width,height)
# call PSLumaWipe for each pixel
for i in range(num_frames):
start = timer()
# Compute the luma value for this frame
luma_progress = i / (num_frames - 1)
transition = Image.new(start_image.mode, (width, height))
# apply to each pixel in the image
for x in range(width):
for y in range(height):
# call PSLumaWipe for each pixel
pixel = PSLumaWipe(start_image.getpixel((x, y)), stop_image.getpixel((x, y)), luma_wipe_image.getpixel((x, y))[0]/255, transition_color, luma_progress, False, softness, 0.01, 0.00)
transition.putpixel((x, y), pixel)
lw_frames.append(transition)
print(f"Luma Wipe frame:{len(lw_frames)}")
#lw_frames[-1].show()
end = timer()
print(f"PSLumaWipe:{end - start}")
lw_frames.append(stop_image)
return lw_frames
### softness = 0.03
### lw_frames = []
### lw_frames.append(start_image)
### width, height = start_image.size
### #compensate for different image sizes for LumaWipe
### if (start_image.size != luma_wipe_image.size):
### luma_wipe_image = resize_and_crop_image(luma_wipe_image,width,height)
### # call PSLumaWipe for each pixel
### for i in range(num_frames):
### start = timer()
### # Compute the luma value for this frame
### luma_progress = i / (num_frames - 1)
### transition = Image.new(start_image.mode, (width, height))
### # apply to each pixel in the image
### for x in range(width):
### for y in range(height):
### # call PSLumaWipe for each pixel
### pixel = PSLumaWipe(start_image.getpixel((x, y)), stop_image.getpixel((x, y)), luma_wipe_image.getpixel((x, y))[0]/255, transition_color, luma_progress, False, softness, 0.01, 0.00)
### transition.putpixel((x, y), pixel)
### lw_frames.append(transition)
### print(f"Luma Wipe frame:{len(lw_frames)}")
### #lw_frames[-1].show()
### end = timer()
### print(f"PSLumaWipe:{end - start}")
### lw_frames.append(stop_image)
### return lw_frames
#result_img = , 0.25, False, 0.1, 0.01, 0.01))
#list(np.divide((255,255,245,225),255))
@ -836,7 +826,7 @@ def PSLumaWipe2(a_color, b_color, luma, l_color=(255, 255, 0, 255), progress=0.0
#5. merge or composite images together
#6. return the merged image
# - adjust for min and max. Do not process if luma value is outside min or max
start = timer()
#start = timer()
if (progress <= start_adjust):
final_image = a_color
elif (progress >= (1 - stop_adjust)):
@ -872,22 +862,16 @@ def PSLumaWipe2(a_color, b_color, luma, l_color=(255, 255, 0, 255), progress=0.0
b_out_color = b_color.copy()
b_out_color.putalpha(b_color_alpha)
out_color_comp = Image.alpha_composite(a_out_color, b_out_color)
# experiment - only faded the colors - not needed
#out_color_comp = lerp_imagemath_RGBA(a_out_color, b_out_color, None, int(np.ceil((max_time * 100)/255)))
#out_color_comp.show("out_color_comp")
# ensure that the composited images are transparent
# ensure that the composited images have transparency
a_color.putalpha(ImageOps.invert(b_color_alpha))
#a_color.show("a_color b_color_alpha")
final_image = Image.alpha_composite(a_color, out_color_comp)
#final_image.show("final image")
#print(f"time:{time} maxtime:{max_time} inv-max-time:{255 - max_time} softness: {softness}")
#input("Press Enter to continue...")
end = timer()
print(f"PSLumaWipe2:{end - start} ")
#end = timer()
#print(f"PSLumaWipe2:{end - start} ")
return final_image.convert("RGBA")
def PSLumaWipe_images2(start_image: Image, stop_image: Image, luma_wipe_image: Image, num_frames: int, transition_color: tuple[int, int, int, int] = (255,255,255,255)) -> list:
def PSLumaWipe_images2(start_image: Image, stop_image: Image, luma_wipe_image: Image, num_frames: int, invert:bool = False, transition_color: tuple[int, int, int, int] = (255,255,255,255)) -> list:
#progress(0, status='Generating luma wipe...')
#luma_color = list(np.divide(transition_color,255))
softness = 0.095
@ -900,13 +884,11 @@ def PSLumaWipe_images2(start_image: Image, stop_image: Image, luma_wipe_image: I
# call PSLumaWipe for each frame
for i in range(num_frames):
# Compute the luma value for this frame
luma_progress = i / (num_frames - 1)
# initialize the transition image
#transition = Image.new("RGBA", (width, height))
luma_progress = i / (num_frames - 1)
# call PSLumaWipe for frame
transition = PSLumaWipe2(start_image.copy(), stop_image.copy(), luma_wipe_image.copy(), transition_color, luma_progress, False, softness, 0.02, 0.01)
lw_frames.append(transition)
print(f"Luma Wipe frame:{len(lw_frames)} {transition.mode} {transition.size} {luma_progress}")
#lw_frames[-1].show()
transition = PSLumaWipe2(start_image.copy(), stop_image.copy(), luma_wipe_image.copy(), transition_color, luma_progress, invert, softness, 0.02, 0.01)
lw_frames.append(transition)
print(f"Luma Wipe frame:{len(lw_frames)} {transition.size} {luma_progress * 100}%")
lw_frames.append(stop_image.convert("RGBA"))
return lw_frames

View File

@ -41,6 +41,7 @@ def outpaint_steps(
mask_height,
custom_exit_image,
frame_correction=True, # TODO: add frame_Correction in UI
blend_gradient_size = 61,
):
main_frames = [init_img.convert("RGBA")]
prev_image = init_img.convert("RGBA")
@ -59,15 +60,6 @@ def outpaint_steps(
current_image = main_frames[-1]
## apply available alpha mask of previous image
#if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i))] != "":
# current_image_amask = open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i))])
#else:
# #generate automatic alpha mask
# current_image_gradient_ratio = (inpainting_mask_blur / 100) if inpainting_mask_blur > 0 else 0.6175 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.89),0.1)
# current_image_amask = draw_gradient_ellipse(current_image.width, current_image.height, current_image_gradient_ratio, 0.0, 2.5)
#current_image = apply_alpha_mask(current_image, current_image_amask)
# shrink image to mask size
current_image = shrink_and_paste_on_blank(
current_image, mask_width, mask_height
@ -77,11 +69,6 @@ def outpaint_steps(
mask_image = Image.fromarray(255 - mask_image)
# create mask (black image with white mask_width width edges)
#prev_image = current_image
# inpainting step
#current_image = current_image.convert("RGB")
#keyframes are not inpainted
paste_previous_image = not prompt_image_is_keyframe[(i + 1)]
print(f"paste_prev_image: {paste_previous_image} {i} {i + 1}")
@ -161,83 +148,31 @@ def outpaint_steps(
if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))] != "":
current_image_amask = open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))])
else:
current_image_gradient_ratio = (inpainting_mask_blur / 100) if inpainting_mask_blur > 0 else 0.6175 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.925),0.1)
current_image_gradient_ratio = (blend_gradient_size / 100) #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.925),0.1)
current_image_amask = draw_gradient_ellipse(main_frames[i + 1].width, main_frames[i + 1].height, current_image_gradient_ratio, 0.0, 2.5)
current_image = apply_alpha_mask(main_frames[i + 1], current_image_amask)
#handle previous image alpha layer
prev_image = (main_frames[i] if main_frames[i] else main_frames[0])
#prev_image = (main_frames[i] if main_frames[i] else main_frames[0])
## apply available alpha mask of previous image (inverted)
if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i))] != "":
prev_image_amask = open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i))])
else:
prev_image_gradient_ratio = (inpainting_mask_blur / 100) if inpainting_mask_blur > 0 else 0.6175 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.925),0.1)
prev_image_gradient_ratio = (blend_gradient_size / 100) #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.925),0.1)
prev_image_amask = draw_gradient_ellipse(prev_image.width, prev_image.height, prev_image_gradient_ratio, 0.0, 2.5)
prev_image = apply_alpha_mask(prev_image, prev_image_amask, invert = True)
#prev_image = apply_alpha_mask(prev_image, prev_image_amask, invert = True)
# merge previous image with current image
corrected_frame = crop_inner_image(
current_image, mask_width, mask_height
)
#enhanced_img = crop_fethear_ellipse(
# main_frames[i],
# 30,
# inpainting_mask_blur / 3 // 2,
# inpainting_mask_blur / 3 // 2,
#)
#enhanced_img.show()
#input("Press Enter to continue...")
#test_enh_img = apply_alpha_mask(main_frames[i], prev_image_amask)
#test_enh_img.show()
#input("Press Enter to continue...")
prev = Image.new(prev_image.mode, (width, height), (255,255,255,255))
prev.paste(apply_alpha_mask(main_frames[i], prev_image_amask))
#prev.show()
corrected_frame.paste(prev, mask=prev)
#print(f"corrected_frame pasted")
#corrected_frame.show()
#input("Press Enter to continue...")
#current_image = corrected_frame
main_frames[i] = corrected_frame
save2Collect(corrected_frame, out_config, f"main_frame_gradient_{i + 0}")
#if (not paste_previous_image) and ((i + 1) == outpaint_steps):
# # fix initial image by adding alpha layer
# # fix exit image and frames
# backward_image = shrink_and_paste_on_blank(
# current_image, mask_width, mask_height
# )
# backward_image.show()
# input("Press Enter to continue...")
# #handle previous image alpha layer
# prev_image = (main_frames[i] if main_frames[i] else main_frames[0])
# prev_image.show()
# input("Press Enter to continue...")
# prev_image.alpha_composite(backward_image)
# print(f"no previous image - prev_image with backward Image")
# prev_image.show()
# input("Press Enter to continue...")
# main_frames[i - 1] = prev_image
#print(str(f"Frames: {len(main_frames)}"))
#print(str(f"Frame previous : {prev_image} {prev_image.mode} ({prev_image.width}, {prev_image.height})"))
#print(str(f"Frame current : {current_image} {current_image.mode} ({current_image.width}, {current_image.height})"))
##print(str(f"Frame corrected_frame : {corrected_frame} {corrected_frame.mode} ({corrected_frame.width}, {corrected_frame.height})"))
##print(str(f"Frame res - paste position: {paste_pos}"))
##print(str(f"Frame res : {res} {res.mode} ({res.width}, {res.height})"))
#print(str(f"Frame {i - 1} : {main_frames[i - 1]}"))
#print(str(f"Frame {i} : {main_frames[i]}"))
#print(str(f"Frame {i + 1} : {main_frames[i + 1]}"))
#print(str(f"Frame {-1} : {main_frames[-1]}"))
#input("Press Enter to continue...")
# Remove extra frames
#main_frames = main_frames[:(outpaint_steps)]
#handle first and last frames, this ensures blends work properly
#if init_img is not None:
#main_frames.insert(0, init_img)
if exit_img is not None:
main_frames.append(exit_img)
@ -269,6 +204,10 @@ def create_zoom(
upscale_do,
upscaler_name,
upscale_by,
blend_image,
blend_mode,
blend_gradient_size,
blend_invert_do,
inpainting_denoising_strength=1,
inpainting_full_res=0,
inpainting_padding=0,
@ -300,6 +239,10 @@ def create_zoom(
upscale_do,
upscaler_name,
upscale_by,
blend_image,
blend_mode,
blend_gradient_size,
blend_invert_do,
inpainting_denoising_strength,
inpainting_full_res,
inpainting_padding,
@ -371,6 +314,10 @@ def create_zoom_single(
upscale_do,
upscaler_name,
upscale_by,
blend_image,
blend_mode,
blend_gradient_size,
blend_invert_do,
inpainting_denoising_strength,
inpainting_full_res,
inpainting_padding,
@ -492,7 +439,8 @@ def create_zoom_single(
mask_width,
mask_height,
custom_exit_image,
False
False,
blend_gradient_size,
)
#for k in range(len(main_frames)):
@ -594,8 +542,10 @@ def create_zoom_single(
int(video_start_frame_dupe_amount),
int(video_last_frame_dupe_amount),
num_interpol_frames,
True,
open_image("G:\\Projects\\obs-studio\\plugins\\obs-transitions\\data\\luma_wipes\\derez-top.png")
blend_invert_do,
blend_image,
blend_mode,
blend_gradient_size,
)
print("Video saved in: " + os.path.join(script_path, out_config["video_filename"]))
return (

View File

@ -5,7 +5,8 @@ import modules.sd_samplers
default_sampling_steps = 35
default_sampler = "DDIM"
default_cfg_scale = 8
default_mask_blur = 62
default_mask_blur = 48
default_gradient_size = 61
default_overmask = 8
default_total_outpaints = 5
promptTableHeaders = ["Outpaint Steps", "Prompt", "image location", "blend mask", "is keyframe"], ["number", "str", "str", "str", "bool"]

View File

@ -15,6 +15,7 @@ from .static_variables import (
default_cfg_scale,
default_mask_blur,
default_sampler,
default_gradient_size,
)
from .helpers import validatePromptJson_throws, putPrompts, clearPrompts
from .prompt_util import readJsonPrompt
@ -219,6 +220,30 @@ def on_ui_tabs():
step=0.1,
info="Zoom speed in seconds (higher values create slower zoom)",
)
with gr.Accordion("Blend settings"):
with gr.Row():
blend_image = gr.Image(type="pil", label="Custom in/out Blend Image")
blend_mode = gr.Radio(
label="Blend Mode",
choices=["None", "Simple Blend", "Alpha Composite", "Luma Wipe"],
value="Luma Wipe",
type="index",
)
with gr.Row():
blend_gradient_size = gr.Slider(
label="Blend Gradient size",
minimum=25,
maximum=75,
value=default_gradient_size,
step=1
)
blend_invert_do = gr.Checkbox(False, label="Reverse Blend/Wipe")
gr.Markdown(
"""# Important Blend Info:
Number of Start and Stop Frame Duplication number of frames used for the blend/wipe effect. At 30 Frames per second, 30 frames is 1 second.
Blend Gradient size determines if blends extend to the border of the images. 61 is typical, higher values may result in frames around steps of your video
"""
)
with gr.Tab("Outpaint"):
inpainting_mask_blur = gr.Slider(
@ -235,6 +260,7 @@ def on_ui_tabs():
type="index",
)
with gr.Tab("Post proccess"):
upscale_do = gr.Checkbox(False, label="Enable Upscale")
upscaler_name = gr.Dropdown(
@ -296,6 +322,10 @@ Our best experience and trade-off is the R-ERSGAn4x upscaler.
upscale_do,
upscaler_name,
upscale_by,
blend_image,
blend_mode,
blend_gradient_size,
blend_invert_do,
],
outputs=[output_video, out_image, generation_info, html_info, html_log],
)

View File

@ -1,9 +1,9 @@
import numpy as np
import imageio
from .image import blend_images, draw_gradient_ellipse, alpha_composite_images, luma_wipe_images, PSLumaWipe_images2
from .image import draw_gradient_ellipse, alpha_composite_images, blend_images, PSLumaWipe_images2
import math
def write_video(file_path, frames, fps, reversed=True, start_frame_dupe_amount=15, last_frame_dupe_amount=30, num_interpol_frames=2, blend=False, blend_image= None, blend_type:int = 0):
def write_video(file_path, frames, fps, reversed=True, start_frame_dupe_amount=15, last_frame_dupe_amount=30, num_interpol_frames=2, blend_invert: bool = False, blend_image= None, blend_type:int = 0, blend_gradient_size: int = 63):
"""
Writes frames to an mp4 video file
:param file_path: Path to output video, must end with .mp4
@ -21,29 +21,29 @@ def write_video(file_path, frames, fps, reversed=True, start_frame_dupe_amount=1
writer = imageio.get_writer(file_path, fps=fps, macro_block_size=None)
# Duplicate the start and end frames
if blend:
if blend_type != 0:
num_frames_replaced = num_interpol_frames
if blend_image is None:
blend_image = draw_gradient_ellipse(*frames[0].size, 0.63)
blend_image = draw_gradient_ellipse(*frames[0].size, blend_gradient_size)
next_frame = frames[num_frames_replaced]
next_to_last_frame = frames[(-1 * num_frames_replaced)]
print(f"Blending start: {math.ceil(start_frame_dupe_amount)} next frame:{(num_frames_replaced)}")
if blend_type == 1:
start_frames = alpha_composite_images(frames[0], next_frame, blend_image, math.ceil(start_frame_dupe_amount))
start_frames = blend_images(frames[0], next_frame, math.ceil(start_frame_dupe_amount), blend_invert)
elif blend_type == 2:
start_frames = luma_wipe_images(frames[0], next_frame, blend_image, math.ceil(start_frame_dupe_amount))
else:
start_frames = PSLumaWipe_images2(frames[0], next_frame, blend_image, math.ceil(start_frame_dupe_amount),(255,255,0,225))
start_frames = alpha_composite_images(frames[0], next_frame, blend_image, math.ceil(start_frame_dupe_amount), blend_invert)
elif blend_type == 3:
start_frames = PSLumaWipe_images2(frames[0], next_frame, blend_image, math.ceil(start_frame_dupe_amount), blend_invert,(255,255,0,225))
del frames[:num_frames_replaced]
print(f"Blending end: {math.ceil(last_frame_dupe_amount)} next to last frame:{-1 * (num_frames_replaced)}")
if blend_type == 1:
end_frames = alpha_composite_images(next_to_last_frame, frames[-1], blend_image, math.ceil(last_frame_dupe_amount))
end_frames = blend_images(next_to_last_frame, frames[-1], math.ceil(last_frame_dupe_amount), blend_invert)
elif blend_type == 2:
end_frames = luma_wipe_images(next_to_last_frame, frames[-1], blend_image, math.ceil(last_frame_dupe_amount))
else:
end_frames = PSLumaWipe_images2(next_to_last_frame, frames[-1], blend_image, math.ceil(last_frame_dupe_amount),(255,255,0,225))
end_frames = alpha_composite_images(next_to_last_frame, frames[-1], blend_image, math.ceil(last_frame_dupe_amount), blend_invert)
elif blend_type == 3:
end_frames = PSLumaWipe_images2(next_to_last_frame, frames[-1], blend_image, math.ceil(last_frame_dupe_amount), blend_invert, (255,255,0,225))
frames = frames[:(-1 * num_frames_replaced)]
else:
start_frames = [frames[0]] * start_frame_dupe_amount