Add blends, update styles for dataframe

exit_image
Charles Fettinger 2023-04-27 02:18:19 -07:00
parent 379b0f51fc
commit 3a1cfd53d5
8 changed files with 290 additions and 31 deletions

BIN
blends/iris-square.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 32 KiB

BIN
blends/spot.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 23 KiB

BIN
blends/square.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 70 KiB

BIN
blends/sun-square.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 51 KiB

View File

@ -123,3 +123,14 @@ def clearPrompts():
gr.DataFrame.update(value=[[0, "Infinite Zoom. Start over"]]), gr.DataFrame.update(value=[[0, "Infinite Zoom. Start over"]]),
gr.Textbox.update(""), gr.Textbox.update(""),
] ]
def value_to_bool(value):
    """Coerce *value* to a bool.

    Accepts real bools, the strings "true"/"false" (case-insensitive),
    and the integers 0/1. Every other value maps to False.
    """
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        lowered = value.lower()
        if lowered in ("true", "false"):
            return lowered == "true"
    elif isinstance(value, int):
        if value in (0, 1):
            return bool(value)
    # Unrecognized type or out-of-range value: treat as False.
    return False

View File

@ -1,6 +1,8 @@
from PIL import Image from PIL import Image, ImageDraw, ImageEnhance
import requests import requests
import base64 import base64
import numpy as np
import math
from io import BytesIO from io import BytesIO
@ -43,13 +45,245 @@ def open_image(image_path):
return img return img
def apply_alpha_mask(current_image, mask_image): def apply_alpha_mask(image, mask_image):
# Resize the mask to match the current image size # Resize the mask to match the current image size
mask_image = mask_image.resize(current_image.size) mask_image = mask_image.resize(image.size)
# Apply the mask as the alpha layer of the current image # Apply the mask as the alpha layer of the current image
result_image = current_image.copy() result_image = image.copy()
result_image.putalpha(mask_image.convert('L')) # convert to grayscale result_image.putalpha(mask_image.convert('L')) # convert to grayscale
return result_image return result_image
def resize_image_with_aspect_ratio(image, basewidth=512, baseheight=512):
    """
    Resize *image* to exactly basewidth x baseheight without distortion.

    The image is first scaled to *basewidth* preserving aspect ratio. If the
    resulting height exceeds *baseheight*, it is center-cropped vertically;
    if it falls short, it is pasted vertically centered onto a transparent
    RGBA canvas of the target size.

    Args:
        image (PIL.Image.Image): Source image.
        basewidth (int): Target width. Default 512.
        baseheight (int): Target height. Default 512.

    Returns:
        PIL.Image.Image: Image of size (basewidth, baseheight).
    """
    orig_width, orig_height = image.size
    # Height that keeps the aspect ratio at the requested width.
    wpercent = basewidth / float(orig_width)
    hsize = int(float(orig_height) * wpercent)
    # Resize with the Lanczos resampling filter.
    resized_image = image.resize((basewidth, hsize), resample=Image.LANCZOS)
    if hsize > baseheight:
        # Center-crop vertically. The bottom edge is anchored at
        # crop_height + baseheight — the previous hsize - crop_height form
        # produced a one-pixel-too-tall image whenever (hsize - baseheight)
        # was odd, because integer division dropped the remainder twice.
        crop_height = (hsize - baseheight) // 2
        resized_image = resized_image.crop((0, crop_height, basewidth, crop_height + baseheight))
    elif hsize < baseheight:
        # Letterbox: center the resized image on a transparent canvas.
        blank_image = Image.new("RGBA", (basewidth, baseheight), (255, 255, 255, 0))
        blank_image.paste(resized_image, (0, (baseheight - hsize) // 2))
        resized_image = blank_image
    return resized_image
def resize_and_crop_image(image, new_width=512, new_height=512):
    """
    Scale *image* to cover a new_width x new_height box, then center-crop.

    The image is resized (Lanczos) so the target box is completely covered
    while preserving aspect ratio; the overhang on the longer dimension is
    cropped equally from both sides.

    Args:
        image (PIL.Image.Image): Source image.
        new_width (int): Target width. Default 512.
        new_height (int): Target height. Default 512.

    Returns:
        PIL.Image.Image: The resized, center-cropped image.
    """
    src_width, src_height = image.size
    src_ratio = src_width / float(src_height)
    target_ratio = new_width / float(new_height)
    if src_ratio > target_ratio:
        # Source is relatively wider: match heights, crop excess width.
        scaled_height = new_height
        scaled_width = int(new_height * src_ratio)
        x_offset = (scaled_width - new_width) / 2
        y_offset = 0
    else:
        # Source is relatively taller: match widths, crop excess height.
        scaled_width = new_width
        scaled_height = int(new_width / src_ratio)
        x_offset = 0
        y_offset = (scaled_height - new_height) / 2
    scaled = image.resize((scaled_width, scaled_height), resample=Image.LANCZOS)
    # Crop box spans exactly new_width x new_height around the center.
    return scaled.crop((x_offset, y_offset, x_offset + new_width, y_offset + new_height))
def grayscale_to_gradient(image, gradient_colors):
    """
    Converts a grayscale PIL Image into a two color image using the specified gradient colors.

    Args:
        image (PIL.Image.Image): The input grayscale image.
        gradient_colors (list): A list of two tuples representing the gradient colors.

    Returns:
        PIL.Image.Image: A two color image with the same dimensions as the input grayscale image.
    """
    # Create a palettized image. putpalette expects a flat sequence of RGB
    # triples, so drop any alpha component from the supplied colors.
    result = Image.new("P", image.size)
    result.putpalette([channel for color in gradient_colors for channel in color[:3]])
    num_colors = len(gradient_colors)
    # Map each 0-255 grayscale value to a palette *index*. The previous code
    # indexed gradient_colors with the raw pixel value (IndexError for any
    # pixel brighter than 1) and stored color tuples in a "P" image, which
    # expects integer palette indices.
    pixel_values = list(image.getdata())
    result.putdata([min(p * num_colors // 256, num_colors - 1) for p in pixel_values])
    return result
def rgb2gray(rgb):
    """Collapse the trailing RGB(A) axis of *rgb* to a single luma value
    using the ITU-R BT.601 weights (extra channels beyond the first three,
    e.g. alpha, are ignored)."""
    red, green, blue = rgb[..., 0], rgb[..., 1], rgb[..., 2]
    return 0.2989 * red + 0.5870 * green + 0.1140 * blue
# _, (h, k) = ellipse_bbox(0,0,768,512,math.radians(0.0)) # Ellipse center
def ellipse_bbox(h, k, a, b, theta):
    """
    Compute the axis-aligned bounding box of a rotated ellipse.

    Args:
        h (float): x-coordinate of the ellipse center.
        k (float): y-coordinate of the ellipse center.
        a (float): Length of the semi-major axis.
        b (float): Length of the semi-minor axis.
        theta (float): Rotation angle of the ellipse, in radians.

    Returns:
        tuple: ((left, top), (right, bottom)) — integer corners of the
        bounding box.
    """
    # Project both (rotated) ellipse axis vectors onto x and y.
    ux, uy = a * math.cos(theta), a * math.sin(theta)
    vx, vy = b * math.cos(theta + math.pi / 2), b * math.sin(theta + math.pi / 2)
    # The box half-extents are the norms of the per-axis projections,
    # rounded up so the ellipse is fully enclosed.
    half_width = int(math.ceil(math.sqrt(ux ** 2 + vx ** 2)))
    half_height = int(math.ceil(math.sqrt(uy ** 2 + vy ** 2)))
    top_left = (int(h - half_width), int(k - half_height))
    bottom_right = (int(h + half_width), int(k + half_height))
    return (top_left, bottom_right)
# intel = make_gradient_v2(768,512,h/2,k/2,h*2/3,k*2/3,math.radians(0.0))
def make_gradient_v1(width, height, h, k, a, b, theta):
    """
    Generate an elliptical radial gradient as a grayscale PIL image.

    Pure-Python reference implementation that evaluates the ellipse
    equation at every pixel individually (make_gradient_v2 is the
    vectorized equivalent).

    Args:
        width (int): Width of the output image.
        height (int): Height of the output image.
        h (float): x-coordinate of the center of the ellipse.
        k (float): y-coordinate of the center of the ellipse.
        a (float): Length of the semi-major axis.
        b (float): Length of the semi-minor axis.
        theta (float): Angle of rotation (in radians) of the ellipse.

    Returns:
        PIL.Image.Image: Mode-'L' image, brightest at the ellipse center.
    """
    sin_t, cos_t = math.sin(theta), math.cos(theta)
    a_sq, b_sq = a ** 2, b ** 2
    weights = np.zeros((height, width), np.float64)
    for row in range(height):
        dy = row - k
        for col in range(width):
            dx = col - h
            # Ellipse equation value: 0 at the center, 1 on the boundary.
            weights[row, col] = (((dx * cos_t + dy * sin_t) ** 2) / a_sq
                                 + ((dx * sin_t - dy * cos_t) ** 2) / b_sq)
    # Invert and clamp so the center is white, beyond the ellipse is black.
    pixels = np.uint8(np.clip(1.0 - weights, 0, 1) * 255)
    return Image.fromarray(pixels, mode='L')
# make_gradient_v2(768,512,h/2,k/2,h-192,k-192,math.radians(30.0))
def make_gradient_v2(width, height, h, k, a, b, theta):
    """
    Generate an elliptical radial gradient as a grayscale PIL image.

    Vectorized with NumPy coordinate grids; computes the same gradient as
    make_gradient_v1 without per-pixel Python loops.

    Args:
        width (int): Width of the output image.
        height (int): Height of the output image.
        h (float): x-coordinate of the center of the ellipse.
        k (float): y-coordinate of the center of the ellipse.
        a (float): Length of the semi-major axis.
        b (float): Length of the semi-minor axis.
        theta (float): Angle of rotation (in radians) of the ellipse.

    Returns:
        PIL.Image.Image: Mode-'L' image, brightest at the ellipse center.
    """
    sin_t, cos_t = math.sin(theta), math.cos(theta)
    a_sq, b_sq = a ** 2, b ** 2
    # Coordinate grids shifted so the ellipse center sits at the origin.
    y, x = np.mgrid[-k:height - k, -h:width - h]
    # Coordinates in the ellipse's own (rotated) frame.
    along_major = x * cos_t + y * sin_t
    along_minor = x * sin_t - y * cos_t
    weights = (along_major ** 2) / a_sq + (along_minor ** 2) / b_sq
    # Invert and clamp so the center is white, beyond the ellipse is black.
    pixels = np.uint8(np.clip(1.0 - weights, 0, 1) * 255)
    return Image.fromarray(pixels, mode='L')
def make_gradient_v3(width, height, h, k, a, b, theta, gradient_colors=((255, 255, 255, 1), (0, 0, 0, 1))):
    """
    Generates a gradient image with an elliptical shape and the specified gradient colors.

    Args:
        width (int): Width of the output image.
        height (int): Height of the output image.
        h (float): x-coordinate of the center of the ellipse.
        k (float): y-coordinate of the center of the ellipse.
        a (float): Length of the semi-major axis.
        b (float): Length of the semi-minor axis.
        theta (float): Angle of rotation (in radians) of the ellipse.
        gradient_colors (sequence): Two color tuples for the gradient. The
            default is an immutable tuple rather than a list, so it cannot
            be accidentally shared/mutated across calls; callers may still
            pass a list.

    Returns:
        PIL.Image.Image: A two color gradient image with an elliptical shape.
    """
    # Precalculate trig terms and squared axis lengths.
    st, ct = math.sin(theta), math.cos(theta)
    aa, bb = a ** 2, b ** 2
    # Coordinate grids shifted so the ellipse center sits at the origin.
    y, x = np.mgrid[-k:height - k, -h:width - h]
    # Ellipse equation value per pixel: 0 at the center, growing outward.
    weights = (((x * ct + y * st) ** 2) / aa) + (((x * st - y * ct) ** 2) / bb)
    # Normalize to [0, 1] and invert so the center is brightest.
    weights = 1.0 - np.clip(weights / np.max(weights), 0.0, 1.0)
    grayscale_image = Image.fromarray(np.uint8(weights * 255))
    # Map the grayscale ramp onto the two supplied colors.
    return grayscale_to_gradient(grayscale_image, gradient_colors)
def draw_gradient_ellipse(width=512, height=512, white_amount=1.0, rotation=0.0, contrast=1.0):
    """
    Draw an ellipse with a radial gradient fill, and a variable amount of white in the center.

    :param width: The width of the output image. Default is 512.
    :param height: The height of the output image. Default is 512.
    :param white_amount: The amount of white in the center of the ellipse, as a float between 0.0 and 1.0. Default is 1.0.
    :param rotation: Rotation of the ellipse in degrees. Default is 0.0.
    :param contrast: Contrast-enhancement factor applied to the gradient (1.0 leaves it unchanged). Default is 1.0.
    :return: An RGBA image with the gradient ellipse.
    """
    # Convert the rotation to radians; the ellipse is centered on the canvas.
    theta = rotation * (math.pi / 180)
    center = (width // 2, height // 2)
    # Radial gradient filling the canvas, brightest at the center. (The
    # previous version first allocated a blank RGBA image that this call
    # immediately overwrote — that dead allocation and a block of
    # commented-out inner-ellipse experiments have been removed.)
    image = make_gradient_v2(width, height, center[0], center[1], width * white_amount, height * white_amount, theta)
    # Boost contrast, then promote to RGBA so an alpha channel can be attached.
    image = ImageEnhance.Contrast(image).enhance(contrast).convert('RGBA')
    # Use the gradient itself as its own alpha mask so the edges fade out.
    image = apply_alpha_mask(image, image)
    return image

View File

@ -8,10 +8,10 @@ from .helpers import (
fix_env_Path_ffprobe, fix_env_Path_ffprobe,
closest_upper_divisible_by_eight, closest_upper_divisible_by_eight,
load_model_from_setting, load_model_from_setting,
do_upscaleImg, do_upscaleImg,value_to_bool
) )
from .sd_helpers import renderImg2Img, renderTxt2Img from .sd_helpers import renderImg2Img, renderTxt2Img
from .image import shrink_and_paste_on_blank, open_image, apply_alpha_mask from .image import shrink_and_paste_on_blank, open_image, apply_alpha_mask, draw_gradient_ellipse, resize_and_crop_image
from .video import write_video from .video import write_video
@ -125,7 +125,7 @@ def create_zoom_single(
prompts[key] = value prompts[key] = value
prompt_images[key] = file_loc prompt_images[key] = file_loc
prompt_alpha_mask_images[key] = alpha_mask_loc prompt_alpha_mask_images[key] = alpha_mask_loc
prompt_image_is_keyframe[key] = is_keyframe prompt_image_is_keyframe[key] = value_to_bool(is_keyframe)
except ValueError: except ValueError:
pass pass
assert len(prompts_array) > 0, "prompts is empty" assert len(prompts_array) > 0, "prompts is empty"
@ -143,9 +143,7 @@ def create_zoom_single(
extra_frames = 0 extra_frames = 0
if custom_init_image: if custom_init_image:
current_image = custom_init_image.resize( current_image = resize_and_crop_image(custom_init_image, width, height)
(width, height), resample=Image.LANCZOS
)
print("using Custom Initial Image") print("using Custom Initial Image")
else: else:
if prompt_images[min(k for k in prompt_images.keys() if k >= 0)] == "": if prompt_images[min(k for k in prompt_images.keys() if k >= 0)] == "":
@ -165,14 +163,8 @@ def create_zoom_single(
) )
current_image = processed.images[0] current_image = processed.images[0]
else: else:
current_image = open_image(prompt_images[min(k for k in prompt_images.keys() if k >= 0)]).resize( current_image = open_image(prompt_images[min(k for k in prompt_images.keys() if k >= 0)])
(width, height), resample=Image.LANCZOS current_image = resize_and_crop_image(current_image, width, height)
)
# apply available alpha mask
if prompt_alpha_mask_images[min(k for k in prompt_alpha_mask_images.keys() if k >= 0)] != "":
current_image = apply_alpha_mask(current_image, open_image(prompt_alpha_mask_images[min(k for k in prompt_alpha_mask_images.keys() if k >= 0)]))
mask_width = math.trunc(width / 4) # was initially 512px => 128px mask_width = math.trunc(width / 4) # was initially 512px => 128px
mask_height = math.trunc(height / 4) # was initially 512px => 128px mask_height = math.trunc(height / 4) # was initially 512px => 128px
@ -208,6 +200,14 @@ def create_zoom_single(
if progress: if progress:
progress(((i + 1) / num_outpainting_steps), desc=print_out) progress(((i + 1) / num_outpainting_steps), desc=print_out)
# apply available alpha mask of previous image
if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))] != "":
current_image = apply_alpha_mask(current_image, open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))]))
else:
#generate automatic alpha mask
current_image_gradient_ratio = 0.615 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.89),0.1)
current_image = apply_alpha_mask(current_image, draw_gradient_ellipse(current_image.width, current_image.height, current_image_gradient_ratio, 0.0, 3.0).convert("RGB"))
prev_image_fix = current_image prev_image_fix = current_image
prev_image = shrink_and_paste_on_blank(current_image, mask_width, mask_height) prev_image = shrink_and_paste_on_blank(current_image, mask_width, mask_height)
current_image = prev_image current_image = prev_image
@ -219,13 +219,11 @@ def create_zoom_single(
# inpainting step # inpainting step
current_image = current_image.convert("RGB") current_image = current_image.convert("RGB")
paste_previous_image = prompt_image_is_keyframe[max(k for k in prompt_image_is_keyframe.keys() if k <= (i + 1))] paste_previous_image = not prompt_image_is_keyframe[max(k for k in prompt_image_is_keyframe.keys() if k <= (i + 1))]
# Custom and specified images work like keyframes # Custom and specified images work like keyframes
if custom_exit_image and (i + 1) >= (num_outpainting_steps + extra_frames): if custom_exit_image and (i + 1) >= (num_outpainting_steps + extra_frames):
current_image = custom_exit_image.resize( current_image = resize_and_crop_image(custom_exit_image, width, height)
(width, height), resample=Image.LANCZOS
)
print("using Custom Exit Image") print("using Custom Exit Image")
else: else:
if prompt_images[max(k for k in prompt_images.keys() if k <= (i + 1))] == "": if prompt_images[max(k for k in prompt_images.keys() if k <= (i + 1))] == "":
@ -251,13 +249,16 @@ def create_zoom_single(
#current_image.paste(prev_image, mask=prev_image) #current_image.paste(prev_image, mask=prev_image)
paste_previous_image = True paste_previous_image = True
else: else:
current_image = open_image(prompt_images[max(k for k in prompt_images.keys() if k <= (i + 1))]).resize( # use prerendered image, known as keyframe. Resize to target size
(width, height), resample=Image.LANCZOS current_image = open_image(prompt_images[max(k for k in prompt_images.keys() if k <= (i + 1))])
) current_image = resize_and_crop_image(current_image, width, height)
# apply available alpha mask # apply available alpha mask of previous image
if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))] != "": #if prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))] != "":
current_image = apply_alpha_mask(current_image, open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))])) # current_image = apply_alpha_mask(current_image, open_image(prompt_alpha_mask_images[max(k for k in prompt_alpha_mask_images.keys() if k <= (i + 1))]))
#else:
# current_image_gradient_ratio = 0.65 #max((min(current_image.width/current_image.height,current_image.height/current_image.width) * 0.925),0.1)
# current_image = draw_gradient_ellipse(current_image.width, current_image.height, current_image_gradient_ratio, 0.0, 1.8).convert("RGB")
# paste previous image on current image # paste previous image on current image
if paste_previous_image: if paste_previous_image:

View File

@ -8,3 +8,16 @@
flex: 0 0 0%; flex: 0 0 0%;
width: 0; width: 0;
} }
.gradio-container-3-23-0 .gradio-dataframe input {
height: 90%;
width: 90%;
}
.gradio-container-3-23-0 .gradio-dataframe .scroll-hide {
scrollbar-width: unset;
}
#component-2115, #component-2065 .gradio-column:nth-child(1) {
flex-grow: .66 !important;
}
#component-2115 .gradio-video, #component-2065 .gradio-column:nth-child(1) .gradio-video {
height: auto !important;
}