Formatting and use PIL image backend

pull/35/head
Kohaku-Blueleaf 2024-01-19 14:16:43 +08:00
parent 197daebecd
commit 0f511c7656
23 changed files with 832 additions and 612 deletions

View File

@@ -4,13 +4,15 @@ import cv2
def basic(target, blend, opacity):
return target * opacity + blend * (1-opacity)
return target * opacity + blend * (1 - opacity)
def blender(func):
def blend(target, blend, opacity=1, *args):
res = func(target, blend, *args)
res = basic(res, blend, opacity)
return np.clip(res, 0, 1)
return blend
@@ -18,118 +20,130 @@ class Blend:
@classmethod
def method(cls, name):
return getattr(cls, name)
normal = basic
@staticmethod
@blender
def darken(target, blend, *args):
return np.minimum(target, blend)
@staticmethod
@blender
def multiply(target, blend, *args):
return target * blend
@staticmethod
@blender
def color_burn(target, blend, *args):
return 1 - (1-target)/blend
return 1 - (1 - target) / blend
@staticmethod
@blender
def linear_burn(target, blend, *args):
return target+blend-1
return target + blend - 1
@staticmethod
@blender
def lighten(target, blend, *args):
return np.maximum(target, blend)
@staticmethod
@blender
def screen(target, blend, *args):
return 1 - (1-target) * (1-blend)
return 1 - (1 - target) * (1 - blend)
@staticmethod
@blender
def color_dodge(target, blend, *args):
return target/(1-blend)
return target / (1 - blend)
@staticmethod
@blender
def linear_dodge(target, blend, *args):
return target+blend
return target + blend
@staticmethod
@blender
def overlay(target, blend, *args):
return (target>0.5) * (1-(2-2*target)*(1-blend)) +\
(target<=0.5) * (2*target*blend)
return (target > 0.5) * (1 - (2 - 2 * target) * (1 - blend)) + (
target <= 0.5
) * (2 * target * blend)
@staticmethod
@blender
def soft_light(target, blend, *args):
return (blend>0.5) * (1 - (1-target)*(1-(blend-0.5))) +\
(blend<=0.5) * (target*(blend+0.5))
return (blend > 0.5) * (1 - (1 - target) * (1 - (blend - 0.5))) + (
blend <= 0.5
) * (target * (blend + 0.5))
@staticmethod
@blender
def hard_light(target, blend, *args):
return (blend>0.5) * (1 - (1-target)*(2-2*blend)) +\
(blend<=0.5) * (2*target*blend)
return (blend > 0.5) * (1 - (1 - target) * (2 - 2 * blend)) + (blend <= 0.5) * (
2 * target * blend
)
@staticmethod
@blender
def vivid_light(target, blend, *args):
return (blend>0.5) * (1 - (1-target)/(2*blend-1)) +\
(blend<=0.5) * (target/(1-2*blend))
return (blend > 0.5) * (1 - (1 - target) / (2 * blend - 1)) + (blend <= 0.5) * (
target / (1 - 2 * blend)
)
@staticmethod
@blender
def linear_light(target, blend, *args):
return (blend>0.5) * (target + 2*(blend-0.5)) +\
(blend<=0.5) * (target + 2*blend)
return (blend > 0.5) * (target + 2 * (blend - 0.5)) + (blend <= 0.5) * (
target + 2 * blend
)
@staticmethod
@blender
def pin_light(target, blend, *args):
return (blend>0.5) * np.maximum(target,2*(blend-0.5)) +\
(blend<=0.5) * np.minimum(target,2*blend)
return (blend > 0.5) * np.maximum(target, 2 * (blend - 0.5)) + (
blend <= 0.5
) * np.minimum(target, 2 * blend)
@staticmethod
@blender
def difference(target, blend, *args):
return np.abs(target - blend)
@staticmethod
@blender
def exclusion(target, blend, *args):
return 0.5 - 2*(target-0.5)*(blend-0.5)
return 0.5 - 2 * (target - 0.5) * (blend - 0.5)
blend_methods = [i for i in Blend.__dict__.keys() if i[0]!='_' and i!='method']
blend_methods = [i for i in Blend.__dict__.keys() if i[0] != "_" and i != "method"]
def run(layers):
def blend(bg, *args):
assert len(args)%5==0
chunks = [args[i*layers: i*layers+layers] for i in range(5)]
h, w, c = [i['image'] for i in chunks[-1] if i is not None][0].shape
base_img = np.array(Image.new(mode="RGB", size=(w, h), color=ImageColor.getcolor(bg, 'RGB')))
base_img = base_img.astype(np.float64)/255
assert len(args) % 5 == 0
chunks = [args[i * layers : i * layers + layers] for i in range(5)]
h, w, c = np.array([i["image"] for i in chunks[-1] if i is not None][0]).shape
base_img = np.array(
Image.new(mode="RGB", size=(w, h), color=ImageColor.getcolor(bg, "RGB"))
)
base_img = base_img.astype(np.float64) / 255
for alpha, mask_blur, mask_str, mode, img in reversed(list(zip(*chunks))):
if img is None or img['image'] is None: continue
img_now = Image.fromarray(img['image']).resize((w, h))
mask = Image.fromarray(img['mask'][:,:,0], mode='L')
img_now = np.array(img_now).astype(np.float64)/255
if img is None or img["image"] is None:
continue
img_now = img["image"].convert('RGB').resize((w, h))
mask = img["mask"].convert('L')
img_now = np.array(img_now).astype(np.float64) / 255
mask = mask.resize((w, h)).filter(ImageFilter.GaussianBlur(mask_blur))
mask = np.expand_dims(np.array(mask)*mask_str/255, 2)
mask = np.expand_dims(np.array(mask) * mask_str / 255, 2)
img_now = Blend.normal(base_img, img_now, mask)
base_img = Blend.method(mode)(img_now, base_img, alpha)
base_img *= 255
return Image.fromarray(base_img.astype(np.uint8), mode='RGB')
return blend
base_img = np.clip(base_img, 0, 255)
return Image.fromarray(base_img.astype(np.uint8), mode="RGB")
return blend

View File

@@ -1,7 +1,6 @@
from PIL import Image, ImageFilter
def run(img3, img_blur):
img = Image.fromarray(img3)
def run(img, img_blur):
blur = ImageFilter.GaussianBlur(img_blur)
return img.filter(blur)
return img.filter(blur)

View File

@@ -3,17 +3,17 @@ from PIL import Image
from .kromo import add_chromatic
def run(np_img, strength, blur = False):
def run(pil_img, strength, blur=False):
if strength <= 0:
return np_img
return pil_img
img = Image.fromarray(np_img)
img = pil_img
if (img.size[0] % 2 == 0 or img.size[1] % 2 == 0):
if (img.size[0] % 2 == 0):
if img.size[0] % 2 == 0 or img.size[1] % 2 == 0:
if img.size[0] % 2 == 0:
img = img.crop((0, 0, img.size[0] - 1, img.size[1]))
img.load()
if (img.size[1] % 2 == 0):
if img.size[1] % 2 == 0:
img = img.crop((0, 0, img.size[0], img.size[1] - 1))
img.load()

View File

@@ -16,63 +16,63 @@ import os
def cartesian_to_polar(data: np.ndarray) -> np.ndarray:
"""Returns the polar form of <data>
"""
"""Returns the polar form of <data>"""
width = data.shape[1]
height = data.shape[0]
assert (width > 2)
assert (height > 2)
assert (width % 2 == 1)
assert (height % 2 == 1)
assert width > 2
assert height > 2
assert width % 2 == 1
assert height % 2 == 1
perimeter = 2 * (width + height - 2)
halfdiag = math.ceil(((width ** 2 + height ** 2) ** 0.5) / 2)
halfdiag = math.ceil(((width**2 + height**2) ** 0.5) / 2)
halfw = width // 2
halfh = height // 2
ret = np.zeros((halfdiag, perimeter, 3))
# Don't want to deal with divide by zero errors...
ret[0:(halfw + 1), halfh] = data[halfh, halfw::-1]
ret[0:(halfw + 1), height + width - 2 +
halfh] = data[halfh, halfw:(halfw * 2 + 1)]
ret[0:(halfh + 1), height - 1 + halfw] = data[halfh:(halfh * 2 + 1), halfw]
ret[0:(halfh + 1), perimeter - halfw] = data[halfh::-1, halfw]
ret[0 : (halfw + 1), halfh] = data[halfh, halfw::-1]
ret[0 : (halfw + 1), height + width - 2 + halfh] = data[
halfh, halfw : (halfw * 2 + 1)
]
ret[0 : (halfh + 1), height - 1 + halfw] = data[halfh : (halfh * 2 + 1), halfw]
ret[0 : (halfh + 1), perimeter - halfw] = data[halfh::-1, halfw]
# Divide the image into 8 triangles, and use the same calculation on
# 4 triangles at a time. This is possible due to symmetry.
# This section is also responsible for the corner pixels
for i in range(0, halfh):
slope = (halfh - i) / (halfw)
diagx = ((halfdiag ** 2) / (slope ** 2 + 1)) ** 0.5
diagx = ((halfdiag**2) / (slope**2 + 1)) ** 0.5
unit_xstep = diagx / (halfdiag - 1)
unit_ystep = diagx * slope / (halfdiag - 1)
for row in range(halfdiag):
ystep = round(row * unit_ystep)
xstep = round(row * unit_xstep)
if ((halfh >= ystep) and halfw >= xstep):
if (halfh >= ystep) and halfw >= xstep:
ret[row, i] = data[halfh - ystep, halfw - xstep]
ret[row, height - 1 - i] = data[halfh + ystep, halfw - xstep]
ret[row, height + width - 2 +
i] = data[halfh + ystep, halfw + xstep]
ret[row, height + width + height - 3 -
i] = data[halfh - ystep, halfw + xstep]
ret[row, height + width - 2 + i] = data[halfh + ystep, halfw + xstep]
ret[row, height + width + height - 3 - i] = data[
halfh - ystep, halfw + xstep
]
else:
break
# Remaining 4 triangles
for j in range(1, halfw):
slope = (halfh) / (halfw - j)
diagx = ((halfdiag ** 2) / (slope ** 2 + 1)) ** 0.5
diagx = ((halfdiag**2) / (slope**2 + 1)) ** 0.5
unit_xstep = diagx / (halfdiag - 1)
unit_ystep = diagx * slope / (halfdiag - 1)
for row in range(halfdiag):
ystep = round(row * unit_ystep)
xstep = round(row * unit_xstep)
if (halfw >= xstep and halfh >= ystep):
if halfw >= xstep and halfh >= ystep:
ret[row, height - 1 + j] = data[halfh + ystep, halfw - xstep]
ret[row, height + width - 2 -
j] = data[halfh + ystep, halfw + xstep]
ret[row, height + width + height - 3 +
j] = data[halfh - ystep, halfw + xstep]
ret[row, height + width - 2 - j] = data[halfh + ystep, halfw + xstep]
ret[row, height + width + height - 3 + j] = data[
halfh - ystep, halfw + xstep
]
ret[row, perimeter - j] = data[halfh - ystep, halfw - xstep]
else:
break
@@ -85,23 +85,24 @@ def polar_to_cartesian(data: np.ndarray, width: int, height: int) -> np.ndarray:
<width> is the original width of the cartesian image
<height> is the original height of the cartesian image
"""
assert (width > 2)
assert (height > 2)
assert (width % 2 == 1)
assert (height % 2 == 1)
assert width > 2
assert height > 2
assert width % 2 == 1
assert height % 2 == 1
perimeter = 2 * (width + height - 2)
halfdiag = math.ceil(((width ** 2 + height ** 2) ** 0.5) / 2)
halfdiag = math.ceil(((width**2 + height**2) ** 0.5) / 2)
halfw = width // 2
halfh = height // 2
ret = np.zeros((height, width, 3))
def div0():
# Don't want to deal with divide by zero errors...
ret[halfh, halfw::-1] = data[0:(halfw + 1), halfh]
ret[halfh, halfw:(halfw * 2 + 1)] = data[0:(halfw + 1),
height + width - 2 + halfh]
ret[halfh:(halfh * 2 + 1), halfw] = data[0:(halfh + 1), height - 1 + halfw]
ret[halfh::-1, halfw] = data[0:(halfh + 1), perimeter - halfw]
ret[halfh, halfw::-1] = data[0 : (halfw + 1), halfh]
ret[halfh, halfw : (halfw * 2 + 1)] = data[
0 : (halfw + 1), height + width - 2 + halfh
]
ret[halfh : (halfh * 2 + 1), halfw] = data[0 : (halfh + 1), height - 1 + halfw]
ret[halfh::-1, halfw] = data[0 : (halfh + 1), perimeter - halfw]
div0()
@@ -110,21 +111,21 @@ def polar_to_cartesian(data: np.ndarray, width: int, height: int) -> np.ndarray:
def part1():
for i in range(0, halfh):
slope = (halfh - i) / (halfw)
diagx = ((halfdiag ** 2) / (slope ** 2 + 1)) ** 0.5
diagx = ((halfdiag**2) / (slope**2 + 1)) ** 0.5
unit_xstep = diagx / (halfdiag - 1)
unit_ystep = diagx * slope / (halfdiag - 1)
for row in range(halfdiag):
ystep = round(row * unit_ystep)
xstep = round(row * unit_xstep)
if ((halfh >= ystep) and halfw >= xstep):
ret[halfh - ystep, halfw - xstep] = \
data[row, i]
ret[halfh + ystep, halfw - xstep] = \
data[row, height - 1 - i]
ret[halfh + ystep, halfw + xstep] = \
data[row, height + width - 2 + i]
ret[halfh - ystep, halfw + xstep] = \
data[row, height + width + height - 3 - i]
if (halfh >= ystep) and halfw >= xstep:
ret[halfh - ystep, halfw - xstep] = data[row, i]
ret[halfh + ystep, halfw - xstep] = data[row, height - 1 - i]
ret[halfh + ystep, halfw + xstep] = data[
row, height + width - 2 + i
]
ret[halfh - ystep, halfw + xstep] = data[
row, height + width + height - 3 - i
]
else:
break
@@ -133,21 +134,21 @@ def polar_to_cartesian(data: np.ndarray, width: int, height: int) -> np.ndarray:
def part2():
for j in range(1, halfw):
slope = (halfh) / (halfw - j)
diagx = ((halfdiag ** 2) / (slope ** 2 + 1)) ** 0.5
diagx = ((halfdiag**2) / (slope**2 + 1)) ** 0.5
unit_xstep = diagx / (halfdiag - 1)
unit_ystep = diagx * slope / (halfdiag - 1)
for row in range(halfdiag):
ystep = round(row * unit_ystep)
xstep = round(row * unit_xstep)
if (halfw >= xstep and halfh >= ystep):
ret[halfh + ystep, halfw - xstep] = \
data[row, height - 1 + j]
ret[halfh + ystep, halfw + xstep] = \
data[row, height + width - 2 - j]
ret[halfh - ystep, halfw + xstep] = \
data[row, height + width + height - 3 + j]
ret[halfh - ystep, halfw - xstep] = \
data[row, perimeter - j]
if halfw >= xstep and halfh >= ystep:
ret[halfh + ystep, halfw - xstep] = data[row, height - 1 + j]
ret[halfh + ystep, halfw + xstep] = data[
row, height + width - 2 - j
]
ret[halfh - ystep, halfw + xstep] = data[
row, height + width + height - 3 + j
]
ret[halfh - ystep, halfw - xstep] = data[row, perimeter - j]
else:
break
@@ -156,7 +157,9 @@ def polar_to_cartesian(data: np.ndarray, width: int, height: int) -> np.ndarray:
# Repairs black/missing pixels in the transformed image
def set_zeros():
zero_mask = ret[1:-1, 1:-1] == 0
ret[1:-1, 1:-1] = np.where(zero_mask, (ret[:-2, 1:-1] + ret[2:, 1:-1]) / 2, ret[1:-1, 1:-1])
ret[1:-1, 1:-1] = np.where(
zero_mask, (ret[:-2, 1:-1] + ret[2:, 1:-1]) / 2, ret[1:-1, 1:-1]
)
set_zeros()
@@ -169,11 +172,24 @@ def get_gauss(n: int) -> List[float]:
"""
sigma = 0.3 * (n / 2 - 1) + 0.8
r = range(-int(n / 2), int(n / 2) + 1)
new_sum = sum([1 / (sigma * math.sqrt(2 * math.pi)) *
math.exp(-float(x) ** 2 / (2 * sigma ** 2)) for x in r])
new_sum = sum(
[
1
/ (sigma * math.sqrt(2 * math.pi))
* math.exp(-float(x) ** 2 / (2 * sigma**2))
for x in r
]
)
# Ensure that the gaussian array adds up to one
return [(1 / (sigma * math.sqrt(2 * math.pi)) *
math.exp(-float(x) ** 2 / (2 * sigma ** 2))) / new_sum for x in r]
return [
(
1
/ (sigma * math.sqrt(2 * math.pi))
* math.exp(-float(x) ** 2 / (2 * sigma**2))
)
/ new_sum
for x in r
]
def vertical_gaussian(data: np.ndarray, n: int) -> np.ndarray:
@@ -186,19 +202,22 @@ def vertical_gaussian(data: np.ndarray, n: int) -> np.ndarray:
width = data.shape[1]
height = data.shape[0]
padded_data = np.zeros((height + padding * 2, width))
padded_data[padding: -padding, :] = data
padded_data[padding:-padding, :] = data
ret = np.zeros((height, width))
kernel = None
old_radius = - 1
old_radius = -1
for i in range(height):
radius = round(i * padding / (height - 1)) + 1
# Recreate new kernel only if we have to
if (radius != old_radius):
if radius != old_radius:
old_radius = radius
kernel = np.tile(get_gauss(1 + 2 * (radius - 1)),
(width, 1)).transpose()
ret[i, :] = np.sum(np.multiply(
padded_data[padding + i - radius + 1:padding + i + radius, :], kernel), axis=0)
kernel = np.tile(get_gauss(1 + 2 * (radius - 1)), (width, 1)).transpose()
ret[i, :] = np.sum(
np.multiply(
padded_data[padding + i - radius + 1 : padding + i + radius, :], kernel
),
axis=0,
)
return ret
@@ -221,7 +240,11 @@ def add_chromatic(im, strength: float = 1, no_blur: bool = False):
bfinal = b
else:
poles = cartesian_to_polar(np.stack([rdata, gdata, bdata], axis=-1))
rpolar, gpolar, bpolar = poles[:, :, 0], poles[:, :, 1], poles[:, :, 2],
rpolar, gpolar, bpolar = (
poles[:, :, 0],
poles[:, :, 1],
poles[:, :, 2],
)
bluramount = (im.size[0] + im.size[1] - 2) / 100 * strength
if round(bluramount) > 0:
@@ -230,18 +253,34 @@
bpolar = vertical_gaussian(bpolar, round(bluramount * 1.4))
rgbpolar = np.stack([rpolar, gpolar, bpolar], axis=-1)
cartes = polar_to_cartesian(rgbpolar, width=rdata.shape[1], height=rdata.shape[0])
rcartes, gcartes, bcartes = cartes[:, :, 0], cartes[:, :, 1], cartes[:, :, 2],
cartes = polar_to_cartesian(
rgbpolar, width=rdata.shape[1], height=rdata.shape[0]
)
rcartes, gcartes, bcartes = (
cartes[:, :, 0],
cartes[:, :, 1],
cartes[:, :, 2],
)
rfinal = Image.fromarray(np.uint8(rcartes), 'L')
gfinal = Image.fromarray(np.uint8(gcartes), 'L')
bfinal = Image.fromarray(np.uint8(bcartes), 'L')
rfinal = Image.fromarray(np.uint8(rcartes), "L")
gfinal = Image.fromarray(np.uint8(gcartes), "L")
bfinal = Image.fromarray(np.uint8(bcartes), "L")
# enlarge the green and blue channels slightly, blue being the most enlarged
gfinal = gfinal.resize((round((1 + 0.018 * strength) * rdata.shape[1]),
round((1 + 0.018 * strength) * rdata.shape[0])), Image.ANTIALIAS)
bfinal = bfinal.resize((round((1 + 0.044 * strength) * rdata.shape[1]),
round((1 + 0.044 * strength) * rdata.shape[0])), Image.ANTIALIAS)
gfinal = gfinal.resize(
(
round((1 + 0.018 * strength) * rdata.shape[1]),
round((1 + 0.018 * strength) * rdata.shape[0]),
),
Image.ANTIALIAS,
)
bfinal = bfinal.resize(
(
round((1 + 0.044 * strength) * rdata.shape[1]),
round((1 + 0.044 * strength) * rdata.shape[0]),
),
Image.ANTIALIAS,
)
rwidth, rheight = rfinal.size
gwidth, gheight = gfinal.size
@@ -252,10 +291,14 @@
gwdiff = (bwidth - gwidth) // 2
# Centre the channels
im = Image.merge("RGB", (
rfinal.crop((-rwdiff, -rhdiff, bwidth - rwdiff, bheight - rhdiff)),
gfinal.crop((-gwdiff, -ghdiff, bwidth - gwdiff, bheight - ghdiff)),
bfinal))
im = Image.merge(
"RGB",
(
rfinal.crop((-rwdiff, -rhdiff, bwidth - rwdiff, bheight - rhdiff)),
gfinal.crop((-gwdiff, -ghdiff, bwidth - gwdiff, bheight - ghdiff)),
bfinal,
),
)
# Crop the image to the original image dimensions
return im.crop((rwdiff, rhdiff, rwidth + rwdiff, rheight + rhdiff))
@@ -273,10 +316,14 @@ def add_jitter(im, pixels: int = 1):
rwidth, rheight = r.size
gwidth, gheight = g.size
bwidth, bheight = b.size
im = Image.merge("RGB", (
r.crop((pixels, 0, rwidth + pixels, rheight)),
g.crop((0, 0, gwidth, gheight)),
b.crop((-pixels, 0, bwidth - pixels, bheight))))
im = Image.merge(
"RGB",
(
r.crop((pixels, 0, rwidth + pixels, rheight)),
g.crop((0, 0, gwidth, gheight)),
b.crop((-pixels, 0, bwidth - pixels, bheight)),
),
)
return im
@@ -286,63 +333,87 @@ def blend_images(im, og_im, alpha: float = 1, strength: float = 1):
before adding it as an overlay.
"""
og_im.putalpha(int(255 * alpha))
og_im = og_im.resize((round((1 + 0.018 * strength) * og_im.size[0]),
round((1 + 0.018 * strength) * og_im.size[1])), Image.ANTIALIAS)
og_im = og_im.resize(
(
round((1 + 0.018 * strength) * og_im.size[0]),
round((1 + 0.018 * strength) * og_im.size[1]),
),
Image.ANTIALIAS,
)
hdiff = (og_im.size[1] - im.size[1]) // 2
wdiff = (og_im.size[0] - im.size[0]) // 2
og_im = og_im.crop((wdiff, hdiff, wdiff + im.size[0], hdiff + im.size[1]))
im = im.convert('RGBA')
im = im.convert("RGBA")
final_im = Image.new("RGBA", im.size)
final_im = Image.alpha_composite(final_im, im)
final_im = Image.alpha_composite(final_im, og_im)
final_im = final_im.convert('RGB')
final_im = final_im.convert("RGB")
return final_im
if __name__ == '__main__':
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Apply chromatic aberration and lens blur to images")
description="Apply chromatic aberration and lens blur to images"
)
parser.add_argument("filename", help="input filename")
parser.add_argument("-s", "--strength", type=float, default=1.0,
help="set blur/aberration strength, defaults to 1.0")
parser.add_argument("-j", "--jitter", type=int, default=0,
help="set color channel offset pixels, defaults to 0")
parser.add_argument("-y", "--overlay", type=float, default=0.0,
help="alpha of original image overlay, defaults to 0.0")
parser.add_argument(
"-n", "--noblur", help="disable radial blur", action="store_true")
"-s",
"--strength",
type=float,
default=1.0,
help="set blur/aberration strength, defaults to 1.0",
)
parser.add_argument(
"-o", "--out", help="write to OUTPUT (supports multiple formats)")
"-j",
"--jitter",
type=int,
default=0,
help="set color channel offset pixels, defaults to 0",
)
parser.add_argument(
'-v', '--verbose', help="print status messages", action="store_true")
"-y",
"--overlay",
type=float,
default=0.0,
help="alpha of original image overlay, defaults to 0.0",
)
parser.add_argument(
"-n", "--noblur", help="disable radial blur", action="store_true"
)
parser.add_argument(
"-o", "--out", help="write to OUTPUT (supports multiple formats)"
)
parser.add_argument(
"-v", "--verbose", help="print status messages", action="store_true"
)
args = parser.parse_args()
# Get Start Time
start = time.time()
ifile = args.filename
im = Image.open(ifile)
if (args.verbose):
if args.verbose:
print("Original Image:", im.format, im.size, im.mode)
if (im.mode != 'RGB'):
if (args.verbose):
if im.mode != "RGB":
if args.verbose:
print("Converting to RGB...")
im = im.convert('RGB')
im = im.convert("RGB")
# Ensure width and height are odd numbers
if (im.size[0] % 2 == 0 or im.size[1] % 2 == 0):
if (args.verbose):
if im.size[0] % 2 == 0 or im.size[1] % 2 == 0:
if args.verbose:
print("Dimensions must be odd numbers, cropping...")
if (im.size[0] % 2 == 0):
if im.size[0] % 2 == 0:
im = im.crop((0, 0, im.size[0] - 1, im.size[1]))
im.load()
if (im.size[1] % 2 == 0):
if im.size[1] % 2 == 0:
im = im.crop((0, 0, im.size[0], im.size[1] - 1))
im.load()
if (args.verbose):
if args.verbose:
print("New Dimensions:", im.size)
og_im = im.copy()
@@ -361,5 +432,5 @@ if __name__ == '__main__':
im.save(args.out, quality=99)
# Get Finish Time
end = time.time()
if (args.verbose):
print("Completed in: " + '% 6.2f' % (end - start) + "s")
if args.verbose:
print("Completed in: " + "% 6.2f" % (end - start) + "s")

View File

@@ -23,12 +23,12 @@ def run(
sharpness,
vignette,
):
bright /=100
contrast /=100
temp /=100
sat /=100
bright /= 100
contrast /= 100
temp /= 100
sat /= 100
res = Image.fromarray(img1)
res = img1
res = get_exposure_offset(res, exposure_offset, bright)
res = get_hdr(res, hdr, img1)
@@ -36,40 +36,40 @@
res = get_noise(res, noise)
res = get_vignette(res, vignette)
#brightness
#res = Image.fromarray(res)
# brightness
# res = Image.fromarray(res)
brightness = ImageEnhance.Brightness(res)
res = brightness.enhance(1+bright)
res = brightness.enhance(1 + bright)
#contrast
# contrast
cont = ImageEnhance.Contrast(res)
res = cont.enhance(1+contrast)
res = cont.enhance(1 + contrast)
res = np.array(res).astype(np.float32)
#temp
if temp>0:
res[:, :, 0] *= 1+temp
res[:, :, 1] *= 1+temp*0.4
elif temp<0:
res[:, :, 2] *= 1-temp
res = np.clip(res, 0, 255)/255
# temp
if temp > 0:
res[:, :, 0] *= 1 + temp
res[:, :, 1] *= 1 + temp * 0.4
elif temp < 0:
res[:, :, 2] *= 1 - temp
res = np.clip(res, 0, 255) / 255
res = np.clip(np.power(res, gamma), 0, 1)
#saturation
# saturation
print(res.shape)
sat_real = 1 + sat
hls_img = cv2.cvtColor(res, cv2.COLOR_RGB2HLS)
hls_img[:, :, 2] = np.clip(sat_real*hls_img[:, :, 2], 0, 1)
res = cv2.cvtColor(hls_img, cv2.COLOR_HLS2RGB)*255
hls_img[:, :, 2] = np.clip(sat_real * hls_img[:, :, 2], 0, 1)
res = cv2.cvtColor(hls_img, cv2.COLOR_HLS2RGB) * 255
# hue
hsv_img = cv2.cvtColor(res, cv2.COLOR_RGB2HSV)
print(np.max(hsv_img[:, :, 0]), np.max(hsv_img[:, :, 1]), np.max(hsv_img[:, :, 2]))
hsv_img[:, :, 0] = (hsv_img[:, :, 0]+hue)%360
hsv_img[:, :, 0] = (hsv_img[:, :, 0] + hue) % 360
res = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB)
res = res.astype(np.uint8)
res = Image.fromarray(res, mode='RGB')
res = Image.fromarray(res, mode="RGB")
return res

View File

@@ -9,6 +9,4 @@ def get_exposure_offset(img, value, brightness_value):
np_img = np.array(img).astype(float) + value * 75
np_img = np.clip(np_img, 0, 255).astype(np.uint8)
img = Image.fromarray(np_img)
return ImageEnhance.Brightness(img).enhance(
(brightness_value+1) - value / 4
)
return ImageEnhance.Brightness(img).enhance((brightness_value + 1) - value / 4)

View File

@@ -23,14 +23,10 @@ def get_hdr(img, value, original_img):
color_dodge = converted_original_img / (1 - converted_sharped)
converted_color_dodge = (255 * color_dodge).clip(0, 255).astype(np.uint8)
temp_img = Image.fromarray(
cv2.cvtColor(converted_color_dodge, cv2.COLOR_BGR2RGB)
)
temp_img = Image.fromarray(cv2.cvtColor(converted_color_dodge, cv2.COLOR_BGR2RGB))
inverted_color_dodge = ImageOps.invert(temp_img)
black_white_color_dodge = ImageEnhance.Color(inverted_color_dodge).enhance(0)
hue = blendLayers(temp_img, black_white_color_dodge, BlendType.HUE)
hdr_image = blendLayers(hue, temp_img, BlendType.NORMAL, 0.7)
return blendLayers(img, hdr_image, BlendType.NORMAL, value * 2).convert(
"RGB"
)
return blendLayers(img, hdr_image, BlendType.NORMAL, value * 2).convert("RGB")

View File

@@ -2,11 +2,10 @@ from PIL import ImageChops, Image
import numpy as np
def get_noise(img, value):
if value <= 0:
return img
noise = np.random.randint(0, value * 100, img.size, np.uint8)
noise_img = Image.fromarray(noise, 'L').resize(img.size).convert(img.mode)
noise_img = Image.fromarray(noise, "L").resize(img.size).convert(img.mode)
return ImageChops.add(img, noise_img)

View File

@@ -5,4 +5,4 @@ def get_sharpness(img, value):
if value <= 0:
return img
return ImageEnhance.Sharpness(img).enhance((value+1) * 1.5)
return ImageEnhance.Sharpness(img).enhance((value + 1) * 1.5)

View File

@@ -9,8 +9,6 @@ def get_vignette(img, value):
mask = Image.new("L", (width, height), 0)
draw = ImageDraw.Draw(mask)
padding = 100 - value * 100
draw.ellipse(
(-padding, -padding, width + padding, height + padding), fill=255
)
draw.ellipse((-padding, -padding, width + padding, height + padding), fill=255)
mask = mask.filter(ImageFilter.GaussianBlur(radius=100))
return Image.composite(img, Image.new("RGB", img.size, "black"), mask)

View File

@@ -9,16 +9,17 @@ import numpy as np
from scipy import interpolate
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
plt.style.use('dark_background')
matplotlib.use("agg")
from matplotlib import pyplot as plt
plt.style.use("dark_background")
def make_curve(x_in, y_in):
assert len(x_in) == len(y_in)
his = set([0, 255])
xs = []
ys = []
for x, y in sorted(zip(x_in, y_in)):
@@ -26,58 +27,57 @@ def make_curve(x_in, y_in):
xs.append(x)
ys.append(y)
his.add(x)
if len(xs):
spline = interpolate.make_interp_spline(
[0, *xs, 255], [0, *ys, 255], 2+(len(xs)>1)
[0, *xs, 255], [0, *ys, 255], 2 + (len(xs) > 1)
)
return lambda x:np.clip(spline(x), 0, 255)
return lambda x: np.clip(spline(x), 0, 255)
else:
return lambda x:x
return lambda x: x
def make_plot(points):
xs, ys = points
curve = make_curve(xs, ys)
fig, ax = plt.subplots(1, 1)
x = np.arange(0, 255, 1)
y = np.clip(curve(x), 0, 255)
ax.set_xlim(0, 255)
ax.set_ylim(0, 255)
ax.plot([0, 255], [0, 255], 'white')
ax.plot([0, 255], [0, 255], "white")
ax.plot(x, y)
ax.plot([0, *sorted(xs), 255], [0, *sorted(ys), 255], 'ro')
ax.plot([0, *sorted(xs), 255], [0, *sorted(ys), 255], "ro")
fig.canvas.draw()
img = Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())
plt.close('all')
img = Image.frombytes(
"RGB", fig.canvas.get_width_height(), fig.canvas.tostring_rgb()
)
plt.close("all")
del fig, ax
return img
def run(
points: int
):
def curve(
img: NDArray,
*args: List[int]
):
def run(points: int):
def curve(img: Image, *args: List[int]):
nonlocal points
#all, r, g, b
# all, r, g, b
point = points * 2
all, r, g, b = (
(k[::2], k[1::2])
for i in range(4) for k in [args[i*point: i*point+point]]
for i in range(4)
for k in [args[i * point : i * point + point]]
)
img = np.array(img)
img[:, :, 0] = make_curve(*r)(img[:, :, 0])
img[:, :, 1] = make_curve(*g)(img[:, :, 1])
img[:, :, 2] = make_curve(*b)(img[:, :, 2])
img = make_curve(*all)(img)
return img.astype(np.uint8)
return curve
@@ -85,35 +85,38 @@ def curve_img(*all_points):
return make_plot((all_points[::2], all_points[1::2]))
if __name__ == '__main__':
if __name__ == "__main__":
from matplotlib import pyplot as plt
from time import time_ns
plt.style.use('dark_background')
plt.style.use("dark_background")
t0 = time_ns()
fig, ax = plt.subplots(1, 1)
xs = [50, 125, 200]
ys = [40, 150, 180]
t2 = time_ns()
curve = make_curve(xs, ys)
x = np.arange(0, 255, 0.00001)
y = np.clip(curve(x), 0, 255)
t3 = time_ns()
ax.set_xlim(0, 255)
ax.set_ylim(0, 255)
ax.plot([0, 255], [0, 255], 'white')
ax.plot([0, 255], [0, 255], "white")
ax.plot(x[::10000], y[::10000])
ax.plot([0, *xs, 255], [0, *ys, 255], 'ro')
ax.plot([0, *xs, 255], [0, *ys, 255], "ro")
fig.canvas.draw()
img = Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())
img = Image.frombytes(
"RGB", fig.canvas.get_width_height(), fig.canvas.tostring_rgb()
)
plt.close(fig)
t1 = time_ns()
print((t1-t0)/1e6)
print((t3-t2)/1e6, x.size)
img.show()
print((t1 - t0) / 1e6)
print((t3 - t2) / 1e6, x.size)
img.show()

View File

@@ -1,11 +1,9 @@
from PIL import Image
def run(np_img, text):
def run(img, text):
if not text:
return np_img
return img
img = Image.fromarray(np_img)
img.info["parameters"] = text
return img

View File

@@ -1,41 +1,27 @@
from __future__ import annotations
from typing import Any, Tuple, List, Union
from numpy.typing import NDArray
from itertools import product
from PIL import Image
import numpy as np
def dithering(
img: NDArray[Any],
find_new_color
):
def dithering(img: Image, find_new_color):
img = np.array(img)
d_h, d_w, c = img.shape
new_res = np.array(img, dtype=np.float32)/255
new_res = np.array(img, dtype=np.float32) / 255
for i, j in product(range(d_h), range(d_w)):
old_val = new_res[i, j].copy()
new_val = find_new_color(old_val)
new_res[i, j] = new_val
err = old_val - new_val
if j < d_w - 1:
new_res[i, j+1] += err * 7/16
new_res[i, j + 1] += err * 7 / 16
if i < d_h - 1:
new_res[i+1, j] += err * 5/16
new_res[i + 1, j] += err * 5 / 16
if j > 0:
new_res[i+1, j-1] += err * 3/16
new_res[i + 1, j - 1] += err * 3 / 16
if j < d_w - 1:
new_res[i+1, j+1] += err * 1/16
return np.clip(new_res/np.max(new_res, axis=(0,1))*255, 0, 255)
def run(
src,
k: int = 3,
):
def calc():
pass
return Image.fromarray(result)
new_res[i + 1, j + 1] += err * 1 / 16
return np.clip(new_res / np.max(new_res, axis=(0, 1)) * 255, 0, 255)

View File

@@ -1,6 +1,7 @@
import enum
import numpy as np
from PIL import Image
class Axis(str, enum.Enum):
@@ -8,10 +9,11 @@ class Axis(str, enum.Enum):
HORIZONTAL = "horizontal"
def run(np_img, axis):
def run(pil_img, axis):
np_img = np.array(pil_img)
if axis == Axis.VERTICAL:
np_img = np.flipud(np_img)
elif axis == Axis.HORIZONTAL:
np_img = np.fliplr(np_img)
return np_img
return Image.fromarray(np_img)

View File

@@ -5,14 +5,16 @@ import numpy as np
from PIL import Image
def run(np_img, k1, k2):
def run(pil_img, k1, k2):
np_img = np.array(pil_img)
height, width = np_img.shape[:2]
focal_length = width
center_x = width / 2
center_y = height / 2
K = np.array(
[[focal_length, 0, center_x], [0, focal_length, center_y], [0, 0, 1]], dtype=np.float64,
[[focal_length, 0, center_x], [0, focal_length, center_y], [0, 0, 1]],
dtype=np.float64,
)
D = np.array([k1, k2, 0, 0], dtype=np.float64)
img = cv2.fisheye.undistortImage(np_img, K, D, Knew=K)

View File

@@ -6,18 +6,19 @@ import scipy as sp
from hakuimg.blend import Blend
def run(img, blur, strength, mode='BS'):
img = img/255
if mode == 'BS':
def run(pil_img, blur, strength, mode="BS"):
img = np.array(pil_img)
img = img / 255
if mode == "BS":
img_blur = cv2.GaussianBlur(img, (0, 0), blur)
img_glow = Blend.screen(img_blur, img, strength)
elif mode == 'BMBL':
elif mode == "BMBL":
img_blur = cv2.GaussianBlur(img, (0, 0), blur)
img_mul = Blend.multiply(img_blur, img)
img_mul_blur = cv2.GaussianBlur(img_mul, (0, 0), blur)
img_glow = Blend.lighten(img_mul_blur, img, strength)
else:
raise NotImplementedError
return (img_glow*255).astype(np.uint8)
return (img_glow * 255).astype(np.uint8)

View File

@@ -13,131 +13,122 @@ from hakuimg.dither import dithering
INFLATE_FILTER = [
None,
np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], np.uint8),
np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], np.uint8),
np.array(
[[0, 1, 0],
[1, 1, 1],
[0, 1, 0]], np.uint8
[
[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
],
np.uint8,
),
np.array(
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]], np.uint8
),
np.array(
[[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]], np.uint8
),
np.array(
[[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]], np.uint8
[
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
],
np.uint8,
),
np.ones((7, 7), np.uint8),
np.ones((9, 9), np.uint8),
np.ones((11, 11), np.uint8),
np.ones((13, 13), np.uint8),
np.ones((15, 15), np.uint8),
np.ones((17, 17), np.uint8)
np.ones((17, 17), np.uint8),
]
def read_img_as_array(
img
) -> NDArray[Any]:
'''Convert image to RGBA and read to ndarray'''
def pil_imgread_img_as_array(img) -> NDArray[Any]:
"""Convert image to RGBA and read to ndarray"""
img = Image.fromarray(img)
img = img.convert('RGBA')
img = img.convert("RGBA")
img_arr = np.asarray(img)
return img_arr
def preprocess(
img: NDArray[Any],
blur: int = 0,
blur: int = 0,
erode: int = 0,
) -> NDArray[Any]:
'''
Process for
"""
Process for
* outline inflation (erode)
* smoothing (blur)
* saturation
* contrast
'''
"""
# outline process
if erode:
img = cv2.erode(
img, INFLATE_FILTER[erode],
iterations = 1,
img,
INFLATE_FILTER[erode],
iterations=1,
)
# blur process
if blur:
img = cv2.bilateralFilter(
img, 15, blur*20, 20
)
img = cv2.bilateralFilter(img, 15, blur * 20, 20)
img = img.astype(np.float32)
return img
def pixelize(
img: NDArray[Any],
k: int, c: int,
d_w: int, d_h: int,
o_w: int, o_h: int,
img: Image,
k: int,
c: int,
d_w: int,
d_h: int,
o_w: int,
o_h: int,
precise: int,
mode: str = 'dithering',
resize: bool = True
mode: str = "dithering",
resize: bool = True,
) -> Tuple[NDArray[Any], NDArray[Any]]:
'''
"""
Use down scale and up scale to make pixel image.
And use k-means to confine the num of colors.
'''
img = cv2.resize(
img, (d_w, d_h),
interpolation = cv2.INTER_NEAREST
)
"""
img = cv2.resize(img, (d_w, d_h), interpolation=cv2.INTER_NEAREST)
# reshape to 1-dim array(for every color) for k-means
# use k-means to abstract the colors to use
if 'kmeans' in mode:
if "kmeans" in mode:
criteria = (
cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER,
precise*5, 0.01
cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
precise * 5,
0.01,
)
img_cp = img.reshape(-1, c)
_, label, center = cv2.kmeans(
img_cp, k, None,
criteria, 1, cv2.KMEANS_PP_CENTERS
img_cp, k, None, criteria, 1, cv2.KMEANS_PP_CENTERS
)
if 'dithering' in mode:
if "dithering" in mode:
center /= 255
kdt = sp.spatial.KDTree(center)
def find_center(px):
return center[kdt.query(px)[1]]
result = dithering(img, find_center)
else:
result = center[label.flatten()].reshape(*img.shape)
elif mode == 'dithering':
result = dithering(
img, lambda px: np.round(px*(k-1))/(k-1)
)
elif mode == "dithering":
result = dithering(img, lambda px: np.round(px * (k - 1)) / (k - 1))
else:
raise NotImplementedError('Unknown Method')
raise NotImplementedError("Unknown Method")
if resize:
result = cv2.resize(
result,
(o_w, o_h),
interpolation=cv2.INTER_NEAREST
)
result = cv2.resize(result, (o_w, o_h), interpolation=cv2.INTER_NEAREST)
return result.astype(np.uint8)
@ -145,17 +136,17 @@ def run(
src: Image.Image,
k: int = 3,
scale: int = 2,
blur: int = 0,
blur: int = 0,
erode: int = 0,
mode: str = 'kmeans',
mode: str = "kmeans",
precise: int = 10,
resize: bool = True
resize: bool = True,
) -> Tuple[Image.Image, List[List[Union[str, float]]]]:
#print('Start process.')
#print('Read raw image... ', end='', flush=True)
img = read_img_as_array(src)
#convert color space
# print('Start process.')
# print('Read raw image... ', end='', flush=True)
img = np.asarray(src.convert('RGBA'))
# convert color space
alpha_channel = img[:, :, 3]
img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
h, w, c = img.shape
@ -163,49 +154,31 @@ def run(
d_w = w // scale
o_h = h
o_w = w
#print('done!')
#print('Image preprocess... ', end='', flush=True)
# print('done!')
# print('Image preprocess... ', end='', flush=True)
# preprocess(erode, blur, saturation, contrast)
img = preprocess(
img,
blur, erode
)
#print('done!')
#print('Pixelize... ', end='', flush=True)
img = preprocess(img, blur, erode)
# print('done!')
# print('Pixelize... ', end='', flush=True)
# pixelize(using k-means)
result = pixelize(
img, k, c,
d_w, d_h,
o_w, o_h,
precise,
mode,
resize
)
#print('done!')
#print('Process output image... ', end='', flush=True)
result = pixelize(img, k, c, d_w, d_h, o_w, o_h, precise, mode, resize)
# print('done!')
# print('Process output image... ', end='', flush=True)
# add alpha channel
a = cv2.resize(
alpha_channel, (d_w, d_h),
interpolation = cv2.INTER_NEAREST
)
a = cv2.resize(alpha_channel, (d_w, d_h), interpolation=cv2.INTER_NEAREST)
if resize:
a = cv2.resize(
a, (o_w, o_h),
interpolation = cv2.INTER_NEAREST
)
a[a!=0]=255
a = cv2.resize(a, (o_w, o_h), interpolation=cv2.INTER_NEAREST)
a[a != 0] = 255
if 0 not in a:
a[0, 0] = 0
r, g, b = cv2.split(result)
result = cv2.merge((r, g, b, a))
# for saving to png
result = cv2.cvtColor(
result, cv2.COLOR_RGBA2BGRA
)
#print('done!')
return Image.fromarray(result)
result = cv2.cvtColor(result, cv2.COLOR_RGBA2BGRA)
# print('done!')
return Image.fromarray(result)

View File

@ -4,7 +4,7 @@ import numpy as np
def fix_float(val, eps=1e-3):
return float(val)-eps
return float(val) - eps
def gaussian(img, kernel, sigma):
@ -13,26 +13,27 @@ def gaussian(img, kernel, sigma):
def dog_filter(img, kernel=0, sigma=1.4, k_sigma=1.6, gamma=1):
g1 = gaussian(img, kernel, sigma)
g2 = gaussian(img, kernel, sigma*k_sigma)
g2 = gaussian(img, kernel, sigma * k_sigma)
return g1 - fix_float(gamma) * g2
def xdog(img, kernel, sigma, k_sigma, eps, phi, gamma, color, scale=True):
if color=='gray':
img = np.array(img)
if color == "gray":
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
dog = dog_filter(img, kernel, sigma, k_sigma, gamma)
dog = dog/dog.max()
e = 1+np.tanh(fix_float(phi) * (dog-fix_float(eps)))
e[e>=1] = 1
if color=='gray':
dog = dog / dog.max()
e = 1 + np.tanh(fix_float(phi) * (dog - fix_float(eps)))
e[e >= 1] = 1
if color == "gray":
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
if not scale:
e[e<1] = 0
return Image.fromarray((e*255).astype('uint8'))
e[e < 1] = 0
return Image.fromarray((e * 255).astype("uint8"))
def run(*args):
return xdog(*args)
return xdog(*args)

View File

@ -1,12 +1,14 @@
import cv2
import numpy as np
from PIL import Image
from .utils import tilt_shift
def run(np_img, focus_ratio: float, dof: int):
def run(pil_img, focus_ratio: float, dof: int):
focus_ratio += 5
np_img = np.array(pil_img)
height = np_img.shape[0]
focus_height = round(height * (focus_ratio / 10))

View File

@ -6,29 +6,31 @@ import numpy as np
def tilt_shift(im, focus_height: int, dof: int = 60):
if focus_height < 2*dof:
focus_height = 2*dof
if focus_height > im.shape[0] - 2*dof:
focus_height = im.shape[0] - 2*dof
if focus_height < 2 * dof:
focus_height = 2 * dof
if focus_height > im.shape[0] - 2 * dof:
focus_height = im.shape[0] - 2 * dof
above_focus, below_focus = im[:focus_height,:], im[focus_height:,:]
above_focus = increasing_blur(above_focus[::-1,...], dof)[::-1,...]
above_focus, below_focus = im[:focus_height, :], im[focus_height:, :]
above_focus = increasing_blur(above_focus[::-1, ...], dof)[::-1, ...]
below_focus = increasing_blur(below_focus, dof)
out = np.vstack((above_focus, below_focus))
return out
def increasing_blur(im, dof=60):
BLEND_WIDTH = dof
blur_region = cv2.GaussianBlur(im[dof:,:], ksize=(15,15), sigmaX=0)
if blur_region.shape[0] > dof*2:
blur_region = cv2.GaussianBlur(im[dof:, :], ksize=(15, 15), sigmaX=0)
if blur_region.shape[0] > dof * 2:
blur_region = increasing_blur(blur_region, dof)
blend_col = np.linspace(1.0, 0, num=BLEND_WIDTH)
blend_mask = np.tile(blend_col, (im.shape[1], 1)).T
res = np.zeros_like(im)
res[:dof,:] = im[:dof,:]
res[:dof, :] = im[:dof, :]
# alpha blend region of width BLEND_WIDTH to hide seams between blur layers
res[dof:dof+BLEND_WIDTH,:] = im[dof:dof+BLEND_WIDTH,:] * blend_mask[:, :, None] + \
blur_region[:BLEND_WIDTH,:] * (1-blend_mask[:, :, None])
res[dof+BLEND_WIDTH:,:] = blur_region[BLEND_WIDTH:]
res[dof : dof + BLEND_WIDTH, :] = im[dof : dof + BLEND_WIDTH, :] * blend_mask[
:, :, None
] + blur_region[:BLEND_WIDTH, :] * (1 - blend_mask[:, :, None])
res[dof + BLEND_WIDTH :, :] = blur_region[BLEND_WIDTH:]
return res

View File

@ -10,49 +10,49 @@ except:
from utils import *
HTML_TEMPLATES = {
'resolution': '''<textarea>'''
}
HTML_TEMPLATES = {"resolution": """<textarea>"""}
def run(img, u, d, l, r, mode='fill'):
img = Image.fromarray(img)
def run(img, u, d, l, r, mode="fill"):
new_img, mask = resize_with_mask(
img,
(u, d, l, r),
)
w, h = img.size
w_n, h_n = new_img.size
if u:
new_img.paste(img.resize((w, u), box=(0, 0, w, 0)), box=(l, 0))
if d:
new_img.paste(img.resize((w, d), box=(0, h, w, h)), box=(l, u+h))
new_img.paste(img.resize((w, d), box=(0, h, w, h)), box=(l, u + h))
if l:
new_img.paste(new_img.resize((l, h_n), box=(l+1, 0, l+1, h_n)), box=(0, 0))
new_img.paste(new_img.resize((l, h_n), box=(l + 1, 0, l + 1, h_n)), box=(0, 0))
if r:
new_img.paste(new_img.resize((r, h_n), box=(w+l-1, 0, w+l-1, h_n)), box=(l+w, 0))
return new_img, mask, f'{w_n} x {h_n}'
new_img.paste(
new_img.resize((r, h_n), box=(w + l - 1, 0, w + l - 1, h_n)), box=(l + w, 0)
)
return new_img, mask, f"{w_n} x {h_n}"
if __name__ == '__main__':
img = Image.open('./01491-2023-01-21_d7ff2a1d60_KBlueLeaf_KBlueLeaf_856221927-768x512.png')
if __name__ == "__main__":
img = Image.open(
"./01491-2023-01-21_d7ff2a1d60_KBlueLeaf_KBlueLeaf_856221927-768x512.png"
)
new, mask, _ = run(np.array(img), 64, 192, 64, 64)
w, h = new.size
if w>h:
demo = Image.new('RGB', (w, h*2))
if w > h:
demo = Image.new("RGB", (w, h * 2))
demo.paste(new, box=(0, 0))
demo.paste(mask, box=(0, h))
else:
demo = Image.new('RGB', (w*2, h))
demo = Image.new("RGB", (w * 2, h))
demo.paste(new, box=(0, 0))
demo.paste(mask, box=(w, 0))
new.save('./test-img.png')
mask.save('./test-mask.png')
demo.show()
new.save("./test-img.png")
mask.save("./test-mask.png")
demo.show()

View File

@ -6,16 +6,15 @@ from PIL import Image
def resize_with_mask(
img: Image.Image,
expands: tuple[int, int, int, int]
img: Image.Image, expands: tuple[int, int, int, int]
) -> tuple[Image.Image, Image.Image]:
w, h = img.size
u, d, l, r = expands
new_img = Image.new('RGB', (w+l+r, h+u+d))
new_img = Image.new("RGB", (w + l + r, h + u + d))
new_img.paste(img, (l, u))
mask = Image.new('L', (w+l+r, h+u+d), 255)
mask_none = Image.new('L', (w, h), 0)
mask = Image.new("L", (w + l + r, h + u + d), 255)
mask_none = Image.new("L", (w, h), 0)
mask.paste(mask_none, (l, u))
return new_img, mask
return new_img, mask

View File

@ -9,7 +9,7 @@ from modules.ui_components import FormRow
import gradio as gr
from hakuimg import(
from hakuimg import (
blend,
blur,
color,
@ -26,15 +26,15 @@ from hakuimg import(
from inoutpaint import main as outpaint
'''
"""
UI part
'''
"""
inpaint_base: gr.Image
inpaint_mask: gr.Image
all_btns: List[Tuple[gr.Button, ...]] = []
layers = int(shared.opts.data.get('hakuimg_layer_num', 5))
points = int(shared.opts.data.get('hakuimg_curve_points', 3))
layers = int(shared.opts.data.get("hakuimg_layer_num", 5))
points = int(shared.opts.data.get("hakuimg_curve_points", 3))
class Script(scripts.Script):
@ -47,21 +47,21 @@ class Script(scripts.Script):
def after_component(self, component, **kwargs):
global img_src, all_btns, inpaint_base, inpaint_mask
if isinstance(component, gr.Gallery):
if component.elem_id in {'txt2img_gallery', 'img2img_gallery'}:
if component.elem_id in {"txt2img_gallery", "img2img_gallery"}:
img_src = component
val = kwargs.get("value", "")
id = kwargs.get("elem_id", "")
if id=='img_inpaint_base':
if id == "img_inpaint_base":
inpaint_base = component
if id=='img_inpaint_mask':
if id == "img_inpaint_mask":
inpaint_mask = component
if id in {'extras_tab', 'txt2img_send_to_extras', 'img2img_send_to_extras'}:
with gr.Accordion('HakuImg', open=False):
if id in {"extras_tab", "txt2img_send_to_extras", "img2img_send_to_extras"}:
with gr.Accordion("HakuImg", open=False):
with gr.Column():
with gr.Accordion('Send to Blend', open=False):
with gr.Accordion("Send to Blend", open=False):
btns = []
for i in range(layers, 0, -1):
btn1 = gr.Button(f"Send to Layer{i}")
@ -78,23 +78,36 @@ class Script(scripts.Script):
def add_tab():
print('add tab')
print("add tab")
with gr.Blocks(analytics_enabled=False) as demo:
with FormRow(equal_height=False):
with gr.Column():
with gr.Tabs(elem_id="haku_blend_tabs"):
with gr.TabItem('Blend', elem_id='haku_blend'):
with gr.TabItem("Blend", elem_id="haku_blend"):
all_layers = []
all_alphas = []
all_mask_str = []
all_mask_blur = []
all_mode = []
img_blend_h_slider = gr.Slider(160, 1280, 320, step=10, label="Image preview height", elem_id='haku_img_h_blend')
img_blend_h_slider = gr.Slider(
160,
1280,
320,
step=10,
label="Image preview height",
elem_id="haku_img_h_blend",
)
with gr.Tabs(elem_id="haku_blend_layers_tabs"):
for i in range(layers, 0, -1):
with gr.TabItem(f'Layer{i}', elem_id=f'haku_blend_Layer{i}'):
with gr.TabItem(
f"Layer{i}", elem_id=f"haku_blend_Layer{i}"
):
all_layers.append(
gr.ImageMask(type='numpy', label=f"Layer{i}", elem_id=f'haku_img_blend{i}')
gr.ImageMask(
type="pil",
label=f"Layer{i}",
elem_id=f"haku_img_blend{i}",
)
)
all_alphas.append(
gr.Slider(0, 1, 1, label=f"Layer{i} opacity")
@ -103,248 +116,405 @@ def add_tab():
gr.Slider(0, 32, 4, label=f"Layer{i} mask blur")
)
all_mask_str.append(
gr.Slider(0, 1, 1, label=f"Layer{i} mask strength")
gr.Slider(
0, 1, 1, label=f"Layer{i} mask strength"
)
)
all_mode.append(
gr.Dropdown(blend.blend_methods, value='normal', label='Blend mode')
gr.Dropdown(
blend.blend_methods,
value="normal",
label="Blend mode",
)
)
bg_color = gr.ColorPicker('#FFFFFF', label='background color')
bg_color = gr.ColorPicker("#FFFFFF", label="background color")
expand_btn = gr.Button("refresh", variant="primary")
with gr.TabItem('Effect', elem_id='haku_eff'):
img_eff_h_slider = gr.Slider(160, 1280, 320, step=10, label="Image preview height", elem_id='haku_img_h_eff')
image_eff = gr.Image(type='numpy', label="img", elem_id='haku_img_eff', show_label=False)
with gr.Tabs(elem_id='effect_tabs'):
with gr.TabItem('Color', elem_id='haku_color'):
with gr.TabItem("Effect", elem_id="haku_eff"):
img_eff_h_slider = gr.Slider(
160,
1280,
320,
step=10,
label="Image preview height",
elem_id="haku_img_h_eff",
)
image_eff = gr.Image(
type="pil",
label="img",
elem_id="haku_img_eff",
show_label=False,
)
with gr.Tabs(elem_id="effect_tabs"):
with gr.TabItem("Color", elem_id="haku_color"):
with gr.Row():
temp_slider = gr.Slider(-100, 100, 0, step=1, label="temperature")
hue_slider = gr.Slider(-90, 90, 0, step=1, label="hue")
temp_slider = gr.Slider(
-100, 100, 0, step=1, label="temperature"
)
hue_slider = gr.Slider(
-90, 90, 0, step=1, label="hue"
)
with gr.Row():
bright_slider = gr.Slider(-100, 100, 0, step=1, label="brightness")
contrast_slider = gr.Slider(-100, 100, 0, step=1, label="contrast")
bright_slider = gr.Slider(
-100, 100, 0, step=1, label="brightness"
)
contrast_slider = gr.Slider(
-100, 100, 0, step=1, label="contrast"
)
with gr.Row():
sat_slider = gr.Slider(-100, 100, 0, step=1, label="saturation")
gamma_slider = gr.Slider(0.2, 2.2, 1, step=0.1, label="Gamma")
sat_slider = gr.Slider(
-100, 100, 0, step=1, label="saturation"
)
gamma_slider = gr.Slider(
0.2, 2.2, 1, step=0.1, label="Gamma"
)
with gr.Row():
exposure_offset_slider = gr.Slider(0, 1, 0, label="ExposureOffset")
vignette_slider = gr.Slider(0, 1, 0, label="Vignette")
exposure_offset_slider = gr.Slider(
0, 1, 0, label="ExposureOffset"
)
vignette_slider = gr.Slider(
0, 1, 0, label="Vignette"
)
with gr.Row():
noise_slider = gr.Slider(0, 1, 0, label="Noise")
sharpness_slider = gr.Slider(0, 1, 0, label="Sharpness")
sharpness_slider = gr.Slider(
0, 1, 0, label="Sharpness"
)
with gr.Row():
hdr_slider = gr.Slider(0, 1, 0, label="HDR")
with gr.Row():
color_btn = gr.Button("refresh", variant="primary")
color_rst_btn = gr.Button("reset")
with gr.TabItem('Tone Curve', elem_id='haku_curve'):
with gr.TabItem("Tone Curve", elem_id="haku_curve"):
all_points = [[], [], [], []]
all_curve_defaults = [[], [], [], []]
all_curves = []
with gr.Tabs(elem_id='curve'):
for index, tab in enumerate(['All', 'R', 'G', 'B']):
with gr.Tabs(elem_id="curve"):
for index, tab in enumerate(["All", "R", "G", "B"]):
with gr.TabItem(tab):
for i in range(1, points+1):
for i in range(1, points + 1):
with gr.Row():
all_points[index] += [
gr.Slider(
0, 255, int(255*i/(points+1)),
step=1, label=f'point{i} x'
0,
255,
int(255 * i / (points + 1)),
step=1,
label=f"point{i} x",
),
gr.Slider(
0, 255, int(255*i/(points+1)),
step=1, label=f'point{i} y'
)
0,
255,
int(255 * i / (points + 1)),
step=1,
label=f"point{i} y",
),
]
all_curve_defaults[index] += [int(255*i/(points+1))]*2
all_curve_defaults[index] += [
int(255 * i / (points + 1))
] * 2
all_curves.append(gr.Image())
with gr.Row():
curve_btn = gr.Button("refresh", variant="primary")
curve_rst_btn = gr.Button("reset")
with gr.TabItem('Blur', elem_id='haku_blur'):
with gr.TabItem("Blur", elem_id="haku_blur"):
blur_slider = gr.Slider(0, 128, 8, label="blur")
blur_btn = gr.Button("refresh", variant="primary")
with gr.TabItem('Sketch', elem_id='haku_sketch'):
sk_kernel = gr.Slider(0, 25, 0, step=1, label='kernel size')
with gr.TabItem("Sketch", elem_id="haku_sketch"):
sk_kernel = gr.Slider(
0, 25, 0, step=1, label="kernel size"
)
with gr.Row():
sk_sigma = gr.Slider(1, 5, 1.4, step=0.05, label='sigma')
sk_k_sigma = gr.Slider(1, 5, 1.6, step=0.05, label='k_sigma')
sk_sigma = gr.Slider(
1, 5, 1.4, step=0.05, label="sigma"
)
sk_k_sigma = gr.Slider(
1, 5, 1.6, step=0.05, label="k_sigma"
)
with gr.Row():
sk_eps = gr.Slider(-0.2, 0.2, -0.03, step=0.005, label='epsilon')
sk_phi = gr.Slider(1, 50, 10, step=1, label='phi')
sk_gamma = gr.Slider(0.75, 1, 1, step=0.005, label='gamma')
sk_eps = gr.Slider(
-0.2, 0.2, -0.03, step=0.005, label="epsilon"
)
sk_phi = gr.Slider(1, 50, 10, step=1, label="phi")
sk_gamma = gr.Slider(
0.75, 1, 1, step=0.005, label="gamma"
)
sk_color = gr.Radio(['gray', 'rgb'], value='gray', label='color mode')
sk_scale = gr.Checkbox(False, label='use scale')
sk_color = gr.Radio(
["gray", "rgb"], value="gray", label="color mode"
)
sk_scale = gr.Checkbox(False, label="use scale")
with gr.Row():
sketch_btn = gr.Button("refresh", variant="primary")
sketch_rst_btn = gr.Button("reset")
with gr.TabItem('Pixelize', elem_id='haku_Pixelize'):
p_colors = gr.Slider(2, 256, 128, step=1, label='colors')
p_dot_size = gr.Slider(1, 32, 6, step=1, label='dot size')
p_outline = gr.Slider(0, 10, 1, step=1, label='outline inflating')
p_smooth = gr.Slider(0, 10, 4, step=1, label='Smoothing')
with gr.TabItem("Pixelize", elem_id="haku_Pixelize"):
p_colors = gr.Slider(
2, 256, 128, step=1, label="colors"
)
p_dot_size = gr.Slider(
1, 32, 6, step=1, label="dot size"
)
p_outline = gr.Slider(
0, 10, 1, step=1, label="outline inflating"
)
p_smooth = gr.Slider(
0, 10, 4, step=1, label="Smoothing"
)
p_mode = gr.Radio(
['kmeans', 'dithering', 'kmeans with dithering'],
value='kmeans', label='Color reduce algo'
["kmeans", "dithering", "kmeans with dithering"],
value="kmeans",
label="Color reduce algo",
)
with gr.Row():
pixel_btn = gr.Button("refresh", variant="primary")
pixel_rst_btn = gr.Button("reset")
with gr.TabItem('Glow', elem_id='haku_Glow'):
neon_mode = gr.Radio(['BS', 'BMBL'], value='BS', label='Glow mode')
neon_blur = gr.Slider(2, 128, 16, step=1, label='range')
neon_str = gr.Slider(0, 1, 1, step=0.05, label='strength')
with gr.TabItem("Glow", elem_id="haku_Glow"):
neon_mode = gr.Radio(
["BS", "BMBL"], value="BS", label="Glow mode"
)
neon_blur = gr.Slider(2, 128, 16, step=1, label="range")
neon_str = gr.Slider(
0, 1, 1, step=0.05, label="strength"
)
with gr.Row():
neon_btn = gr.Button("refresh", variant="primary")
neon_rst_btn = gr.Button("reset")
with gr.TabItem('Chromatic', elem_id='haku_Chromatic'):
with gr.TabItem("Chromatic", elem_id="haku_Chromatic"):
chromatic_slider = gr.Slider(0, 3, 1, label="chromatic")
chromatic_blur = gr.Checkbox(label="Blur", value=False)
chromatic_btn = gr.Button("refresh", variant="primary")
with gr.TabItem("Lens distortion (Fisheye)", elem_id="haku_LensDistortion"):
with gr.TabItem(
"Lens distortion (Fisheye)",
elem_id="haku_LensDistortion",
):
lens_distortion_k1_slider = gr.Slider(
-1, 1, 0,
-1,
1,
0,
label="Concavity of distortion of circles",
)
lens_distortion_k2_slider = gr.Slider(
-1, 1, 0,
-1,
1,
0,
label="Amplification of distortion of circles",
)
lens_distortion_btn = gr.Button("refresh", variant="primary")
lens_distortion_btn = gr.Button(
"refresh", variant="primary"
)
with gr.TabItem("Tilt shift", elem_id="haku_TiltShift"):
tilt_shift_focus_ratio = gr.Slider(-3, 3, 0, step=0.5, label="Positioning the effect on the y-axis")
tilt_shift_dof = gr.Slider(10, 100, 60, step=1, label="The width of the focus region in pixels")
tilt_shift_focus_ratio = gr.Slider(
-3,
3,
0,
step=0.5,
label="Positioning the effect on the y-axis",
)
tilt_shift_dof = gr.Slider(
10,
100,
60,
step=1,
label="The width of the focus region in pixels",
)
tilt_shift_btn = gr.Button("refresh", variant="primary")
with gr.TabItem('Other'):
img_other_h_slider = gr.Slider(160, 1280, 320, step=10, label="Image preview height", elem_id='haku_img_h_oth')
image_other = gr.Image(type='numpy', label="img", elem_id='haku_img_other', show_label=False)
with gr.Tabs(elem_id='function list'):
with gr.TabItem('InOutPaint'):
iop_u = gr.Slider(0, 512, 0, step=64, label='fill up')
iop_d = gr.Slider(0, 512, 0, step=64, label='fill down')
iop_l = gr.Slider(0, 512, 0, step=64, label='fill left')
iop_r = gr.Slider(0, 512, 0, step=64, label='fill right')
with gr.TabItem("Other"):
img_other_h_slider = gr.Slider(
160,
1280,
320,
step=10,
label="Image preview height",
elem_id="haku_img_h_oth",
)
image_other = gr.Image(
type="pil",
label="img",
elem_id="haku_img_other",
show_label=False,
)
with gr.Tabs(elem_id="function list"):
with gr.TabItem("InOutPaint"):
iop_u = gr.Slider(0, 512, 0, step=64, label="fill up")
iop_d = gr.Slider(0, 512, 0, step=64, label="fill down")
iop_l = gr.Slider(0, 512, 0, step=64, label="fill left")
iop_r = gr.Slider(
0, 512, 0, step=64, label="fill right"
)
iop_btn = gr.Button("refresh", variant="primary")
with gr.TabItem("Flip"):
flip_axis = gr.Radio(["horizontal", "vertical"], value="horizontal", label="Axis")
flip_axis = gr.Radio(
["horizontal", "vertical"],
value="horizontal",
label="Axis",
)
flip_btn = gr.Button("refresh", variant="primary")
with gr.TabItem("Custom EXIF"):
custom_exif_area = gr.TextArea(label="Custom parameters")
custom_exif_btn = gr.Button("refresh", variant="primary")
custom_exif_area = gr.TextArea(
label="Custom parameters"
)
custom_exif_btn = gr.Button(
"refresh", variant="primary"
)
with gr.Column():
img_out_h_slider = gr.Slider(160, 1280, 420, step=10, label="Image preview height", elem_id='haku_img_h_out')
res_info = gr.Textbox(label='Resolution')
img_out_h_slider = gr.Slider(
160,
1280,
420,
step=10,
label="Image preview height",
elem_id="haku_img_h_out",
)
res_info = gr.Textbox(label="Resolution")
image_out = gr.Image(
interactive=False,
type='pil',
type="pil",
label="haku_output",
elem_id='haku_out'
elem_id="haku_out",
)
image_mask = gr.Image(visible=False)
with gr.Row():
send_btns = gpc.create_buttons(["img2img", "inpaint", "extras"])
send_ip_b = gr.Button("Send to inpaint upload", elem_id='send_inpaint_base')
send_ip_b = gr.Button(
"Send to inpaint upload", elem_id="send_inpaint_base"
)
with gr.Row():
with gr.Accordion('Send to Blend', open=False):
with gr.Accordion("Send to Blend", open=False):
send_blends = []
for i in range(layers, 0, -1):
send_blends.append(gr.Button(f"Send to Layer{i}", elem_id=f'send_haku_blend{i}'))
send_eff = gr.Button("Send to Effect", elem_id='send_haku_blur')
send_blends.append(
gr.Button(
f"Send to Layer{i}", elem_id=f"send_haku_blend{i}"
)
)
send_eff = gr.Button("Send to Effect", elem_id="send_haku_blur")
#preview height slider
img_blend_h_slider.change(None, img_blend_h_slider, _js=f'get_change_height("haku_img_blend")')
img_eff_h_slider.change(None, img_eff_h_slider, _js=f'get_change_height("haku_img_eff")')
img_other_h_slider.change(None, img_other_h_slider, _js=f'get_change_height("haku_img_other")')
img_out_h_slider.change(None, img_out_h_slider, _js=f'get_change_height("haku_out")')
image_out.change(lambda x:f'{x.width} x {x.height}', image_out, res_info)
# preview height slider
img_blend_h_slider.change(
None, img_blend_h_slider, _js=f'get_change_height("haku_img_blend")'
)
img_eff_h_slider.change(
None, img_eff_h_slider, _js=f'get_change_height("haku_img_eff")'
)
img_other_h_slider.change(
None, img_other_h_slider, _js=f'get_change_height("haku_img_other")'
)
img_out_h_slider.change(
None, img_out_h_slider, _js=f'get_change_height("haku_out")'
)
image_out.change(lambda x: f"{x.width} x {x.height}", image_out, res_info)
image_out.change(None, img_out_h_slider, _js=f'get_change_height("haku_out")')
# blend
all_blend_set = [bg_color]
all_blend_set += all_alphas+all_mask_blur+all_mask_str+all_mode
all_blend_set += all_alphas + all_mask_blur + all_mask_str + all_mode
all_blend_input = all_blend_set + all_layers
for component in all_blend_set:
_release_if_possible(component, blend.run(layers), all_blend_input, image_out)
_release_if_possible(
component, blend.run(layers), all_blend_input, image_out
)
expand_btn.click(blend.run(layers), all_blend_input, image_out)
#blur
# blur
all_blur_input = [image_eff, blur_slider]
_release_if_possible(blur_slider, blur.run, all_blur_input, outputs=image_out)
blur_btn.click(blur.run, all_blur_input, outputs=image_out)
#chromatic
# chromatic
all_chromatic_set = [chromatic_slider, chromatic_blur]
all_chromatic_input = [image_eff] + all_chromatic_set
for component in all_chromatic_set:
_release_if_possible(component, chromatic.run, all_chromatic_input, image_out)
_release_if_possible(
component, chromatic.run, all_chromatic_input, image_out
)
chromatic_btn.click(chromatic.run, all_chromatic_input, image_out)
#color
# color
all_color_set = [
bright_slider, contrast_slider, sat_slider,
temp_slider, hue_slider, gamma_slider,
exposure_offset_slider, hdr_slider, noise_slider,
sharpness_slider, vignette_slider
bright_slider,
contrast_slider,
sat_slider,
temp_slider,
hue_slider,
gamma_slider,
exposure_offset_slider,
hdr_slider,
noise_slider,
sharpness_slider,
vignette_slider,
]
all_color_input = [image_eff] + all_color_set
for component in all_color_set:
_release_if_possible(component, color.run, all_color_input, image_out)
color_btn.click(color.run, all_color_input, image_out)
color_rst_btn.click(lambda:[0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0, 0], None, all_color_set)
color_rst_btn.click(
lambda: [0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0, 0], None, all_color_set
)
#curve
# curve
all_curve_set = sum(all_points, start=[])
all_curve_defaults = sum(all_curve_defaults, [])
all_curve_input = [image_eff] + all_curve_set
for index, components in enumerate(all_points):
for component in components:
_release_if_possible(component, curve.curve_img, components, all_curves[index])
_release_if_possible(
component, curve.curve_img, components, all_curves[index]
)
curve_btn.click(curve.curve_img, components, all_curves[index])
curve_btn.click(curve.run(points), all_curve_input, image_out)
curve_rst_btn.click(lambda: all_curve_defaults, None, all_curve_set)
#sketch
# sketch
all_sk_set = [
sk_kernel, sk_sigma, sk_k_sigma, sk_eps, sk_phi, sk_gamma, sk_color, sk_scale
sk_kernel,
sk_sigma,
sk_k_sigma,
sk_eps,
sk_phi,
sk_gamma,
sk_color,
sk_scale,
]
all_sk_input = [image_eff] + all_sk_set
for component in all_sk_set:
_release_if_possible(component, sketch.run, all_sk_input, image_out)
sketch_btn.click(sketch.run, all_sk_input, image_out)
sketch_rst_btn.click(lambda: [0, 1.4, 1.6, -0.03, 10, 1, 'gray', False], None, all_sk_set)
sketch_rst_btn.click(
lambda: [0, 1.4, 1.6, -0.03, 10, 1, "gray", False], None, all_sk_set
)
#pixelize
all_p_set = [
p_colors, p_dot_size, p_smooth, p_outline, p_mode
]
# pixelize
all_p_set = [p_colors, p_dot_size, p_smooth, p_outline, p_mode]
all_p_input = [image_eff] + all_p_set
for component in all_p_set:
_release_if_possible(component, pixel.run, all_p_input, image_out)
pixel_btn.click(pixel.run, all_p_input, image_out)
pixel_rst_btn.click(lambda: [16, 8, 0, 5, 'kmeans'], None, all_p_set)
pixel_rst_btn.click(lambda: [16, 8, 0, 5, "kmeans"], None, all_p_set)
#neon
# neon
all_neon_set = [
neon_blur, neon_str, neon_mode,
neon_blur,
neon_str,
neon_mode,
]
all_neon_input = [image_eff] + all_neon_set
for component in all_neon_set:
_release_if_possible(component, neon.run, all_neon_input, image_out)
neon_btn.click(neon.run, all_neon_input, image_out)
neon_rst_btn.click(lambda: [16, 1, 'BS'], None, all_neon_set)
neon_rst_btn.click(lambda: [16, 1, "BS"], None, all_neon_set)
#lens distortion
# lens distortion
all_ = [
lens_distortion_k1_slider,
lens_distortion_k2_slider,
@ -354,57 +524,69 @@ def add_tab():
_release_if_possible(component, lens_distortion.run, input_, image_out)
lens_distortion_btn.click(lens_distortion.run, input_, image_out)
#tilt shift
# tilt shift
all_ = [tilt_shift_focus_ratio, tilt_shift_dof]
input_ = [image_eff] + all_
for component in all_:
_release_if_possible(component, tilt_shift.run, input_, image_out)
tilt_shift_btn.click(tilt_shift.run, input_, image_out)
#iop
all_iop_set = [
iop_u, iop_d, iop_l, iop_r
]
# iop
all_iop_set = [iop_u, iop_d, iop_l, iop_r]
all_iop_input = [image_other] + all_iop_set
for component in all_iop_set:
_release_if_possible(component, outpaint.run, all_iop_input, [image_out, image_mask])
_release_if_possible(
component, outpaint.run, all_iop_input, [image_out, image_mask]
)
iop_btn.click(outpaint.run, all_iop_input, [image_out, image_mask])
#flip axis
# flip axis
all_ = [flip_axis]
input_ = [image_other] + all_
for component in all_:
_release_if_possible(component, flip.run, input_, image_out)
flip_btn.click(flip.run, input_, image_out)
#custom exif
# custom exif
all_ = [custom_exif_area]
input_ = [image_other] + all_
custom_exif_btn.click(custom_exif.run, input_, image_out)
#send
# send
print(all_btns)
for btns, btn3, img_src in all_btns:
for btn, img in zip(btns, all_layers):
btn.click(gpc.image_from_url_text, img_src, img, _js="extract_image_from_gallery")
btn3.click(gpc.image_from_url_text, img_src, image_eff, _js="extract_image_from_gallery")
btn.click(
gpc.image_from_url_text,
img_src,
img,
_js="extract_image_from_gallery",
)
btn3.click(
gpc.image_from_url_text,
img_src,
image_eff,
_js="extract_image_from_gallery",
)
gpc.bind_buttons(send_btns, image_out, None)
for btn, img in zip(btns, all_layers):
btn.click(lambda x:x, image_out, img)
btn.click(None, _js = 'switch_to_haku_blend')
btn.click(lambda x: x, image_out, img)
btn.click(None, _js="switch_to_haku_blend")
for layer, send_btn in zip(all_layers, send_blends):
send_btn.click(lambda x:x, image_out, layer)
send_btn.click(None, _js='switch_to_haku_blend')
send_btn.click(lambda x: x, image_out, layer)
send_btn.click(None, _js="switch_to_haku_blend")
send_ip_b.click(lambda *x:x, [image_out, image_mask], [inpaint_base, inpaint_mask])
send_ip_b.click(None, _js = 'switch_to_inpaint_upload')
send_ip_b.click(
lambda *x: x, [image_out, image_mask], [inpaint_base, inpaint_mask]
)
send_ip_b.click(None, _js="switch_to_inpaint_upload")
send_eff.click(lambda x:x, image_out, image_eff)
send_eff.click(None, _js = 'switch_to_haku_eff')
send_eff.click(lambda x: x, image_out, image_eff)
send_eff.click(None, _js="switch_to_haku_eff")
return (demo , "HakuImg", "haku_img"),
return ((demo, "HakuImg", "haku_img"),)
def _release_if_possible(component, *args, **kwargs):
@ -415,22 +597,16 @@ def _release_if_possible(component, *args, **kwargs):
def on_ui_settings():
section = ('haku-img', "HakuImg")
section = ("haku-img", "HakuImg")
shared.opts.add_option(
"hakuimg_layer_num",
shared.OptionInfo(
5,
"Total num of layers (reload required)",
section=section
)
shared.OptionInfo(5, "Total num of layers (reload required)", section=section),
)
shared.opts.add_option(
"hakuimg_curve_points",
shared.OptionInfo(
3,
"Total num of point for curve (reload required)",
section=section
)
3, "Total num of point for curve (reload required)", section=section
),
)