fix save. Add send to inpainting option
parent 0e8a518a3d
commit 5dc53eb994
@@ -311,7 +311,9 @@ def explore_onnx_faceswap_model(model_path):
    return df


def upscaler_ui():
    with gr.Tab(f"Upscaler"):
        with gr.Tab(f"Enhancements"):
            gr.Markdown(
                """Upscaling is performed on the whole image. Upscaling happens before face restoration.""")
            with gr.Row():
                face_restorer_name = gr.Radio(
                    label="Restore Face",
@@ -330,18 +332,36 @@ def upscaler_ui():
            upscaler_visibility = gr.Slider(
                0, 1, 1, step=0.1, label="Upscaler visibility (if scale = 1)"
            )
        with gr.Accordion(f"Post Inpainting (Beta)", open=True):
            gr.Markdown(
                """Inpainting sends the image to inpainting with a mask on the face (once for each face). This is done before upscaling and face restoration.""")
            inpainting_denoising_strength = gr.Slider(
                0, 1, 0, step=0.01, label="Denoising strength (will send face to img2img after processing)"
            )

            inpainting_denoising_prompt = gr.Textbox("Portrait of a [gender]", label="Inpainting prompt, use [gender] instead of man or woman")
            inpainting_denoising_negative_prompt = gr.Textbox("", label="Inpainting negative prompt, use [gender] instead of man or woman")
            inpainting_denoising_steps = gr.Slider(
                1, 150, 20, step=1, label="Inpainting steps"
            )
    return [
        face_restorer_name,
        face_restorer_visibility,
        upscaler_name,
        upscaler_scale,
        upscaler_visibility,
        inpainting_denoising_strength,
        inpainting_denoising_prompt,
        inpainting_denoising_negative_prompt,
        inpainting_denoising_steps
    ]
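Editor's note: the nine components returned here are consumed positionally by the script and end up in the UpscaleOptions dataclass extended later in this commit. A minimal sketch of that packing, assuming a hypothetical build_upscale_options helper (the face-restorer and upscaler field names are not all visible in this diff and are therefore omitted or assumed):

```python
# Hypothetical helper, not part of this commit: packs the values returned
# by upscaler_ui() into the UpscaleOptions dataclass shown further down.
def build_upscale_options(face_restorer_name, face_restorer_visibility,
                          upscaler_name, upscaler_scale, upscaler_visibility,
                          inpainting_denoising_strength, inpainting_denoising_prompt,
                          inpainting_denoising_negative_prompt, inpainting_denoising_steps):
    # Face-restorer fields are omitted because their dataclass names are not
    # visible in this hunk; the inpainting field spelling follows the dataclass.
    return UpscaleOptions(
        upscaler_name=upscaler_name,
        scale=int(upscaler_scale),
        upscale_visibility=upscaler_visibility,
        inpainting_denoising_strengh=inpainting_denoising_strength,
        inpainting_prompt=inpainting_denoising_prompt,
        inpainting_negative_prompt=inpainting_denoising_negative_prompt,
        inpainting_steps=int(inpainting_denoising_steps),
    )
```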

def tools_ui():
    models = get_models()
    with gr.Tab("Tools"):
        with gr.Tab("Build"):
            gr.Markdown(
                """Build a face based on a batch list of images. Will blend the resulting face and store the checkpoint in the roop/faces directory.""")
            with gr.Row():
                batch_files = gr.components.File(
                    type="file",
@@ -357,6 +377,9 @@ def tools_ui():
                )
            generate_checkpoint_btn = gr.Button("Save")
        with gr.Tab("Compare"):
            gr.Markdown(
                """Give a similarity score between two images (only first face is compared).""")

            with gr.Row():
                img1 = gr.components.Image(type="pil", label="Face 1")
                img2 = gr.components.Image(type="pil", label="Face 2")
@@ -365,6 +388,8 @@ def tools_ui():
                interactive=False, label="Similarity", value="0"
            )
        with gr.Tab("Extract"):
            gr.Markdown(
                """Extract all faces from a batch of images. Will apply the enhancements configured in the tools enhancement tab.""")
            with gr.Row():
                extracted_source_files = gr.components.File(
                    type="file",
@@ -412,6 +437,9 @@ class FaceSwapScript(scripts.Script):
    def faceswap_unit_ui(self, is_img2img, unit_num=1):
        with gr.Tab(f"Face {unit_num}"):
            with gr.Column():
                gr.Markdown(
                    """Reference is an image. Its first face will be extracted.
                First face of each batch source will be extracted and used as input (or blended if blending is activated).""")
                with gr.Row():
                    img = gr.components.Image(type="pil", label="Reference")
                    batch_files = gr.components.File(
@@ -420,7 +448,10 @@ class FaceSwapScript(scripts.Script):
                        label="Batch Sources Images",
                        optional=True,
                    )
                gr.Markdown(
                    """Face checkpoint built with the checkpoint builder in tools. Will overwrite reference image.""")
                with gr.Row() :

                    face = gr.inputs.Dropdown(
                        choices=get_face_checkpoints(),
                        label="Face Checkpoint (precedence over reference face)",
@@ -438,6 +469,7 @@ class FaceSwapScript(scripts.Script):
                blend_faces = gr.Checkbox(
                    True, placeholder="Blend Faces", label="Blend Faces ((Source|Checkpoint)+References = 1)"
                )
                gr.Markdown("""Discard images with low similarity or no faces :""")
                min_sim = gr.Slider(0, 1, 0, step=0.01, label="Min similarity")
                min_ref_sim = gr.Slider(
                    0, 1, 0, step=0.01, label="Min reference similarity"
@@ -447,6 +479,7 @@ class FaceSwapScript(scripts.Script):
                    placeholder="Which face to swap (comma separated), start from 0 (by gender if same_gender is enabled)",
                    label="Comma separated face number(s)",
                )
                gr.Markdown("""Configure swapping. Swapping can occur before img2img, after, or both :""", visible=is_img2img)
                swap_in_source = gr.Checkbox(
                    False,
                    placeholder="Swap face in source image",
@@ -564,7 +597,7 @@ class FaceSwapScript(scripts.Script):
                result_infos.append(f"{info}, similarity = {result.similarity}, ref_similarity = {result.ref_similarity}")
                result_images.append(result.image)
            else:
                logger.info(
                logger.warning(
                    f"skip, similarity too low, sim = {result.similarity} (target {unit.min_sim}) ref sim = {result.ref_similarity} (target = {unit.min_ref_sim})"
                )
        logger.info(f"{len(result_images)} images processed")
@@ -593,8 +626,7 @@ class FaceSwapScript(scripts.Script):
            if self.upscale_options is not None:
                result_images[i] = upscale_image(img, self.upscale_options)
            if p.outpath_samples :
                save_image(result_images[i], p.outpath_samples, "swapped")

                save_image(result_images[i], p.outpath_samples, seed=int(p.seed), info=result_infos[i], basename="swapped")
        if len(result_images) > 1:
            try :
                # prepend swapped grid to result_images :

@@ -75,6 +75,9 @@ def compare_faces(img1: Image.Image, img2: Image.Image) -> float:
ANALYSIS_MODEL = None


class FaceModelException(Exception):
    pass


def getAnalysisModel():
    """
    Retrieves the analysis model for face analysis.
@@ -86,12 +89,15 @@ def getAnalysisModel():

    # Check if the analysis model has been initialized
    if ANALYSIS_MODEL is None:
        logger.info("Load analysis model, will take some time.")
        # Initialize the analysis model with the specified name and providers
        ANALYSIS_MODEL = insightface.app.FaceAnalysis(
            name="buffalo_l", providers=providers
        )

        try :
            logger.info("Load analysis model, will take some time.")
            # Initialize the analysis model with the specified name and providers
            ANALYSIS_MODEL = insightface.app.FaceAnalysis(
                name="buffalo_l", providers=providers
            )
        except Exception as e :
            logger.error("Loading of swapping model failed, please check the requirements (On Windows, download and install Visual Studio. During the install, make sure to include the Python and C++ packages.)")
            raise FaceModelException()
    # Return the analysis model
    return ANALYSIS_MODEL
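Editor's note: with the new FaceModelException, callers can tell a broken insightface installation apart from other errors. A minimal usage sketch (the prepare() call follows insightface's documented FaceAnalysis API; the surrounding handling is illustrative and not part of this commit):

```python
try:
    analysis_model = getAnalysisModel()
    # ctx_id selects the execution provider, det_size the detector resolution,
    # as in insightface's own FaceAnalysis examples.
    analysis_model.prepare(ctx_id=0, det_size=(640, 640))
except FaceModelException:
    # Loading failed, typically because onnxruntime or the build tools
    # mentioned in the error message are missing.
    print("Face analysis model unavailable; check the installation requirements.")
```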
@@ -114,10 +120,13 @@ def getFaceSwapModel(model_path: str):

    # Check if the current model path is different from the new model path.
    if CURRENT_FS_MODEL_PATH is None or CURRENT_FS_MODEL_PATH != model_path:
        CURRENT_FS_MODEL_PATH = model_path
        # Initializes the face swap model using the specified model path.
        FS_MODEL = insightface.model_zoo.get_model(model_path, providers=providers)

        logger.info("Load swapping model, will take some time.")
        try :
            CURRENT_FS_MODEL_PATH = model_path
            # Initializes the face swap model using the specified model path.
            FS_MODEL = insightface.model_zoo.get_model(model_path, providers=providers)
        except Exception as e :
            logger.error("Loading of swapping model failed, please check the requirements (On Windows, download and install Visual Studio. During the install, make sure to include the Python and C++ packages.)")
    return FS_MODEL
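Editor's note: both loaders use the same lazy, module-level cache; getFaceSwapModel() additionally reloads only when the requested path changes. An illustrative use (the path is a placeholder, not the extension's real model location):

```python
import os

# Placeholder path for illustration; the extension builds the real path
# from its model list.
model_path = os.path.join("models", "roop", "example_swapper.onnx")
face_swapper = getFaceSwapModel(model_path)
# A second call with the same path returns the cached model instead of reloading it.
assert getFaceSwapModel(model_path) is face_swapper
```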
@@ -282,49 +291,51 @@ def swap_face(

    """
    return_result = ImageResult(target_img, {}, {})
    target_img = cv2.cvtColor(np.array(target_img), cv2.COLOR_RGB2BGR)
    gender = source_face["gender"]
    logger.info("Source Gender %s", gender)
    if source_face is not None:
        result = target_img
        model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), model)
        face_swapper = getFaceSwapModel(model_path)
        target_faces = get_faces(target_img)
        logger.info("Target faces count : %s", len(target_faces))
    try :
        target_img = cv2.cvtColor(np.array(target_img), cv2.COLOR_RGB2BGR)
        gender = source_face["gender"]
        logger.info("Source Gender %s", gender)
        if source_face is not None:
            result = target_img
            model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), model)
            face_swapper = getFaceSwapModel(model_path)
            target_faces = get_faces(target_img)
            logger.info("Target faces count : %s", len(target_faces))

        if same_gender:
            target_faces = [x for x in target_faces if x["gender"] == gender]
            logger.info("Target Gender Matches count %s", len(target_faces))

        for i, swapped_face in enumerate(target_faces):
            logger.info(f"swap face {i}")
            if i in faces_index:
                result = face_swapper.get(result, swapped_face, source_face)

        result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
        return_result.image = result_image

        try:
            result_faces = get_faces(
                cv2.cvtColor(np.array(result_image), cv2.COLOR_RGB2BGR)
            )
            if same_gender:
                result_faces = [x for x in result_faces if x["gender"] == gender]
                target_faces = [x for x in target_faces if x["gender"] == gender]
                logger.info("Target Gender Matches count %s", len(target_faces))

            for i, swapped_face in enumerate(result_faces):
                logger.info(f"compare face {i}")
                if i in faces_index and i < len(target_faces):
                    return_result.similarity[i] = cosine_similarity_face(
                        source_face, swapped_face
                    )
                    return_result.ref_similarity[i] = cosine_similarity_face(
                        reference_face, swapped_face
                    )
            for i, swapped_face in enumerate(target_faces):
                logger.info(f"swap face {i}")
                if i in faces_index:
                    result = face_swapper.get(result, swapped_face, source_face)

            logger.info(f"similarity {return_result.similarity}")
            logger.info(f"ref similarity {return_result.ref_similarity}")
            result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
            return_result.image = result_image

        except Exception as e:
            logger.error(str(e))
            try:
                result_faces = get_faces(
                    cv2.cvtColor(np.array(result_image), cv2.COLOR_RGB2BGR)
                )
                if same_gender:
                    result_faces = [x for x in result_faces if x["gender"] == gender]

                for i, swapped_face in enumerate(result_faces):
                    logger.info(f"compare face {i}")
                    if i in faces_index and i < len(target_faces):
                        return_result.similarity[i] = cosine_similarity_face(
                            source_face, swapped_face
                        )
                        return_result.ref_similarity[i] = cosine_similarity_face(
                            reference_face, swapped_face
                        )

                logger.info(f"similarity {return_result.similarity}")
                logger.info(f"ref similarity {return_result.ref_similarity}")

            except Exception as e:
                logger.error("Swapping failed %s", e)
    except Exception as e :
        logger.error("Conversion failed %s", e)
    return return_result
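Editor's note: swap_face() still returns an ImageResult whose similarity and ref_similarity dicts are keyed by face index; the script then filters results against the Min similarity sliders. A small sketch of that kind of post-filtering (thresholds are illustrative; the real values come from the unit's min_sim / min_ref_sim settings):

```python
def keep_result(result, min_sim=0.0, min_ref_sim=0.0):
    # Reject the swap if no face could be compared, or if any compared
    # face falls below either similarity threshold.
    if not result.similarity or not result.ref_similarity:
        return False
    return (min(result.similarity.values()) >= min_sim
            and min(result.ref_similarity.values()) >= min_ref_sim)
```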
@@ -2,6 +2,7 @@ from modules.face_restoration import FaceRestoration
from modules.upscaler import UpscalerData
from dataclasses import dataclass
from typing import List, Union, Dict, Set, Tuple
from scripts import imgutils
from scripts.roop_logging import logger
from PIL import Image
import numpy as np
@@ -10,6 +11,7 @@ from modules import scripts, shared, processing
from modules.processing import (Processed, StableDiffusionProcessing,
                                StableDiffusionProcessingImg2Img,
                                StableDiffusionProcessingTxt2Img)
import cv2

@dataclass
class UpscaleOptions:
@@ -18,7 +20,11 @@ class UpscaleOptions:
    upscaler_name: str = ""
    scale: int = 1
    upscale_visibility: float = 0.5

    inpainting_denoising_strengh : float = 0
    inpainting_prompt : str = ""
    inpainting_negative_prompt : str = ""
    inpainting_steps : int = 20

    @property
    def upscaler(self) -> UpscalerData:
@@ -38,6 +44,13 @@ class UpscaleOptions:
def upscale_image(image: Image.Image, upscale_options: UpscaleOptions):
    result_image = image
    try :
        if upscale_options.inpainting_denoising_strengh > 0 :
            result_image = img2img_diffusion(image,
                inpainting_prompt=upscale_options.inpainting_prompt,
                inpainting_negative_prompt=upscale_options.inpainting_negative_prompt,
                inpainting_denoising_strength=upscale_options.inpainting_denoising_strengh,
                inpainting_steps=upscale_options.inpainting_steps)

        if upscale_options.upscaler is not None and upscale_options.upscaler.name != "None":
            original_image = result_image.copy()
            logger.info(
@@ -62,18 +75,78 @@ def upscale_image(image: Image.Image, upscale_options: UpscaleOptions):
            result_image = Image.blend(
                original_image, restored_image, upscale_options.restorer_visibility
            )

    except Exception as e:
        logger.info("Failed to upscale %s", e)
        logger.error("Failed to upscale %s", e)

    return result_image


def img2img_diffusion(img, prompt) :
    faces = swapper.get_faces(imgutils.pil_to_cv2(img))
    if faces:
        for face in faces:
            bbox = face.bbox.astype(int)
            x_min, y_min, x_max, y_max = bbox
            face_image = img.crop((x_min, y_min, x_max, y_max))
            i2i_p = StableDiffusionProcessingImg2Img([face_image], width = img.width, height = img.height, prompt = prompt, denoising_strength=0.1)
            i2i_processed = processing.process_images(i2i_p)
            return i2i_processed.images


def resize_bbox(bbox):
    x_min, y_min, x_max, y_max = bbox
    x_min = int(x_min // 8) * 8 if x_min % 8 != 0 else x_min
    y_min = int(y_min // 8) * 8 if y_min % 8 != 0 else y_min
    x_max = int(x_max // 8 + 1) * 8 if x_max % 8 != 0 else x_max
    y_max = int(y_max // 8 + 1) * 8 if y_max % 8 != 0 else y_max
    return x_min, y_min, x_max, y_max
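Editor's note: resize_bbox() snaps a face box outward so each edge lands on a multiple of 8, the granularity Stable Diffusion latents use: minima are floored to the grid, maxima are ceiled, and values already on the grid pass through unchanged. Two quick illustrative checks, assuming the function above is importable:

```python
# Mins floor to the 8-pixel grid, maxes ceil to it.
assert resize_bbox((13, 27, 101, 250)) == (8, 24, 104, 256)
# Coordinates already on the grid are left unchanged.
assert resize_bbox((16, 24, 104, 256)) == (16, 24, 104, 256)
```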

def get_ldsr() -> UpscalerData:
    for upscaler in shared.sd_upscalers:
        if upscaler.name == "LDSR":
            return upscaler
    return None


def resize_small_image(img: Image.Image, min_resolution=512, use_ldsr = True):
    width, height = img.size
    if min(width, height) > min_resolution:
        return img
    k = float(min_resolution) / float(min(width, height))
    target_width = int(round(width * k))
    target_height = int(round(height * k))
    if not use_ldsr :
        resized_img = img.resize((target_width, target_height), resample=Image.Resampling.LANCZOS)
    else :
        logger.info("Upscale face with LDSR")
        resized_img = get_ldsr().scaler.upscale(
            img, k, get_ldsr().data_path
        )
    return resized_img
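Editor's note: resize_small_image() only touches images whose smaller side is below min_resolution and scales both sides by the same factor, using LDSR when requested and plain LANCZOS otherwise. A small illustration with use_ldsr=False so it runs without the LDSR upscaler installed:

```python
from PIL import Image

img = Image.new("RGB", (256, 384))
out = resize_small_image(img, min_resolution=512, use_ldsr=False)
# k = 512 / 256 = 2.0, so both sides are doubled.
assert out.size == (512, 768)
```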

def create_mask(image, box_coords):
    width, height = image.size
    mask = Image.new("L", (width, height), 255)
    x1, y1, x2, y2 = box_coords
    for x in range(width):
        for y in range(height):
            if x1 <= x <= x2 and y1 <= y <= y2:
                mask.putpixel((x, y), 255)
            else:
                mask.putpixel((x, y), 0)
    return mask
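Editor's note: create_mask() visits every pixel to build a white-rectangle-on-black mask for the face box. An equivalent, much faster variant (a sketch, not part of this commit) lets PIL draw the rectangle directly:

```python
from PIL import Image, ImageDraw

def create_mask_fast(image, box_coords):
    # Black canvas with a white filled rectangle over the face box,
    # the same output as the per-pixel loop above.
    mask = Image.new("L", image.size, 0)
    ImageDraw.Draw(mask).rectangle(list(box_coords), fill=255)
    return mask
```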

def img2img_diffusion(img : Image.Image, inpainting_prompt : str, inpainting_denoising_strength : float = 0.1, inpainting_negative_prompt : str = "", inpainting_steps : int = 20) -> Image.Image :
    try :
        logger.info("send faces to image to image")
        img = img.copy()
        faces = swapper.get_faces(imgutils.pil_to_cv2(img))
        if faces:
            for face in faces:
                bbox = face.bbox.astype(int)
                mask = create_mask(img, bbox)
                prompt = inpainting_prompt.replace("[gender]", "man" if face["gender"] == 1 else "woman")
                negative_prompt = inpainting_negative_prompt.replace("[gender]", "man" if face["gender"] == 1 else "woman")

                logger.info("Denoising prompt : %s", prompt)
                logger.info("Denoising strength : %s", inpainting_denoising_strength)
                i2i_p = StableDiffusionProcessingImg2Img([img], steps=inpainting_steps, width=img.width, inpainting_fill=1, inpaint_full_res=True, height=img.height, mask=mask, prompt=prompt, negative_prompt=negative_prompt, denoising_strength=inpainting_denoising_strength)
                i2i_processed = processing.process_images(i2i_p)
                images = i2i_processed.images
                if len(images) > 0 :
                    img = images[0]
        return img
    except Exception as e :
        logger.error("Failed to apply img2img to face : %s", e)
        import traceback
        traceback.print_exc()
        raise e
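Editor's note: the new img2img_diffusion() expects the [gender] placeholder in both prompts and substitutes it per detected face before sending the masked image through img2img. An illustrative call (the file names are placeholders, and it only runs inside a WebUI session since it goes through processing.process_images):

```python
from PIL import Image

portrait = Image.open("portrait.png")  # placeholder input image
result = img2img_diffusion(
    portrait,
    inpainting_prompt="Photo of a [gender], detailed face",
    inpainting_denoising_strength=0.4,
    inpainting_negative_prompt="blurry, deformed [gender]",
    inpainting_steps=20,
)
result.save("portrait_inpainted.png")
```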