fix blending and reference problem

pull/152/head
Tran Xen 2023-07-14 16:50:22 +02:00
parent 44acb4cb47
commit de89041da7
4 changed files with 75 additions and 49 deletions

View File

@@ -7,6 +7,7 @@ from scripts.roop_utils import imgutils
from scripts.roop_utils import models_utils
from scripts.roop_postprocessing import upscaling
from pprint import pprint
import numpy as np
#Reload all the modules when using "apply and restart"
#This is mainly done for development purposes
@@ -64,6 +65,8 @@ class FaceSwapUnitSettings:
# If True, discard images with low similarity
check_similarity : bool
# if True will compute similarity and add it to the image info
_compute_similarity :bool
# Minimum similarity against the used face (reference, batch or checkpoint)
min_sim: float
@@ -96,6 +99,10 @@ class FaceSwapUnitSettings:
return faces_index
@property
def compute_similarity(self):
    """Return whether similarity scores should be computed.

    True when the explicit compute flag is set or when similarity
    checking is enabled.
    """
    return any((self._compute_similarity, self.check_similarity))
@property
def batch_files(self):
"""
@@ -110,7 +117,7 @@
Reference face is the checkpoint or the source image or the first image in the batch in that order.
"""
if not hasattr(self,"_reference_face") :
if self.source_face and self.source_face != "None" :
if self.source_face is not None and self.source_face != "None" :
with open(self.source_face, "rb") as file:
try :
logger.info(f"loading pickle {file.name}")
@@ -128,11 +135,15 @@
img_bytes = base64.b64decode(self.source_img)
self.source_img = Image.open(io.BytesIO(img_bytes))
source_img = pil_to_cv2(self.source_img)
self._reference_face = swapper.get_or_default(swapper.get_faces(source_img), 0, None)
self._reference_face = swapper.get_or_default(swapper.get_faces(source_img), 0, None)
if self._reference_face is None :
logger.error("Face not found in reference image")
else :
logger.error("You need at least one face")
self._reference_face = None
if self._reference_face is None :
logger.error("You need at least one reference face")
return self._reference_face
@property
@@ -157,6 +168,9 @@ class FaceSwapUnitSettings:
"""
if not hasattr(self,"_blended_faces") :
self._blended_faces = swapper.blend_faces(self.faces)
assert(all([not np.array_equal(self._blended_faces.embedding, face.embedding) for face in self.faces]) if len(self.faces) > 1 else True), "Blended faces cannot be the same as one of the face if len(face)>0"
assert(not np.array_equal(self._blended_faces.embedding,self.reference_face.embedding) if len(self.faces) > 1 else True), "Blended faces cannot be the same as reference face if len(face)>0"
return self._blended_faces
# Register the tab, done here to prevent it from being added twice
@@ -209,20 +223,6 @@ class FaceSwapScript(scripts.Script):
return scripts.AlwaysVisible
# def after_component(self, component, **kwargs):
# def update_default(component, elem_id, option_name ) :
# if hasattr(component, "elem_id") :
# id = component.elem_id
# if id == elem_id :
# component.update(value = opts.data.get(option_name, component.value))
# if hasattr(component, "elem_id") :
# update_default(component, "roop_pp_face_restorer","roop_pp_default_face_restorer")
# update_default(component, "roop_pp_face_restorer_visibility","roop_pp_default_face_restorer_visibility")
# update_default(component, "roop_pp_face_restorer_weight","roop_pp_default_face_restorer_weight")
# update_default(component, "roop_pp_upscaler","roop_pp_default_upscaler")
# update_default(component, "roop_pp_upscaler_visibility","roop_pp_default_upscaler_visibility")
def ui(self, is_img2img):
with gr.Accordion(f"Roop {VERSION_FLAG}", open=False):
components = []
@@ -296,17 +296,26 @@
else :
logger.info("blend all faces together")
src_faces = [unit.blended_faces]
assert(not np.array_equal(unit.reference_face.embedding,src_faces[0].embedding) if len(unit.faces)>1 else True), "Reference face cannot be the same as blended"
for i,src_face in enumerate(src_faces):
logger.info(f"Process face {i}")
if unit.reference_face is not None :
reference_face = unit.reference_face
else :
logger.info("Use source face as reference face")
reference_face = src_face
result: swapper.ImageResult = swapper.swap_face(
unit.reference_face if unit.reference_face is not None else src_face,
reference_face,
src_face,
image,
faces_index=unit.faces_index,
model=self.model,
same_gender=unit.same_gender,
upscaled_swapper=upscaled_swapper
upscaled_swapper=upscaled_swapper,
compute_similarity=unit.compute_similarity
)
if result.image is None :
logger.error("Result image is None")

View File

@@ -11,7 +11,12 @@ def on_ui_settings():
False, "keep original image before swapping", gr.Checkbox, {"interactive": True}, section=section))
shared.opts.add_option("roop_units_count", shared.OptionInfo(
3, "Max faces units (requires restart)", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}, section=section))
shared.opts.add_option("roop_detection_threshold", shared.OptionInfo(
0.5, "Detection threshold ", gr.Slider, {"minimum": 0.1, "maximum": 0.99, "step": 0.001}, section=section))
shared.opts.add_option("roop_pp_default_face_restorer", shared.OptionInfo(
None, "UI Default post processing face restorer (requires restart)", gr.Dropdown, {"interactive": True, "choices" : ["None"] + [x.name() for x in shared.face_restorers]}, section=section))
shared.opts.add_option("roop_pp_default_face_restorer_visibility", shared.OptionInfo(

View File

@@ -37,7 +37,9 @@ def faceswap_unit_ui(is_img2img, unit_num=1):
True, placeholder="Blend Faces", label="Blend Faces ((Source|Checkpoint)+References = 1)"
)
gr.Markdown("""Discard images with low similarity or no faces :""")
check_similarity = gr.Checkbox(False, placeholder="discard", label="Check similarity")
with gr.Row():
check_similarity = gr.Checkbox(False, placeholder="discard", label="Check similarity")
compute_similarity = gr.Checkbox(False, label="Compute similarity")
min_sim = gr.Slider(0, 1, 0, step=0.01, label="Min similarity")
min_ref_sim = gr.Slider(
0, 1, 0, step=0.01, label="Min reference similarity"
@@ -60,6 +62,7 @@ def faceswap_unit_ui(is_img2img, unit_num=1):
label="Swap in generated image",
visible=is_img2img,
)
# If changed, you need to change FaceSwapUnitSettings accordingly
return [
img,
face,
@@ -68,6 +71,7 @@ def faceswap_unit_ui(is_img2img, unit_num=1):
enable,
same_gender,
check_similarity,
compute_similarity,
min_sim,
min_ref_sim,
faces_index,

View File

@@ -2,7 +2,7 @@ import copy
import os
from dataclasses import dataclass
from pprint import pprint
from typing import Dict, List, Set, Tuple, Union
from typing import Dict, List, Set, Tuple, Union, Optional
import cv2
import insightface
@@ -15,6 +15,7 @@ from sklearn.metrics.pairwise import cosine_similarity
from scripts.roop_swapping import upscaled_inswapper
from scripts.roop_utils.imgutils import cv2_to_pil, pil_to_cv2
from scripts.roop_logging import logger
from modules.shared import opts
providers = ["CPUExecutionProvider"]
@@ -131,7 +132,7 @@
return FS_MODEL
def get_faces(img_data: np.ndarray, det_size=(640, 640), det_thresh=0.5) -> List[Face]:
def get_faces(img_data: np.ndarray, det_size=(640, 640), det_thresh : Optional[int]=None) -> List[Face]:
"""
Detects and retrieves faces from an image using an analysis model.
@@ -142,6 +143,10 @@ def get_faces(img_data: np.ndarray, det_size=(640, 640), det_thresh=0.5) -> List
Returns:
list: A list of detected faces, sorted by their x-coordinate of the bounding box.
"""
if det_thresh is None :
det_thresh = opts.data.get("roop_detection_threshold", 0.5)
# Create a deep copy of the analysis model (otherwise det_size is attached to the analysis model and can't be changed)
face_analyser = copy.deepcopy(getAnalysisModel())
@@ -256,11 +261,11 @@
# Compute the mean of all embeddings
blended_embedding = np.mean(embeddings, axis=0)
# Create a new Face object using the first face in the list
blended = faces[0]
# Create a new Face object using the properties of the first face in the list
# Assign the blended embedding to the blended Face object
blended.embedding = blended_embedding
blended = Face(embedding=blended_embedding, gender=faces[0].gender, age=faces[0].age)
assert not np.array_equal(blended.embedding,faces[0].embedding) if len(faces) > 1 else True, "If len(faces)>0, the blended embedding should not be the same than the first image"
return blended
@@ -275,7 +280,8 @@
model: str,
faces_index: Set[int] = {0},
same_gender=True,
upscaled_swapper = False
upscaled_swapper = False,
compute_similarity = True
) -> ImageResult:
"""
Swaps faces in the target image with the source face.
@@ -316,29 +322,31 @@
result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
return_result.image = result_image
try:
result_faces = get_faces(
cv2.cvtColor(np.array(result_image), cv2.COLOR_RGB2BGR)
)
if same_gender:
result_faces = [x for x in result_faces if x["gender"] == gender]
for i, swapped_face in enumerate(result_faces):
logger.info(f"compare face {i}")
if i in faces_index and i < len(target_faces):
return_result.similarity[i] = cosine_similarity_face(
source_face, swapped_face
)
return_result.ref_similarity[i] = cosine_similarity_face(
reference_face, swapped_face
)
if compute_similarity :
try:
result_faces = get_faces(
cv2.cvtColor(np.array(result_image), cv2.COLOR_RGB2BGR)
)
if same_gender:
result_faces = [x for x in result_faces if x["gender"] == gender]
logger.info(f"similarity {return_result.similarity}")
logger.info(f"ref similarity {return_result.ref_similarity}")
for i, swapped_face in enumerate(result_faces):
logger.info(f"compare face {i}")
if i in faces_index and i < len(target_faces):
return_result.similarity[i] = cosine_similarity_face(
source_face, swapped_face
)
return_result.ref_similarity[i] = cosine_similarity_face(
reference_face, swapped_face
)
except Exception as e:
logger.error("Swapping failed %s", e)
raise e
logger.info(f"similarity {return_result.similarity}")
logger.info(f"ref similarity {return_result.ref_similarity}")
except Exception as e:
logger.error("Similarity processing failed %s", e)
raise e
except Exception as e :
logger.error("Conversion failed %s", e)
raise e