Fix PuLID VRAM leak (#2902)

pull/2904/head
Chenlei Hu 2024-05-18 22:48:29 -04:00 committed by GitHub
parent e338a861dc
commit 681de2c4f0
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 24 additions and 22 deletions

View File

@@ -262,12 +262,16 @@ class ControlNetUnit(BaseModel):
def is_inpaint(self) -> bool:
return "inpaint" in self.module
def get_actual_preprocessor(self):
def get_actual_preprocessors(self) -> List[Any]:
p = ControlNetUnit.cls_get_preprocessor(self.module)
# Map "ip-adapter-auto" to actual preprocessor.
if self.module == "ip-adapter-auto":
return ControlNetUnit.cls_get_preprocessor(
self.module
).get_preprocessor_by_model(self.model)
return ControlNetUnit.cls_get_preprocessor(self.module)
p = p.get_preprocessor_by_model(self.model)
# Add all dependencies.
return [p] + [
ControlNetUnit.cls_get_preprocessor(dep) for dep in p.preprocessor_deps
]
@classmethod
def parse_image(cls, image) -> np.ndarray:

View File

@@ -882,8 +882,9 @@ class Script(scripts.Script, metaclass=(
# Unload unused preprocessors
Preprocessor.unload_unused(active_processors={
unit.get_actual_preprocessor()
p
for unit in self.enabled_units
for p in unit.get_actual_preprocessors()
})
high_res_fix = isinstance(p, StableDiffusionProcessingTxt2Img) and getattr(p, 'enable_hr', False)

View File

@@ -1,5 +1,6 @@
# https://github.com/ToTheBeginning/PuLID
import gc
import torch
import cv2
import numpy as np
@@ -110,7 +111,7 @@ class PreprocessorPuLID(Preprocessor):
self.tags = ["IP-Adapter"]
self.slider_resolution = PreprocessorParameter(visible=False)
self.returns_image = False
self.preprocessors_deps = [
self.preprocessor_deps = [
"facexlib",
"instant_id_face_embedding",
"EVA02-CLIP-L-14-336",
@@ -128,7 +129,7 @@ class PreprocessorPuLID(Preprocessor):
def unload(self) -> bool:
unloaded = False
for p_name in self.preprocessors_deps:
for p_name in self.preprocessor_deps:
p = Preprocessor.get_preprocessor(p_name)
if p is not None:
unloaded = unloaded or p.unload()
@@ -155,6 +156,12 @@ class PreprocessorPuLID(Preprocessor):
), "EVA02-CLIP-L-14-336 preprocessor not found! Please install sd-webui-controlnet-evaclip"
r = evaclip_preprocessor(face_features_image)
# Free memory
# This is necessary as facexlib and evaclip both seem to
# not properly free memory on their own.
gc.collect()
torch.cuda.empty_cache()
return Preprocessor.Result(
value=PuLIDProjInput(
id_ante_embedding=id_ante_embedding,

View File

@@ -102,6 +102,7 @@ class Preprocessor(ABC):
expand_mask_when_resize_and_fill = False
model: Optional[torch.nn.Module] = None
device = devices.get_device_for("controlnet")
preprocessor_deps: List[str] = field(default_factory=list)
all_processors: ClassVar[Dict[str, "Preprocessor"]] = {}
all_processors_by_name: ClassVar[Dict[str, "Preprocessor"]] = {}
@@ -176,21 +177,10 @@
@classmethod
def unload_unused(cls, active_processors: Set["Preprocessor"]):
# Prevent unloading for following preprocessors.
# https://github.com/Mikubill/sd-webui-controlnet/issues/2862
# TODO: Investigate proper way to unload PuLID.
# Current unloading method will cause VRAM leak. It is suspected
# the current unload method causes new model to load each time
# preprocessor is called.
prevent_unload = [
"EVA02-CLIP-L-14-336",
"facexlib",
"ip-adapter_pulid",
]
logger.debug(
f"Unload unused preprocessors. Active: {[p.name for p in active_processors]}"
)
for p in cls.all_processors.values():
if p.label in prevent_unload:
continue
if p not in active_processors:
success = p.unload()
if success: