use ControlNetHED with Apache License 2.0
parent
2a4507a448
commit
f4cd2d51f6
|
|
@ -1,133 +1,96 @@
|
||||||
from distutils import extension
|
# This is an improved version and model of HED edge detection without GPL contamination
|
||||||
import numpy as np
|
# Please use this implementation in your products
|
||||||
|
# This implementation may produce slightly different results from Saining Xie's official implementations,
|
||||||
|
# but it generates smoother edges and is more suitable for ControlNet as well as other image-to-image translations.
|
||||||
|
# Different from official models and other implementations, this is an RGB-input model (rather than BGR)
|
||||||
|
# and in this way it works better for gradio's RGB protocol
|
||||||
|
|
||||||
|
import os
|
||||||
import cv2
|
import cv2
|
||||||
import torch
|
import torch
|
||||||
from einops import rearrange
|
import numpy as np
|
||||||
|
|
||||||
|
from einops import rearrange
|
||||||
import os
|
import os
|
||||||
from modules import devices
|
from modules import devices
|
||||||
from annotator.annotator_path import models_path
|
from annotator.annotator_path import models_path
|
||||||
from annotator.util import safe_step, nms
|
from annotator.util import safe_step, nms
|
||||||
|
|
||||||
class Network(torch.nn.Module):
|
|
||||||
def __init__(self, model_path):
|
class DoubleConvBlock(torch.nn.Module):
    """A stack of 3x3 convolutions (each followed by ReLU) plus a 1x1 projection head.

    Used as one VGG-style stage of the HED edge detector: the stage returns
    both its feature map (fed to the next stage) and a single-channel
    projection of that feature map (one side-output of the edge detector).
    """

    def __init__(self, input_channel, output_channel, layer_number):
        super().__init__()
        # First conv maps input_channel -> output_channel; the remaining
        # (layer_number - 1) convs keep the channel count constant.
        in_sizes = [input_channel] + [output_channel] * (layer_number - 1)
        self.convs = torch.nn.Sequential(*(
            torch.nn.Conv2d(in_channels=c, out_channels=output_channel,
                            kernel_size=(3, 3), stride=(1, 1), padding=1)
            for c in in_sizes
        ))
        # 1x1 conv producing the single-channel side output.
        self.projection = torch.nn.Conv2d(in_channels=output_channel, out_channels=1,
                                          kernel_size=(1, 1), stride=(1, 1), padding=0)

    def __call__(self, x, down_sampling=False):
        # Optionally halve the spatial resolution before the conv stack.
        feat = torch.nn.functional.max_pool2d(x, kernel_size=(2, 2), stride=(2, 2)) if down_sampling else x
        for layer in self.convs:
            feat = torch.nn.functional.relu(layer(feat))
        return feat, self.projection(feat)
|
||||||
|
|
||||||
self.netVggTwo = torch.nn.Sequential(
|
|
||||||
torch.nn.MaxPool2d(kernel_size=2, stride=2),
|
|
||||||
torch.nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
|
|
||||||
torch.nn.ReLU(inplace=False),
|
|
||||||
torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
|
|
||||||
torch.nn.ReLU(inplace=False)
|
|
||||||
)
|
|
||||||
|
|
||||||
self.netVggThr = torch.nn.Sequential(
|
class ControlNetHED_Apache2(torch.nn.Module):
    """Apache-2.0 reimplementation of the HED edge-detection backbone.

    Five DoubleConvBlock stages with VGG-like channel widths; every stage
    after the first downsamples by 2. Returns the five single-channel
    side-output projections (logits, one per scale).
    """

    def __init__(self):
        super().__init__()
        # Learned per-channel shift subtracted from the (1, 3, H, W) input.
        self.norm = torch.nn.Parameter(torch.zeros(size=(1, 3, 1, 1)))
        # (attribute name, in channels, out channels, conv count) per stage;
        # attribute names must stay block1..block5 to match the checkpoint.
        stage_specs = (
            ("block1", 3, 64, 2),
            ("block2", 64, 128, 2),
            ("block3", 128, 256, 3),
            ("block4", 256, 512, 3),
            ("block5", 512, 512, 3),
        )
        for attr, cin, cout, depth in stage_specs:
            setattr(self, attr, DoubleConvBlock(input_channel=cin,
                                                output_channel=cout,
                                                layer_number=depth))

    def __call__(self, x):
        feat = x - self.norm
        projections = []
        # Only the first stage keeps full resolution; the rest max-pool first.
        for stage, block in enumerate((self.block1, self.block2, self.block3,
                                       self.block4, self.block5)):
            feat, proj = block(feat, down_sampling=stage > 0)
            projections.append(proj)
        return tuple(projections)
|
||||||
)
|
|
||||||
|
|
||||||
self.netVggFiv = torch.nn.Sequential(
|
|
||||||
torch.nn.MaxPool2d(kernel_size=2, stride=2),
|
|
||||||
torch.nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
|
|
||||||
torch.nn.ReLU(inplace=False),
|
|
||||||
torch.nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
|
|
||||||
torch.nn.ReLU(inplace=False),
|
|
||||||
torch.nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
|
|
||||||
torch.nn.ReLU(inplace=False)
|
|
||||||
)
|
|
||||||
|
|
||||||
self.netScoreOne = torch.nn.Conv2d(in_channels=64, out_channels=1, kernel_size=1, stride=1, padding=0)
|
|
||||||
self.netScoreTwo = torch.nn.Conv2d(in_channels=128, out_channels=1, kernel_size=1, stride=1, padding=0)
|
|
||||||
self.netScoreThr = torch.nn.Conv2d(in_channels=256, out_channels=1, kernel_size=1, stride=1, padding=0)
|
|
||||||
self.netScoreFou = torch.nn.Conv2d(in_channels=512, out_channels=1, kernel_size=1, stride=1, padding=0)
|
|
||||||
self.netScoreFiv = torch.nn.Conv2d(in_channels=512, out_channels=1, kernel_size=1, stride=1, padding=0)
|
|
||||||
|
|
||||||
self.netCombine = torch.nn.Sequential(
|
|
||||||
torch.nn.Conv2d(in_channels=5, out_channels=1, kernel_size=1, stride=1, padding=0),
|
|
||||||
torch.nn.Sigmoid()
|
|
||||||
)
|
|
||||||
|
|
||||||
self.load_state_dict({strKey.replace('module', 'net'): tenWeight for strKey, tenWeight in torch.load(model_path).items()})
|
|
||||||
# end
|
|
||||||
|
|
||||||
def forward(self, tenInput):
|
|
||||||
tenInput = tenInput * 255.0
|
|
||||||
tenInput = tenInput - torch.tensor(data=[104.00698793, 116.66876762, 122.67891434], dtype=tenInput.dtype, device=tenInput.device).view(1, 3, 1, 1)
|
|
||||||
|
|
||||||
tenVggOne = self.netVggOne(tenInput)
|
|
||||||
tenVggTwo = self.netVggTwo(tenVggOne)
|
|
||||||
tenVggThr = self.netVggThr(tenVggTwo)
|
|
||||||
tenVggFou = self.netVggFou(tenVggThr)
|
|
||||||
tenVggFiv = self.netVggFiv(tenVggFou)
|
|
||||||
|
|
||||||
tenScoreOne = self.netScoreOne(tenVggOne)
|
|
||||||
tenScoreTwo = self.netScoreTwo(tenVggTwo)
|
|
||||||
tenScoreThr = self.netScoreThr(tenVggThr)
|
|
||||||
tenScoreFou = self.netScoreFou(tenVggFou)
|
|
||||||
tenScoreFiv = self.netScoreFiv(tenVggFiv)
|
|
||||||
|
|
||||||
tenScoreOne = torch.nn.functional.interpolate(input=tenScoreOne, size=(tenInput.shape[2], tenInput.shape[3]), mode='bilinear', align_corners=False)
|
|
||||||
tenScoreTwo = torch.nn.functional.interpolate(input=tenScoreTwo, size=(tenInput.shape[2], tenInput.shape[3]), mode='bilinear', align_corners=False)
|
|
||||||
tenScoreThr = torch.nn.functional.interpolate(input=tenScoreThr, size=(tenInput.shape[2], tenInput.shape[3]), mode='bilinear', align_corners=False)
|
|
||||||
tenScoreFou = torch.nn.functional.interpolate(input=tenScoreFou, size=(tenInput.shape[2], tenInput.shape[3]), mode='bilinear', align_corners=False)
|
|
||||||
tenScoreFiv = torch.nn.functional.interpolate(input=tenScoreFiv, size=(tenInput.shape[2], tenInput.shape[3]), mode='bilinear', align_corners=False)
|
|
||||||
|
|
||||||
return self.netCombine(torch.cat([ tenScoreOne, tenScoreTwo, tenScoreThr, tenScoreFou, tenScoreFiv ], 1))
|
|
||||||
# end
|
|
||||||
# end
|
|
||||||
|
|
||||||
# Lazily-initialized HED network; created on the first apply_hed() call and
# reused across calls.
netNetwork = None
# Apache-2.0-licensed ControlNetHED checkpoint (replaces the GPL-contaminated
# network-bsds500.pth from the original HED release).
remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/ControlNetHED.pth"
# Preferred download/storage directory for the checkpoint.
modeldir = os.path.join(models_path, "hed")
# Legacy checkpoint location (next to this file); still honored if a
# checkpoint already exists there.
old_modeldir = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
|
||||||
|
|
||||||
def apply_hed(input_image, is_safe=False):
    """Run HED edge detection on a single image.

    Lazily downloads/loads the ControlNetHED checkpoint on first use and
    caches the network in the module-level ``netNetwork`` global.

    Args:
        input_image: numpy array of shape (H, W, C) — presumably an RGB
            image with 0-255 values; TODO(review) confirm against callers.
        is_safe: when True, post-process the edge map with ``safe_step``.

    Returns:
        uint8 numpy array of shape (H, W) with edge intensities in 0-255.
    """
    global netNetwork
    if netNetwork is None:
        modelpath = os.path.join(modeldir, "ControlNetHED.pth")
        old_modelpath = os.path.join(old_modeldir, "ControlNetHED.pth")
        # Prefer a checkpoint in the legacy location (next to this file);
        # otherwise use the models dir, downloading the file if missing.
        if os.path.exists(old_modelpath):
            modelpath = old_modelpath
        elif not os.path.exists(modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(remote_model_path, model_dir=modeldir)
        netNetwork = ControlNetHED_Apache2().to(devices.get_device_for("controlnet"))
        # Checkpoint is deserialized on CPU, then the model is moved/cast to
        # the target device below.
        netNetwork.load_state_dict(torch.load(modelpath, map_location='cpu'))
        netNetwork.to(devices.get_device_for("controlnet")).float().eval()

    assert input_image.ndim == 3
    H, W, C = input_image.shape
    with torch.no_grad():
        # .copy() guards against non-contiguous / read-only numpy input.
        image_hed = torch.from_numpy(input_image.copy()).float().to(devices.get_device_for("controlnet"))
        # HWC -> NCHW with batch size 1.
        image_hed = rearrange(image_hed, 'h w c -> 1 c h w')
        edges = netNetwork(image_hed)
        # Each side output is (1, 1, h', w'); squeeze to a 2-D float32 map.
        edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges]
        # Upsample every side output back to the input resolution.
        edges = [cv2.resize(e, (W, H), interpolation=cv2.INTER_LINEAR) for e in edges]
        edges = np.stack(edges, axis=2)
        # Average the multi-scale logits, then squash with a sigmoid.
        edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64)))
        if is_safe:
            edge = safe_step(edge)
        edge = (edge * 255.0).clip(0, 255).astype(np.uint8)
    return edge
|
||||||
|
|
||||||
|
|
||||||
def unload_hed_model():
|
def unload_hed_model():
|
||||||
global netNetwork
|
global netNetwork
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue