# mirror of https://github.com/vladmandic/automatic
import cv2
import torch
import numpy as np
from PIL import Image
from modules import devices
from modules.shared import opts
from modules.control.util import HWC3, resize_image


# Color palette for the 150 ADE20K semantic classes: row index = class id,
# row value = RGB color used to render that class in the segmentation map.
# NOTE(review): ordering presumably follows the standard ADE20K label list
# (matching the shi-labs OneFormer ADE20K checkpoints) — verify against the dataset.
ADE20K_PALETTE = np.array([
    [120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3],
    [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], [4, 250, 7],
    [224, 5, 255], [235, 255, 7], [150, 5, 61], [120, 120, 70], [8, 255, 51],
    [255, 6, 82], [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
    [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71],
    [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], [112, 9, 255],
    [8, 255, 214], [7, 255, 224], [255, 184, 6], [10, 255, 71], [255, 41, 10],
    [7, 255, 255], [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
    [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255],
    [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], [250, 10, 15],
    [20, 255, 0], [31, 255, 0], [255, 31, 0], [255, 224, 0], [153, 255, 0],
    [0, 0, 255], [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
    [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], [0, 255, 112],
    [0, 255, 133], [255, 0, 0], [255, 163, 0], [255, 102, 0], [194, 255, 0],
    [0, 143, 255], [51, 255, 0], [0, 82, 255], [0, 255, 41], [0, 255, 173],
    [10, 0, 255], [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
    [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], [255, 184, 184],
    [0, 31, 255], [0, 255, 61], [0, 71, 255], [255, 0, 204], [0, 255, 194],
    [0, 255, 82], [0, 10, 255], [0, 112, 255], [51, 0, 255], [0, 194, 255],
    [0, 122, 255], [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
    [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], [8, 184, 170],
    [133, 0, 255], [0, 255, 92], [184, 0, 255], [255, 0, 31], [0, 184, 255],
    [0, 214, 255], [255, 0, 112], [92, 255, 0], [0, 224, 255], [112, 224, 255],
    [70, 184, 160], [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
    [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], [255, 0, 235],
    [245, 0, 255], [255, 0, 122], [255, 245, 0], [10, 190, 212], [214, 255, 0],
    [0, 204, 255], [20, 0, 255], [255, 255, 0], [0, 153, 255], [0, 41, 255],
    [0, 255, 204], [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
    [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], [184, 255, 0],
    [0, 133, 255], [255, 214, 0], [25, 194, 194], [102, 255, 0], [92, 0, 255],
], dtype=np.uint8)


class OneFormerDetector:
    """Semantic-segmentation annotator built on a transformers OneFormer model.

    Wraps a loaded model/processor pair and, when called, produces a
    segmentation map colored with the ADE20K palette.
    """

    def __init__(self, model, processor):
        # Hold on to the loaded transformers model and its paired processor.
        self.processor = processor
        self.model = model

    @classmethod
    def from_pretrained(cls, pretrained_model_or_path="shi-labs/oneformer_ade20k_swin_large", cache_dir=None, local_files_only=False):
        """Alternate constructor: load processor and model from the HF hub or a local cache."""
        from transformers import AutoProcessor, OneFormerForUniversalSegmentation
        proc = AutoProcessor.from_pretrained(pretrained_model_or_path, cache_dir=cache_dir, local_files_only=local_files_only)
        net = OneFormerForUniversalSegmentation.from_pretrained(pretrained_model_or_path, cache_dir=cache_dir, local_files_only=local_files_only, use_safetensors=True)
        # Move to the configured device and switch to inference mode up front.
        net = net.to(devices.device).eval()
        return cls(net, proc)

    def __call__(self, input_image, detect_resolution=512, image_resolution=512, output_type="pil", **kwargs):
        """Segment `input_image` and return an ADE20K-colored map.

        The image is resized to `detect_resolution` for inference; the colored
        result is resized to match `image_resolution`. Returns a PIL image when
        `output_type` is "pil", otherwise a numpy HWC uint8 array.
        """
        self.model.to(devices.device)
        if not isinstance(input_image, np.ndarray):
            input_image = np.array(input_image, dtype=np.uint8)
        # Normalize to 3-channel uint8, then scale for detection.
        input_image = resize_image(HWC3(input_image), detect_resolution)
        height, width = input_image.shape[:2]
        inputs = self.processor(images=Image.fromarray(input_image), task_inputs=["semantic"], return_tensors="pt")
        # Move only tensor entries of the processor output onto the model device.
        moved = {}
        for key, value in inputs.items():
            moved[key] = value.to(devices.device) if isinstance(value, torch.Tensor) else value
        with devices.inference_context():
            outputs = self.model(**moved)
        # Per-pixel class ids at the detection resolution.
        seg = self.processor.post_process_semantic_segmentation(outputs, target_sizes=[(height, width)])[0].cpu().numpy()
        if opts.control_move_processor:
            # Optionally free the device by parking the model on CPU after inference.
            self.model.to("cpu")
        # Map class ids to palette colors; modulo guards ids beyond the palette length.
        colored = HWC3(ADE20K_PALETTE[seg % len(ADE20K_PALETTE)])
        # Match the output size the caller asked for (nearest keeps class edges crisp).
        resized_input = resize_image(input_image, image_resolution)
        target_h, target_w = resized_input.shape[:2]
        colored = cv2.resize(colored, (target_w, target_h), interpolation=cv2.INTER_NEAREST)
        return Image.fromarray(colored) if output_type == "pil" else colored