reduce mandatory requirements

Signed-off-by: Vladimir Mandic <mandic00@live.com>
pull/4658/head
Vladimir Mandic 2026-02-18 17:53:08 +01:00
parent d6bbfe3dc2
commit 6fdd3a53cf
55 changed files with 120 additions and 103 deletions

View File

@ -1,12 +1,12 @@
# Change Log for SD.Next
## Update for 2026-02-17
## Update for 2026-02-18
### Highlights for 2026-02-17
### Highlights for 2026-02-18
TBD
### Details for 2026-02-17
### Details for 2026-02-18
- **Models**
- [FireRed Image Edit](https://huggingface.co/FireRedTeam/FireRed-Image-Edit-1.0)
@ -28,6 +28,10 @@ TBD
- **nunchaku** models are now listed in networks tab as reference models
instead of being used implicitly via quantization, thanks @CalamitousFelicitousness
- removed: old `codeformer` and `gfpgan` face restorers, thanks @CalamitousFelicitousness
- **Compute**
- **ROCm** support for additional AMD GPUs: `gfx103X`, thanks @crashingalexsan
- **Cuda** `torch==2.10` removed support for `rtx1000` series, use following before first startup:
> set TORCH_COMMAND='torch==2.9.1 torchvision==0.24.1 torchaudio==2.9.1 --index-url https://download.pytorch.org/whl/cu126'
- **UI**
- ui: **localization** improved translation quality and new translations locales:
*en, en1, en2, en3, en4, hr, es, it, fr, de, pt, ru, zh, ja, ko, hi, ar, bn, ur, id, vi, tr, sr, po, he, xx, yy, qq, tlh*
@ -39,7 +43,10 @@ TBD
- **Internal**
- `python==3.14` initial support
see [docs](https://vladmandic.github.io/sdnext-docs/Python/) for details
- remove hard-dependencies: `clip`, `numba`, `skimage`, `torchsde`
- remove hard-dependencies:
`clip, numba, skimage, torchsde, omegaconf, antlr, patch-ng, astunparse, addict, inflection, jsonmerge, kornia`,
`resize-right, voluptuous, yapf, sqlalchemy, invisible-watermark, pi-heif, ftfy, blendmodes, PyWavelets`
these are now installed on-demand when needed
- refactor: to/from image/tensor logic, thanks @CalamitousFelicitousness
- refactor: switch to `pyproject.toml` for tool configs
- refactor: reorganize `cli` scripts

View File

@ -81,7 +81,7 @@ async function main() {
} else {
const json = await res.json();
console.log('result:', json.info);
for (const i in json.images) { // eslint-disable-line guard-for-in
for (const i in json.images) {
const file = args.output || `/tmp/test-${i}.jpg`;
const data = atob(json.images[i]);
fs.writeFileSync(file, data, 'binary');

View File

@ -41,7 +41,7 @@ async function main() {
} else {
const json = await res.json();
console.log('result:', json.info);
for (const i in json.images) { // eslint-disable-line guard-for-in
for (const i in json.images) {
const f = `/tmp/test-${i}.jpg`;
fs.writeFileSync(f, atob(json.images[i]), 'binary');
console.log('image saved:', f);

View File

@ -6,7 +6,6 @@ import base64
import numpy as np
import mediapipe as mp
from PIL import Image, ImageOps
from pi_heif import register_heif_opener
from skimage.metrics import structural_similarity as ssim
from scipy.stats import beta
@ -261,7 +260,6 @@ def file(filename: str, folder: str, tag = None, requested = []):
res = Result(fn = filename, typ='unknown', tag=tag, requested = requested)
# open image
try:
register_heif_opener()
res.image = Image.open(filename)
if res.image.mode == 'RGBA':
res.image = res.image.convert('RGB')

View File

@ -11071,4 +11071,4 @@
"hint": "استخراج خريطة العمق باستخدام نموذج Zoe"
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -1654,4 +1654,4 @@
{"id":"","label":"Z values","localized":"","hint":"Separate values for Z axis using commas","ui":"script_xyz_grid_script"},
{"id":"","label":"Zoe Depth","localized":"","hint":"","ui":"control"}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": "Utilizes the Zoe model for estimating relative depth from a single 2D image for ControlNet."
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": "Utilizes the Zoe neural heuristic to compute a monochromatic spatial depth-parallax map."
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": ""
}
]
}
}

View File

@ -11071,4 +11071,4 @@
"hint": "Zoe 深度图估计模型"
}
]
}
}

View File

@ -698,9 +698,9 @@ def check_diffusers():
current = opts.get('diffusers_version', '') if minor > -1 else ''
if (minor == -1) or ((current != target_commit) and (not args.experimental)):
if minor == -1:
log.info(f'Diffusers install: commit={target_commit}')
log.info(f'Install: package="diffusers" commit={target_commit}')
else:
log.info(f'Diffusers update: current={pkg.version} hash={current} target={target_commit}')
log.info(f'Update: package="diffusers" current={pkg.version} hash={current} target={target_commit}')
pip('uninstall --yes diffusers', ignore=True, quiet=True, uv=False)
if args.skip_git:
log.warning('Git: marked as not available but required for diffusers installation')
@ -728,9 +728,9 @@ def check_transformers():
target_tokenizers = '0.22.2'
if (pkg_transformers is None) or ((pkg_transformers.version != target_transformers) or (pkg_tokenizers is None) or ((pkg_tokenizers.version != target_tokenizers) and (not args.experimental))):
if pkg_transformers is None:
log.info(f'Transformers install: version={target_transformers}')
log.info(f'Install: package="transformers" version={target_transformers}')
else:
log.info(f'Transformers update: current={pkg_transformers.version} target={target_transformers}')
log.info(f'Update: package="transformers" current={pkg_transformers.version} target={target_transformers}')
pip('uninstall --yes transformers', ignore=True, quiet=True, uv=False)
pip(f'install --upgrade tokenizers=={target_tokenizers}', ignore=False, quiet=True, uv=False)
pip(f'install --upgrade transformers=={target_transformers}', ignore=False, quiet=True, uv=False)
@ -961,7 +961,7 @@ def check_cudnn():
# check torch version
def check_torch():
log.info('Verifying torch installation')
log.info('Torch: verifying installation')
t_start = time.time()
if args.skip_torch:
log.info('Torch: skip tests')
@ -1030,7 +1030,7 @@ def check_torch():
if 'torch' in torch_command:
if not installed('torch'):
log.info(f'Torch: download and install in progress... cmd="{torch_command}"')
log.info(f'Install: package="torch" download and install in progress... cmd="{torch_command}"')
install('--upgrade pip', 'pip', reinstall=True) # pytorch rocm is too large for older pip
install(torch_command, 'torch torchvision', quiet=True)
@ -1366,6 +1366,9 @@ def install_insightface():
def install_optional():
t_start = time.time()
log.info('Installing optional requirements...')
install('pi-heif')
install('addict')
install('yapf')
install('--no-build-isolation git+https://github.com/Disty0/BasicSR@23c1fb6f5c559ef5ce7ad657f2fa56e41b121754', 'basicsr', ignore=True, quiet=True)
install('av', ignore=True, quiet=True)
install('beautifulsoup4', ignore=True, quiet=True)

View File

@ -1,7 +1,7 @@
String.prototype.format = function (args) { // eslint-disable-line no-extend-native, func-names
let thisString = '';
for (let charPos = 0; charPos < this.length; charPos++) thisString += this[charPos];
for (const key in args) { // eslint-disable-line guard-for-in
for (const key in args) {
const stringKey = `{${key}}`;
thisString = thisString.replace(new RegExp(stringKey, 'g'), args[key]);
}

View File

@ -1,7 +1,7 @@
import re
import inspect
from typing import Any, Optional, Dict, List, Type, Callable, Union
from pydantic import BaseModel, Field, create_model # pylint: disable=no-name-in-module
from inflection import underscore
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
import modules.shared as shared
@ -29,6 +29,12 @@ if not hasattr(BaseModel, "__config__"):
BaseModel.__config__ = DummyConfig
def underscore(name: str) -> str: # Convert CamelCase or PascalCase string to underscore_case (snake_case).
# use instead of inflection.underscore
s1 = re.sub('([a-z0-9])([A-Z])', r'\1_\2', name)
s2 = re.sub('([A-Z]+)([A-Z][a-z])', r'\1_\2', s1)
return s2.lower()
class PydanticModelGenerator:
def __init__(
self,

View File

@ -15,7 +15,6 @@
import html
from typing import Any, Callable, Dict, List, Optional, Union
import ftfy
import regex as re
import torch
from transformers import AutoTokenizer, UMT5EncoderModel
@ -86,6 +85,9 @@ def optimized_scale(positive_flat, negative_flat):
return st_star
def basic_clean(text):
from installer import install
install('ftfy')
import ftfy
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()

View File

@ -33,7 +33,7 @@ def run_modelmerger(id_task, **kwargs): # pylint: disable=unused-argument
from installer import install
install('tensordict', quiet=True)
try:
from tensordict import TensorDict
from tensordict import TensorDict # pylint: disable=unused-import
except Exception as e:
shared.log.error(f"Merge: {e}")
return [*[gr.update() for _ in range(4)], "tensordict not available"]

View File

@ -70,7 +70,7 @@ def face_id(
shared.prompt_styles.apply_styles_to_extra(p)
if shared.opts.cuda_compile_backend == 'none':
token_merge.apply_token_merging(p.sd_model)
token_merge.apply_token_merging(shared.sd_model)
sd_hijack_freeu.apply_freeu(p)
script_callbacks.before_process_callback(p)

View File

@ -13,11 +13,6 @@ from modules.image.watermark import set_watermark
debug = errors.log.trace if os.environ.get('SD_PATH_DEBUG', None) is not None else lambda *args, **kwargs: None
debug_save = errors.log.trace if os.environ.get('SD_SAVE_DEBUG', None) is not None else lambda *args, **kwargs: None
try:
from pi_heif import register_heif_opener
register_heif_opener()
except Exception:
pass
def sanitize_filename_part(text, replace_spaces=True):

View File

@ -9,6 +9,8 @@ from modules import devices
class UpscalerRealESRGAN(Upscaler):
def __init__(self, dirname):
from installer import install
install('addict')
install('yapf')
install('--no-build-isolation git+https://github.com/Disty0/BasicSR@23c1fb6f5c559ef5ce7ad657f2fa56e41b121754', 'basicsr', ignore=True, quiet=True)
from basicsr.archs.rrdbnet_arch import RRDBNet
self.name = "RealESRGAN"

View File

@ -75,7 +75,7 @@ def process_pre(p: processing.StableDiffusionProcessing):
# apply-with-unapply
sd_models_compile.check_deepcache(enable=True)
ipadapter.apply(shared.sd_model, p)
token_merge.apply_token_merging(p.sd_model)
token_merge.apply_token_merging(shared.sd_model)
hidiffusion.apply(p, shared.sd_model_type)
ras.apply(shared.sd_model, p)
pag.apply(p)
@ -117,7 +117,7 @@ def process_post(p: processing.StableDiffusionProcessing):
try:
sd_models_compile.check_deepcache(enable=False)
ipadapter.unapply(shared.sd_model, unload=getattr(p, 'ip_adapter_unload', False))
token_merge.remove_token_merging(p.sd_model)
token_merge.remove_token_merging(shared.sd_model)
hidiffusion.unapply()
ras.unapply(shared.sd_model)
pag.unapply()

View File

@ -7,7 +7,6 @@ import torch
import numpy as np
import cv2
from PIL import Image
from blendmodes.blend import blendLayers, BlendType
from modules import shared, devices, images, sd_models, sd_samplers, sd_vae, sd_hijack_hypertile, processing_vae, timer
from modules.api import helpers
@ -38,7 +37,9 @@ def setup_color_correction(image):
def apply_color_correction(correction, original_image):
from installer import install
install('scikit-image', quiet=True)
install('blendmodes', quiet=True)
from skimage import exposure
from blendmodes.blend import blendLayers, BlendType
shared.log.debug(f"Applying color correction: correction={correction.shape} image={original_image}")
np_image = np.asarray(original_image)
np_recolor = cv2.cvtColor(np_image, cv2.COLOR_RGB2LAB)

View File

@ -129,7 +129,7 @@ class Agent:
return "v2-staging/gfx1152"
if self.gfx_version == 0x1153:
return "v2-staging/gfx1153"
if self.gfx_version in (0x1030, 0x1032,):
if self.gfx_version in (0x1030, 0x1031, 0x1032, 0x1034,):
return "v2-staging/gfx103X-dgpu"
#if (self.gfx_version & 0xFFF0) == 0x1010:
# return "gfx101X-dgpu"

View File

@ -1,7 +1,12 @@
import importlib
from typing import Any, Callable, List, Union
from omegaconf import DictConfig, ListConfig, OmegaConf
try:
from installer import install
install('omegaconf')
from omegaconf import DictConfig, ListConfig, OmegaConf
except Exception as e:
raise ImportError(f"Failed to import omegaconf. Error: {e}") from e
try:
OmegaConf.register_new_resolver("eval", eval)
@ -10,7 +15,7 @@ except Exception as e:
raise
def load_config(path: str, argv: List[str] = None) -> Union[DictConfig, ListConfig]:
def load_config(path: str, argv: List[str] = None):
"""
Load a configuration. Will resolve inheritance.
"""
@ -25,7 +30,7 @@ def load_config(path: str, argv: List[str] = None) -> Union[DictConfig, ListConf
def resolve_recursive(
config: Any,
resolver: Callable[[Union[DictConfig, ListConfig]], Union[DictConfig, ListConfig]],
resolver: Callable[[Any], Any],
) -> Any:
config = resolver(config)
if isinstance(config, DictConfig):
@ -41,7 +46,7 @@ def resolve_recursive(
return config
def resolve_inheritance(config: Union[DictConfig, ListConfig]) -> Any:
def resolve_inheritance(config: Any) -> Any:
"""
Recursively resolve inheritance if the config contains:
__inherit__: path/to/parent.yaml or a ListConfig of such paths.
@ -104,7 +109,7 @@ def import_item(path: Union[str, List[str]], name: str) -> Any:
raise ValueError(f"Path must be string or list of strings, got: {type(path)}")
def create_object(config: DictConfig) -> Any:
def create_object(config: Any) -> Any:
"""
Create an object from config.
The config is expected to contains the following:

View File

@ -17,7 +17,6 @@ Utility functions for creating schedules and samplers from config.
"""
import torch
from omegaconf import DictConfig
from .samplers.base import Sampler
from .samplers.euler import EulerSampler
@ -28,7 +27,7 @@ from .timesteps.sampling.trailing import UniformTrailingSamplingTimesteps
def create_schedule_from_config(
config: DictConfig,
config,
) -> Schedule:
"""
Create a schedule from configuration.
@ -40,7 +39,7 @@ def create_schedule_from_config(
def create_sampler_from_config(
config: DictConfig,
config,
schedule: Schedule,
timesteps: SamplingTimesteps,
) -> Sampler:
@ -57,7 +56,7 @@ def create_sampler_from_config(
def create_sampling_timesteps_from_config(
config: DictConfig,
config,
schedule: Schedule,
device: torch.device,
) -> SamplingTimesteps:

View File

@ -1,7 +1,6 @@
from typing import List, Optional, Tuple, Union
import torch
from einops import rearrange
from omegaconf import DictConfig, ListConfig
from ..common.diffusion import classifier_free_guidance_dispatcher, create_sampler_from_config, create_sampling_timesteps_from_config, create_schedule_from_config
from ..models.dit_v2 import na
@ -40,7 +39,9 @@ def optimized_channels_to_second(tensor):
class VideoDiffusionInfer():
def __init__(self, config: DictConfig, device: str, dtype: torch.dtype):
def __init__(self, config, device: str, dtype: torch.dtype):
from installer import install
install('omegaconf')
self.config = config
self.device = device
self.dtype = dtype
@ -48,6 +49,7 @@ class VideoDiffusionInfer():
self.dit = None
self.sampler = None
self.schedule = None
def get_condition(self, latent: torch.Tensor, latent_blur: torch.Tensor, task: str) -> torch.Tensor:
t, h, w, c = latent.shape
cond = torch.zeros([t, h, w, c + 1], device=latent.device, dtype=latent.dtype)
@ -93,6 +95,7 @@ class VideoDiffusionInfer():
@torch.no_grad()
def vae_encode(self, samples: List[torch.Tensor]) -> List[torch.Tensor]:
from omegaconf import ListConfig
use_sample = self.config.vae.get("use_sample", True)
latents = []
if len(samples) > 0:
@ -138,6 +141,7 @@ class VideoDiffusionInfer():
@torch.no_grad()
def vae_decode(self, latents: List[torch.Tensor], target_dtype: torch.dtype = None) -> List[torch.Tensor]:
"""🚀 VAE decode optimisé - décodage direct sans chunking, compatible avec autocast externe"""
from omegaconf import ListConfig
samples = []
if len(latents) > 0:
device = self.device

View File

@ -1,6 +1,5 @@
import os
import torch
from omegaconf import OmegaConf
from safetensors.torch import load_file as load_safetensors_file
from huggingface_hub import hf_hub_download
from ..optimization.memory_manager import preinitialize_rope_cache
@ -9,6 +8,10 @@ from ..core.infer import VideoDiffusionInfer
def configure_runner(model_name, cache_dir, device:str='cpu', dtype:torch.dtype=None):
from installer import install
install('omegaconf')
from omegaconf import OmegaConf
repo_id = "vladmandic/SeedVR2"
script_directory = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
config_path = os.path.join(script_directory, './config_7b.yaml') if "7b" in model_name else os.path.join(script_directory, './config_3b.yaml')

View File

@ -437,7 +437,7 @@ class ExtraNetworksPage:
files = list(files_cache.list_files(reference_path, ext_filter=exts, recursive=False))
if shared.opts.diffusers_dir in path:
path = os.path.relpath(path, shared.opts.diffusers_dir)
fn = os.path.join(reference_path, path.replace('models--', '').replace('\\', '/').split('/')[0])
fn = os.path.join(reference_path, path.replace('models--', '').replace('\\', '/').split('/')[0]) # pylint: disable=use-maxsplit-arg
else:
fn = os.path.splitext(path)[0]
files += list(files_cache.list_files(os.path.dirname(path), ext_filter=exts, recursive=False))

View File

@ -25,9 +25,9 @@
"eslint-ui": "cd extensions-builtin/sdnext-modernui && eslint . javascript/",
"ruff": ". venv/bin/activate && ruff check",
"ruff-win": "venv\\scripts\\activate && ruff check",
"pylint": ". venv/bin/activate && pylint --disable=W0511 *.py modules/ pipelines/ scripts/ extensions-builtin/ | grep -v '^*'",
"pylint-win": "venv\\scripts\\activate && pylint --disable=W0511 *.py modules/ pipelines/ scripts/ extensions-builtin/",
"lint": "npm run format && npm run eslint && npm run eslint-ui && npm run ruff && npm run pylint | grep -v TODO",
"pylint": ". venv/bin/activate && pylint *.py modules/ pipelines/ scripts/ extensions-builtin/ | grep -v '^*'",
"pylint-win": "venv\\scripts\\activate && pylint *.py modules/ pipelines/ scripts/ extensions-builtin/",
"lint": "npm run format && npm run eslint && npm run eslint-ui && npm run ruff && npm run pylint",
"lint-win": "npm run format-win && npm run eslint && npm run eslint-ui && npm run ruff-win && npm run pylint-win",
"test": ". venv/bin/activate; python launch.py --debug --test",
"todo": "grep -oIPR 'TODO.*' *.py modules/ pipelines/ | sort -u",

View File

@ -1,5 +1,4 @@
import importlib
import omegaconf
from inspect import isfunction
from random import shuffle
@ -16,6 +15,9 @@ def get_obj_from_str(string, reload=False):
def instantiate(obj):
from installer import install
install('omegaconf')
import omegaconf
if isinstance(obj, omegaconf.DictConfig):
obj = dict(**obj)
if isinstance(obj, dict) and "class" in obj:

View File

@ -1,9 +1,13 @@
import os
import toml
import omegaconf
def load_train_config(file):
from installer import install
install('omegaconf')
install('toml')
import omegaconf
import toml
config = toml.load(file)
model = config["model"]

View File

@ -56,7 +56,7 @@ class GoogleNanoBananaPipeline():
)
def img2img(self, prompt, image):
from google import genai
from google import genai # pylint: disable=no-name-in-module
image_bytes = io.BytesIO()
image.save(image_bytes, format='JPEG')
return self.client.models.generate_content(
@ -109,7 +109,7 @@ class GoogleNanoBananaPipeline():
return args
def __call__(self, prompt: list[str], width: int, height: int, image: Image.Image = None):
from google import genai
from google import genai # pylint: disable=no-name-in-module
if self.client is None:
args = self.get_args()
if args is None:

View File

@ -81,6 +81,7 @@ ignore = [
"B006", # Do not use mutable data structures for argument defaults
"B008", # Do not perform function call in argument defaults
"B905", # Strict zip() usage
"ASYNC240", # Async functions should not use os.path methods
"C420", # Unnecessary dict comprehension for iterable; use `dict.fromkeys` instead
"C408", # Unnecessary `dict` call
"I001", # Import block is un-sorted or un-formatted
@ -194,7 +195,7 @@ main.ignore-patterns=[
".*_model_arch_v2.py$",
]
main.ignored-modules=""
main.jobs=8
main.jobs=4
main.limit-inference-results=100
main.load-plugins=""
main.persistent=false
@ -334,15 +335,15 @@ similarities.ignore-imports=true
similarities.ignore-signatures=true
similarities.min-similarity-lines=4
spelling.max-spelling-suggestions=4
# spelling.dict=""
# spelling.ignore-comment-directives=["fmt: on","fmt: off","noqa:","noqa","nosec","isort:skip","mypy:"]
# spelling.ignore-words=""
# spelling.private-dict-file=""
# spelling.store-unknown-words=false
string.check-quote-consistency=false
string.check-str-concat-over-line-jumps=false
typecheck.contextmanager-decorators="contextlib.contextmanager"
typecheck.generated-members=["numpy.*","logging.*","torch.*","cv2.*"]
typecheck.generated-members=[
"numpy.*",
"logging.*",
"torch.*",
"cv2.*",
]
typecheck.ignore-none=true
typecheck.ignore-on-opaque-inference=true
typecheck.ignored-checks-for-mixins=["no-member","not-async-context-manager","not-context-manager","attribute-defined-outside-init"]

View File

@ -3,35 +3,19 @@ setuptools==69.5.1
wheel
# standard
patch-ng
anyio
addict
astunparse
filetype
future
GitPython
httpcore
inflection
jsonmerge
kornia
lark
omegaconf
piexif
mpmath
psutil
pyyaml
resize-right
toml
voluptuous
yapf
fasteners
orjson
sqlalchemy
invisible-watermark
PyWavelets
pi-heif
ftfy
blendmodes
# versioned
fastapi==0.124.4
@ -40,13 +24,11 @@ safetensors==0.7.0
peft==0.18.1
httpx==0.28.1
compel==2.2.1
antlr4-python3-runtime==4.9.3
requests==2.32.3
tqdm==4.67.3
accelerate==1.12.0
einops==0.8.1
huggingface_hub==0.36.2
numexpr==2.11.0
numpy==2.1.2
pandas==2.3.1
protobuf==4.25.3

View File

@ -8,7 +8,6 @@ import os
from functools import lru_cache
from typing import Union, List
import ftfy
import regex as re
import torch
@ -58,6 +57,9 @@ def get_pairs(word):
def basic_clean(text):
from installer import install
install('ftfy')
import ftfy
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()

View File

@ -14,11 +14,12 @@ from torchvision.transforms import InterpolationMode
from torchvision.transforms.functional import normalize, resize
import insightface
from basicsr.utils import img2tensor, tensor2img
from facexlib.parsing import init_parsing_model
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
from insightface.app import FaceAnalysis
from .pulid_utils import img2tensor, tensor2img
from eva_clip import create_model_and_transforms
from eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from encoders_transformer import IDFormer, IDEncoder

2
wiki

@ -1 +1 @@
Subproject commit c0924688d04e3b41399f2cbd8e6050d937bebc06
Subproject commit 51a98c9e39bc8a226987a10c54eaaba36bfa8e8e