unified logger

pull/4663/head
Vladimir Mandic 2026-02-19 09:46:42 +01:00
parent bfe014f5da
commit a3074baf8b
315 changed files with 2507 additions and 2116 deletions

View File

@ -59,6 +59,7 @@ TBD
- refactor: switch from deprecated `pkg_resources` to `importlib`
- refactor: modernize typing and type annotations
- refactor: improve `pydantic==2.x` compatibility
- refactor: move entire logging into separate `modules/logger` (usage sketch below)
- update `lint` rules, thanks @awsr
- remove requirements: `clip`, `open-clip`
- update `requirements`
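
A minimal sketch of how the relocated logger is consumed after this change, based on the imports and call sites visible in this diff (`modules/logger` exports `setup_logging`, `get_console`, `get_log`, `install_traceback`, `log`, and `console`); the default argument values shown are assumptions:

```python
# hedged sketch: consuming the unified logger from modules/logger
from modules.logger import setup_logging, log

setup_logging(debug=False, trace=False, log_filename=None)  # kwargs as called from launch.py
log.info('Starting SD.Next')       # standard stdlib levels
log.trace('verbose diagnostics')   # custom TRACE level (25) registered during setup
```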

View File

@ -13,6 +13,7 @@ import cProfile
import importlib
import importlib.util
import importlib.metadata
from modules.logger import setup_logging, get_console, get_log, install_traceback, log, console
class Dot(dict): # dot notation access to dictionary attributes
@ -30,8 +31,6 @@ version = {
}
setuptools, distutils = None, None # defined via ensure_base_requirements
current_branch = None
log = logging.getLogger("sd")
console = None
debug = log.debug if os.environ.get('SD_INSTALL_DEBUG', None) is not None else lambda *args, **kwargs: None
pip_log = '--log pip.log ' if os.environ.get('SD_PIP_DEBUG', None) is not None else ''
log_file = os.path.join(os.path.dirname(__file__), 'sdnext.log')
@ -86,232 +85,7 @@ except Exception:
elapsed = lambda *args, **kwargs: None # pylint: disable=unnecessary-lambda-assignment
def get_console():
return console
def get_log():
return log
@overload
def str_to_bool(val: str | bool) -> bool: ...
@overload
def str_to_bool(val: None) -> None: ...
def str_to_bool(val: str | bool | None) -> bool | None:
if isinstance(val, str):
if val.strip() and val.strip().lower() in ("1", "true"):
return True
return False
return val
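# e.g. str_to_bool("True") -> True, str_to_bool("0") -> False, str_to_bool(None) -> None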
def install_traceback(suppress: list = None):
from rich.traceback import install as traceback_install
from rich.pretty import install as pretty_install
if suppress is None:
suppress = []
width = os.environ.get("SD_TRACEWIDTH", console.width if console else None)
if width is not None:
width = int(width)
log.excepthook = traceback_install(
console=console,
extra_lines=int(os.environ.get("SD_TRACELINES", 1)),
max_frames=int(os.environ.get("SD_TRACEFRAMES", 16)),
width=width,
word_wrap=str_to_bool(os.environ.get("SD_TRACEWRAP", False)),
indent_guides=str_to_bool(os.environ.get("SD_TRACEINDENT", False)),
show_locals=str_to_bool(os.environ.get("SD_TRACELOCALS", False)),
locals_hide_dunder=str_to_bool(os.environ.get("SD_TRACEDUNDER", True)),
locals_hide_sunder=str_to_bool(os.environ.get("SD_TRACESUNDER", None)),
suppress=suppress,
)
pretty_install(console=console)
# setup console and file logging
def setup_logging():
from functools import partial, partialmethod
from logging.handlers import RotatingFileHandler
try:
pass # pylint: disable=unused-import
except Exception:
log.error('Please restart SD.Next so changes take effect')
sys.exit(1)
from rich.theme import Theme
from rich.logging import RichHandler
from rich.console import Console
from rich.padding import Padding
from rich.segment import Segment
from rich import box
from rich import print as rprint
from rich.pretty import install as pretty_install
class RingBuffer(logging.StreamHandler):
def __init__(self, capacity):
super().__init__()
self.capacity = capacity
self.buffer = []
self.formatter = logging.Formatter('{ "asctime":"%(asctime)s", "created":%(created)f, "facility":"%(name)s", "pid":%(process)d, "tid":%(thread)d, "level":"%(levelname)s", "module":"%(module)s", "func":"%(funcName)s", "msg":"%(message)s" }')
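# one JSON-style line per record; emit() below swaps double quotes in the message so the line stays parseable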
def emit(self, record):
if record.msg is not None and not isinstance(record.msg, str):
record.msg = str(record.msg)
try:
record.msg = record.msg.replace('"', "'")
except Exception:
pass
msg = self.format(record)
# self.buffer.append(json.loads(msg))
self.buffer.append(msg)
if len(self.buffer) > self.capacity:
self.buffer.pop(0)
def get(self):
return self.buffer
class LogFilter(logging.Filter):
def __init__(self):
super().__init__()
def filter(self, record):
return len(record.getMessage()) > 2
def override_padding(self, console, options): # pylint: disable=redefined-outer-name
style = console.get_style(self.style)
width = options.max_width
self.left = 0
render_options = options.update_width(width - self.left - self.right)
if render_options.height is not None:
render_options = render_options.update_height(height=render_options.height - self.top - self.bottom)
lines = console.render_lines(self.renderable, render_options, style=style, pad=False)
_Segment = Segment
left = _Segment(" " * self.left, style) if self.left else None
right = [_Segment.line()]
blank_line: list[Segment] | None = None
if self.top:
blank_line = [_Segment(f'{" " * width}\n', style)]
yield from blank_line * self.top
if left:
for line in lines:
yield left
yield from line
yield from right
else:
for line in lines:
yield from line
yield from right
if self.bottom:
blank_line = blank_line or [_Segment(f'{" " * width}\n', style)]
yield from blank_line * self.bottom
t_start = time.time()
if args.log:
global log_file # pylint: disable=global-statement
log_file = args.log
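# note: 25 places TRACE between INFO (20) and WARNING (30), so trace messages pass any handler set to INFO or below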
logging.TRACE = 25
logging.addLevelName(logging.TRACE, 'TRACE')
logging.Logger.trace = partialmethod(logging.Logger.log, logging.TRACE)
logging.trace = partial(logging.log, logging.TRACE)
def exception_hook(e: Exception, suppress=None):
from rich.traceback import Traceback
if suppress is None:
suppress = []
tb = Traceback.from_exception(type(e), e, e.__traceback__, show_locals=False, max_frames=16, extra_lines=1, suppress=suppress, theme="ansi_dark", word_wrap=False, width=console.width)
# print-to-console, does not get printed-to-file
exc_type, exc_value, exc_traceback = sys.exc_info()
log.excepthook(exc_type, exc_value, exc_traceback)
# print-to-file, temporarily disable-console-handler
for handler in log.handlers.copy():
if isinstance(handler, RichHandler):
log.removeHandler(handler)
with console.capture() as capture:
console.print(tb)
log.critical(capture.get())
log.addHandler(rh)
log.traceback = exception_hook
level = logging.DEBUG if (args.debug or args.trace) else logging.INFO
log.setLevel(logging.DEBUG) # log to file is always at level debug for facility `sd`
log.print = rprint
global console # pylint: disable=global-statement
theme = Theme({
"traceback.border": "black",
"inspect.value.border": "black",
"traceback.border.syntax_error": "dark_red",
"logging.level.info": "blue_violet",
"logging.level.debug": "purple4",
"logging.level.trace": "dark_blue",
})
Padding.__rich_console__ = override_padding
box.ROUNDED = box.SIMPLE
console = Console(
log_time=True,
log_time_format='%H:%M:%S-%f',
tab_size=4,
soft_wrap=True,
safe_box=True,
theme=theme,
)
logging.basicConfig(level=logging.ERROR, format='%(asctime)s | %(name)s | %(levelname)s | %(module)s | %(message)s', handlers=[logging.NullHandler()]) # redirect default logger to null
pretty_install(console=console)
install_traceback()
while log.hasHandlers() and len(log.handlers) > 0:
log.removeHandler(log.handlers[0])
log_filter = LogFilter()
# handlers
rh = RichHandler(show_time=True, omit_repeated_times=False, show_level=True, show_path=False, markup=False, rich_tracebacks=True, log_time_format='%H:%M:%S-%f', level=level, console=console)
if args.trace:
rh.formatter = logging.Formatter('[%(module)s][%(pathname)s:%(lineno)d] %(message)s')
rh.addFilter(log_filter)
rh.setLevel(level)
log.addHandler(rh)
fh = RotatingFileHandler(log_file, maxBytes=32*1024*1024, backupCount=9, encoding='utf-8', delay=True) # 32MB per file with 9 backups for log rotation
if args.trace:
fh.formatter = logging.Formatter(f'%(asctime)s | {hostname} | %(name)s | %(levelname)s | %(module)s | %(pathname)s:%(lineno)d | %(message)s')
else:
fh.formatter = logging.Formatter(f'%(asctime)s | {hostname} | %(name)s | %(levelname)s | %(module)s | %(message)s')
fh.addFilter(log_filter)
fh.setLevel(logging.DEBUG)
log.addHandler(fh)
global log_rolled # pylint: disable=global-statement
if not log_rolled and args.debug and not args.log:
try:
fh.doRollover()
except Exception:
pass
log_rolled = True
rb = RingBuffer(100) # 100 entries default in log ring buffer
rb.addFilter(log_filter)
rb.setLevel(level)
log.addHandler(rb)
log.buffer = rb.buffer
def quiet_log(quiet: bool=False, *args, **kwargs): # pylint: disable=redefined-outer-name,keyword-arg-before-vararg
if not quiet:
log.debug(*args, **kwargs)
log.quiet = quiet_log
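# usage: log.quiet(False, 'message') logs at debug level; log.quiet(True, 'message') is a no-op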
# overrides
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("httpx").setLevel(logging.ERROR)
logging.getLogger("diffusers").setLevel(logging.ERROR)
logging.getLogger("torch").setLevel(logging.ERROR)
logging.getLogger("ControlNet").handlers = log.handlers
logging.getLogger("lycoris").handlers = log.handlers
ts('log', t_start)
# console and file logging setup is now imported from modules.logger
def get_logfile():

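A hedged sketch of driving the traceback setup shown above through its SD_TRACE* environment switches; values pass through `str_to_bool`, so only "1" or "true" (case-insensitive) enable a flag, and it is assumed `install_traceback` keeps this behavior in `modules/logger`:

```python
# hedged sketch: configure rich tracebacks via environment, then re-install
import os
os.environ['SD_TRACELOCALS'] = 'true'  # show local variables in tracebacks
os.environ['SD_TRACELINES'] = '3'      # extra source lines around each frame

from modules.logger import setup_logging, install_traceback
setup_logging()        # kwargs optional here; launch.py passes debug/trace/log_filename
install_traceback()    # reads the SD_TRACE* variables at call time
```
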
View File

@ -1,5 +1,6 @@
#!/usr/bin/env python
from modules import logger
import os
import sys
import time
@ -9,7 +10,7 @@ from functools import lru_cache
import installer
debug_install = installer.log.debug if os.environ.get('SD_INSTALL_DEBUG', None) is not None else lambda *args, **kwargs: None
debug_install = logger.log.debug if os.environ.get('SD_INSTALL_DEBUG', None) is not None else lambda *args, **kwargs: None
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
sys.argv += shlex.split(commandline_args)
args = None
@ -58,17 +59,17 @@ def get_custom_args():
current = getattr(args, arg)
if current != default:
custom[arg] = getattr(args, arg)
installer.log.info(f'Command line args: {sys.argv[1:]} {installer.print_dict(custom)}')
logger.log.info(f'Command line args: {sys.argv[1:]} {installer.print_dict(custom)}')
if os.environ.get('SD_ENV_DEBUG', None) is not None:
env = os.environ.copy()
if 'PATH' in env:
del env['PATH']
if 'PS1' in env:
del env['PS1']
installer.log.trace(f'Environment: {installer.print_dict(env)}')
logger.log.trace(f'Environment: {installer.print_dict(env)}')
env = [f'{k}={v}' for k, v in os.environ.items() if k.startswith('SD_')]
ld = [f'{k}={v}' for k, v in os.environ.items() if k.startswith('LD_')]
installer.log.debug(f'Flags: sd={env} ld={ld}')
logger.log.debug(f'Flags: sd={env} ld={ld}')
rec('args')
@ -88,7 +89,7 @@ def commit_hash(): # compatibility function
@lru_cache
def run(command, desc=None, errdesc=None, custom_env=None, live=False): # compatibility function
if desc is not None:
installer.log.info(desc)
logger.log.info(desc)
if live:
result = subprocess.run(command, check=False, shell=True, env=os.environ if custom_env is None else custom_env)
if result.returncode != 0:
@ -183,9 +184,9 @@ def clean_server():
modules_sorted = {}
for module_key in modules_keys:
modules_sorted[module_key] = len([m for m in modules_cleaned if m.startswith(module_key)])
installer.log.trace(f'Server modules: {modules_sorted}')
logger.log.trace(f'Server modules: {modules_sorted}')
t1 = time.time()
installer.log.trace(f'Server modules: total={len(modules_loaded)} unloaded={len(removed_removed)} remaining={len(modules_cleaned)} gc={collected} time={t1-t0:.2f}')
logger.log.trace(f'Server modules: total={len(modules_loaded)} unloaded={len(removed_removed)} remaining={len(modules_cleaned)} gc={collected} time={t1-t0:.2f}')
def start_server(immediate=True, server=None):
@ -202,20 +203,20 @@ def start_server(immediate=True, server=None):
if not immediate:
time.sleep(3)
if collected > 0:
installer.log.debug(f'Memory: {get_memory_stats()} collected={collected}')
logger.log.debug(f'Memory: {get_memory_stats()} collected={collected}')
module_spec = importlib.util.spec_from_file_location('webui', 'webui.py')
server = importlib.util.module_from_spec(module_spec)
installer.log.debug(f'Starting module: {server}')
logger.log.debug(f'Starting module: {server}')
module_spec.loader.exec_module(server)
uvicorn = None
if args.test:
installer.log.info("Test only")
installer.log.critical('Logging: level=critical')
installer.log.error('Logging: level=error')
installer.log.warning('Logging: level=warning')
installer.log.info('Logging: level=info')
installer.log.debug('Logging: level=debug')
installer.log.trace('Logging: level=trace')
logger.log.info("Test only")
logger.log.critical('Logging: level=critical')
logger.log.error('Logging: level=error')
logger.log.warning('Logging: level=warning')
logger.log.info('Logging: level=info')
logger.log.debug('Logging: level=debug')
logger.log.trace('Logging: level=trace')
server.wants_restart = False
else:
uvicorn = server.webui(restart=not immediate)
@ -231,8 +232,8 @@ def main():
installer.ensure_base_requirements()
init_args() # setup argparser and default folders
installer.args = args
installer.setup_logging()
installer.log.info('Starting SD.Next')
installer.setup_logging(debug=args.debug, trace=args.trace, log_filename=args.log)
logger.log.info('Starting SD.Next')
installer.get_logfile()
try:
sys.excepthook = installer.custom_excepthook
@ -245,10 +246,10 @@ def main():
if args.reset:
installer.git_reset()
if args.skip_git or args.skip_all:
installer.log.info('Skipping GIT operations')
installer.log.info(f'Platform: {installer.print_dict(installer.get_platform())}')
logger.log.info('Skipping GIT operations')
logger.log.info(f'Platform: {installer.print_dict(installer.get_platform())}')
installer.check_venv()
installer.log.info(f'Args: {sys.argv[1:]}')
logger.log.info(f'Args: {sys.argv[1:]}')
if not args.skip_env or args.skip_all:
installer.set_environment()
if args.uv:
@ -259,42 +260,42 @@ def main():
installer.check_transformers()
installer.check_diffusers()
if args.test:
installer.log.info('Startup: test mode')
logger.log.info('Startup: test mode')
installer.quick_allowed = False
if args.reinstall:
installer.log.info('Startup: force reinstall of all packages')
logger.log.info('Startup: force reinstall of all packages')
installer.quick_allowed = False
if args.skip_all:
installer.log.info('Startup: skip all')
logger.log.info('Startup: skip all')
installer.quick_allowed = True
init_paths()
else:
installer.install_requirements()
if installer.check_timestamp():
installer.log.info('Startup: quick launch')
logger.log.info('Startup: quick launch')
init_paths()
installer.check_extensions()
else:
installer.log.info('Startup: standard')
logger.log.info('Startup: standard')
installer.install_submodules()
init_paths()
installer.install_extensions()
installer.install_requirements() # redo requirements since extensions may change them
if len(installer.errors) == 0:
installer.log.debug(f'Setup complete without errors: {round(time.time())}')
logger.log.debug(f'Setup complete without errors: {round(time.time())}')
else:
installer.log.warning(f'Setup complete with errors: {installer.errors}')
installer.log.warning(f'See log file for more details: {installer.log_file}')
logger.log.warning(f'Setup complete with errors: {installer.errors}')
logger.log.warning(f'See log file for more details: {logger.log_file}')
installer.extensions_preload(parser) # adds additional args from extensions
args = installer.parse_args(parser)
installer.log.info(f'Installer time: {init_summary()}')
logger.log.info(f'Installer time: {init_summary()}')
get_custom_args()
import threading
threading.Thread(target=installer.run_deferred_tasks, daemon=True).start()
uv, instance = start_server(immediate=True, server=None)
if installer.restart_required:
installer.log.warning('Restart is recommended due to packages updates...')
logger.log.warning('Restart is recommended due to packages updates...')
t_server = time.time()
t_monitor = time.time()
while True:
@ -308,19 +309,19 @@ def main():
if float(args.status) > 0 and (t_current - t_server) > float(args.status):
s = instance.state.status()
if (s.timestamp is None) or (s.step == 0): # don't spam during active job
installer.log.trace(f'Server: alive={alive} requests={requests} memory={get_memory_stats()} {s}')
logger.log.trace(f'Server: alive={alive} requests={requests} memory={get_memory_stats()} {s}')
t_server = t_current
if float(args.monitor) > 0 and t_current - t_monitor > float(args.monitor):
installer.log.trace(f'Monitor: {get_memory_stats(detailed=True)}')
logger.log.trace(f'Monitor: {get_memory_stats(detailed=True)}')
t_monitor = t_current
if not alive:
if uv is not None and uv.wants_restart:
clean_server()
installer.log.info('Server restarting...')
logger.log.info('Server restarting...')
# uv, instance = start_server(immediate=False, server=instance)
os.execv(sys.executable, ['python'] + sys.argv)
else:
installer.log.info('Exiting...')
logger.log.info('Exiting...')
break
time.sleep(1.0)

View File

@ -4,6 +4,7 @@ from fastapi import FastAPI, APIRouter, Depends, Request
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from fastapi.exceptions import HTTPException
from modules import errors, shared
from modules import logger
from modules.api import models, endpoints, script, helpers, server, generate, process, control, docs, gpu
@ -142,13 +143,13 @@ class Api:
if hasattr(self.app, 'tokens') and (self.app.tokens is not None):
if credentials.password in self.app.tokens.keys():
return True
shared.log.error(f'API authentication: user="{credentials.username}"')
logger.log.error(f'API authentication: user="{credentials.username}"')
raise HTTPException(status_code=401, detail="Unauthorized", headers={"WWW-Authenticate": "Basic"})
def get_session_start(self, req: Request, agent: str | None = None):
token = req.cookies.get("access-token") or req.cookies.get("access-token-unsecure")
user = self.app.tokens.get(token) if hasattr(self.app, 'tokens') else None
shared.log.info(f'Browser session: user={user} client={req.client.host} agent={agent}')
logger.log.info(f'Browser session: user={user} client={req.client.host} agent={agent}')
return {}
def launch(self):
@ -165,7 +166,7 @@ class Api:
# from modules.server import HypercornServer
# server = HypercornServer(self.app, **config)
http_server.start()
shared.log.info(f'API server: Uvicorn options={config}')
logger.log.info(f'API server: Uvicorn options={config}')
return http_server

View File

@ -2,6 +2,7 @@ from typing import Optional
from threading import Lock
from pydantic import BaseModel, Field # pylint: disable=no-name-in-module
from modules import errors, shared, processing_helpers
from modules import logger
from modules.api import models, helpers
from modules.control import run
@ -156,7 +157,7 @@ class APIControl:
if req.unit_type is None:
req.unit_type = 'controlnet'
if req.unit_type not in unit_types:
shared.log.error(f'Control unknown unit type: type={req.unit_type} available={unit_types}')
logger.log.error(f'Control unknown unit type: type={req.unit_type} available={unit_types}')
return
for i in range(len(req.control)):
u = req.control[i]

View File

@ -1,3 +1,4 @@
from modules import logger
from modules import shared
from modules.api import models, helpers
@ -90,7 +91,7 @@ def get_schedulers():
from modules.sd_samplers import list_samplers
all_schedulers = list_samplers()
for s in all_schedulers:
shared.log.critical(s)
logger.log.critical(s)
return all_schedulers
def post_unload_checkpoint():

View File

@ -9,10 +9,11 @@ from starlette.websockets import WebSocket, WebSocketState
from pydantic import BaseModel, Field # pylint: disable=no-name-in-module
from PIL import Image
from modules import shared, images, files_cache, modelstats
from modules import logger
from modules.paths import resolve_output_path
debug = shared.log.debug if os.environ.get('SD_BROWSER_DEBUG', None) is not None else lambda *args, **kwargs: None
debug = logger.log.debug if os.environ.get('SD_BROWSER_DEBUG', None) is not None else lambda *args, **kwargs: None
OPTS_FOLDERS = [
@ -96,7 +97,7 @@ def register_api(app: FastAPI): # register api
}
return content
except Exception as e:
shared.log.error(f'Gallery video: file="{filepath}" {e}')
logger.log.error(f'Gallery video: file="{filepath}" {e}')
return {}
def get_image_thumbnail(filepath):
@ -123,7 +124,7 @@ def register_api(app: FastAPI): # register api
}
return content
except Exception as e:
shared.log.error(f'Gallery image: file="{filepath}" {e}')
logger.log.error(f'Gallery image: file="{filepath}" {e}')
return {}
# @app.get('/sdapi/v1/browser/folders', response_model=List[str])
@ -183,7 +184,7 @@ def register_api(app: FastAPI): # register api
else:
return JSONResponse(content=get_image_thumbnail(decoded))
except Exception as e:
shared.log.error(f'Gallery: {file} {e}')
logger.log.error(f'Gallery: {file} {e}')
content = { 'error': str(e) }
return JSONResponse(content=content)
@ -199,10 +200,10 @@ def register_api(app: FastAPI): # register api
msg = msg[:1] + ":" + msg[4:] if msg[1:4] == "%3A" else msg
lines.append(msg)
t1 = time.time()
shared.log.debug(f'Gallery: type=ht folder="{folder}" files={len(lines)} time={t1-t0:.3f}')
logger.log.debug(f'Gallery: type=ht folder="{folder}" files={len(lines)} time={t1-t0:.3f}')
return lines
except Exception as e:
shared.log.error(f'Gallery: {folder} {e}')
logger.log.error(f'Gallery: {folder} {e}')
return []
shared.api.add_api_route("/sdapi/v1/browser/folders", get_folders, methods=["GET"], response_model=list[str])
@ -228,7 +229,7 @@ def register_api(app: FastAPI): # register api
await manager.send(ws, msg)
await manager.send(ws, '#END#')
t1 = time.time()
shared.log.debug(f'Gallery: type=ws folder="{folder}" files={numFiles} time={t1-t0:.3f}')
logger.log.debug(f'Gallery: type=ws folder="{folder}" files={numFiles} time={t1-t0:.3f}')
except Exception as e:
debug(f'Browser WS error: {e}')
manager.disconnect(ws)

View File

@ -1,5 +1,5 @@
import torch
from installer import log
from modules.logger import log
device = None

View File

@ -5,6 +5,7 @@ import piexif
import piexif.helper
from fastapi.exceptions import HTTPException
from modules import shared, sd_samplers
from modules import logger
def validate_sampler_name(name):
@ -25,7 +26,7 @@ def decode_base64_to_image(encoding, quiet=False):
image = Image.open(data)
return image
except Exception as e:
shared.log.warning(f'API cannot decode image: {e}')
logger.log.warning(f'API cannot decode image: {e}')
# from modules import errors
# errors.display(e, 'API cannot decode image')
if not quiet:
@ -41,7 +42,7 @@ def encode_pil_to_base64(image):
return base64.b64encode(bytes_data)
"""
if not isinstance(image, Image.Image):
shared.log.error('API cannot encode image: not a PIL image')
logger.log.error('API cannot encode image: not a PIL image')
return ''
buffered = io.BytesIO()
save_image(image, fn=buffered, ext=shared.opts.samples_format)
@ -66,7 +67,7 @@ def save_image(image, fn, ext):
image.save(fn, format=image_format, quality=shared.opts.jpeg_quality, pnginfo=pnginfo_data)
elif image_format == 'JPEG':
if image.mode == 'RGBA':
shared.log.warning('Save: RGBA image as JPEG - removed alpha channel')
logger.log.warning('Save: RGBA image as JPEG - removed alpha channel')
image = image.convert("RGB")
elif image.mode == 'I;16':
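# ~1/257 (0.0038910505836576) rescales 16-bit pixel values into the 8-bit range before converting to L mode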
image = image.point(lambda p: p * 0.0038910505836576).convert("L")
@ -87,5 +88,5 @@ def save_image(image, fn, ext):
exif_bytes = piexif.dump({ "Exif": { piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(parameters or "", encoding="unicode") } })
image.save(fn, format=image_format, quality=shared.opts.jpeg_quality, lossless=shared.opts.webp_lossless, exif=exif_bytes)
else:
# shared.log.warning(f'Unrecognized image format: {extension} attempting save as {image_format}')
# logger.log.warning(f'Unrecognized image format: {extension} attempting save as {image_format}')
image.save(fn, format=image_format, quality=shared.opts.jpeg_quality)

View File

@ -10,7 +10,7 @@ from starlette.responses import JSONResponse
from fastapi import FastAPI, Request, Response
from fastapi.exceptions import HTTPException
from fastapi.encoders import jsonable_encoder
from installer import log
from modules.logger import log
import modules.errors as errors

View File

@ -1,5 +1,6 @@
try:
from installer import install, log
from installer import install
from modules.logger import log
except Exception:
def install(*args, **kwargs): # pylint: disable=unused-argument
pass

View File

@ -5,7 +5,7 @@ from enum import IntFlag
try:
from installer import log
from modules.logger import log
except Exception:
import logging
log = logging.getLogger(__name__)

View File

@ -5,11 +5,12 @@ from fastapi import Request, Depends
from fastapi.exceptions import HTTPException
from fastapi.responses import FileResponse
from modules import shared
from modules import logger
from modules.api import models, helpers
def post_shutdown():
shared.log.info('Shutdown request received')
logger.log.info('Shutdown request received')
import sys
sys.exit(0)
@ -21,7 +22,7 @@ def get_js(request: Request):
if ext not in ['js', 'css', 'map', 'html', 'wasm', 'ttf', 'mjs', 'json']:
raise HTTPException(status_code=400, detail=f"invalid file extension: {ext}")
if not os.path.exists(file):
shared.log.error(f"API: file not found: {file}")
logger.log.error(f"API: file not found: {file}")
raise HTTPException(status_code=404, detail=f"file not found: {file}")
if ext in ['js', 'mjs']:
media_type = 'application/javascript'
@ -50,12 +51,12 @@ def get_motd():
res = requests.get('https://vladmandic.github.io/sdnext/motd', timeout=3)
if res.status_code == 200:
msg = (res.text or '').strip()
shared.log.info(f'MOTD: {msg if len(msg) > 0 else "N/A"}')
logger.log.info(f'MOTD: {msg if len(msg) > 0 else "N/A"}')
motd += res.text
else:
shared.log.error(f'MOTD: {res.status_code}')
logger.log.error(f'MOTD: {res.status_code}')
except Exception as err:
shared.log.error(f'MOTD: {err}')
logger.log.error(f'MOTD: {err}')
return motd
def get_version():
@ -67,18 +68,18 @@ def get_platform():
return { **installer_get_platform(), **loader_get_packages() }
def get_log(req: models.ReqGetLog = Depends()):
lines = shared.log.buffer[:req.lines] if req.lines > 0 else shared.log.buffer.copy()
lines = logger.log.buffer[:req.lines] if req.lines > 0 else logger.log.buffer.copy()
if req.clear:
shared.log.buffer.clear()
logger.log.buffer.clear()
return lines
def post_log(req: models.ReqPostLog):
if req.message is not None:
shared.log.info(f'UI: {req.message}')
logger.log.info(f'UI: {req.message}')
if req.debug is not None:
shared.log.debug(f'UI: {req.debug}')
logger.log.debug(f'UI: {req.debug}')
if req.error is not None:
shared.log.error(f'UI: {req.error}')
logger.log.error(f'UI: {req.error}')
return {}
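
A minimal sketch of pulling entries out of the ring-buffered log through the handler above; `ReqGetLog` field names follow their use in `get_log`, and the `modules.api.server` import path is inferred from the imports earlier in this diff:

```python
# hedged sketch: read recent log lines via the get_log handler directly
from modules.api import models, server

recent = server.get_log(models.ReqGetLog(lines=50, clear=False))
for entry in recent:   # each entry is one JSON-style line from the RingBuffer handler
    print(entry)
```
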
@ -132,8 +133,8 @@ def get_progress(req: models.ReqProgress = Depends()):
progress = min((current / total) if current > 0 and total > 0 else 0, 1)
time_since_start = time.time() - shared.state.time_start
eta_relative = (time_since_start / progress) - time_since_start if progress > 0 else 0
# shared.log.critical(f'get_progress: batch {batch_x}/{batch_y} step {step_x}/{step_y} current {current}/{total} time={time_since_start} eta={eta_relative}')
# shared.log.critical(shared.state)
# logger.log.critical(f'get_progress: batch {batch_x}/{batch_y} step {step_x}/{step_y} current {current}/{total} time={time_since_start} eta={eta_relative}')
# logger.log.critical(shared.state)
res = models.ResProgress(id=shared.state.id, progress=round(progress, 2), eta_relative=round(eta_relative, 2), current_image=current_image, textinfo=shared.state.textinfo, state=shared.state.dict(), )
return res

View File

@ -1,5 +1,5 @@
try:
from installer import log
from modules.logger import log
except Exception:
import logging
log = logging.getLogger(__name__)

View File

@ -1,6 +1,7 @@
from functools import wraps
import torch
from modules import rocm
from modules import logger
from modules.errors import log
from installer import install, installed
@ -194,7 +195,7 @@ def set_diffusers_attention(pipe, quiet:bool=False):
if 'Nunchaku' in pipe.unet.__class__.__name__:
pass
else:
shared.log.error(f'Torch attention: type="{name}" cls={attention.__class__.__name__} pipe={pipe.__class__.__name__} {e}')
logger.log.error(f'Torch attention: type="{name}" cls={attention.__class__.__name__} pipe={pipe.__class__.__name__} {e}')
""" # each transformer typically has its own attention processor
if getattr(pipe, "transformer", None) is not None and hasattr(pipe.transformer, "set_attn_processor"):
try:
@ -203,10 +204,10 @@ def set_diffusers_attention(pipe, quiet:bool=False):
if 'Nunchaku' in pipe.transformer.__class__.__name__:
pass
else:
shared.log.error(f'Torch attention: type="{name}" cls={attention.__class__.__name__} pipe={pipe.__class__.__name__} {e}')
logger.log.error(f'Torch attention: type="{name}" cls={attention.__class__.__name__} pipe={pipe.__class__.__name__} {e}')
"""
shared.log.quiet(quiet, f'Setting model: attention="{shared.opts.cross_attention_optimization}"')
logger.log.quiet(quiet, f'Setting model: attention="{shared.opts.cross_attention_optimization}"')
if shared.opts.cross_attention_optimization == "Disabled":
pass # do nothing
elif shared.opts.cross_attention_optimization == "Scaled-Dot-Product": # The default set by Diffusers
@ -216,7 +217,7 @@ def set_diffusers_attention(pipe, quiet:bool=False):
if hasattr(pipe, 'enable_xformers_memory_efficient_attention'):
pipe.enable_xformers_memory_efficient_attention()
else:
shared.log.warning(f"Attention: xFormers is not compatible with {pipe.__class__.__name__}")
logger.log.warning(f"Attention: xFormers is not compatible with {pipe.__class__.__name__}")
elif shared.opts.cross_attention_optimization == "Batch matrix-matrix":
set_attn(pipe, p.AttnProcessor(), name="Batch matrix-matrix")
elif shared.opts.cross_attention_optimization == "Dynamic Attention BMM":
@ -228,6 +229,6 @@ def set_diffusers_attention(pipe, quiet:bool=False):
pipe.enable_attention_slicing()
else:
pipe.disable_attention_slicing()
shared.log.debug(f"Torch attention: slicing={shared.opts.attention_slicing}")
logger.log.debug(f"Torch attention: slicing={shared.opts.attention_slicing}")
pipe.current_attn_name = shared.opts.cross_attention_optimization

View File

@ -1,6 +1,7 @@
import os
from installer import install
from modules import shared
from modules import logger
def apply_cache_dit(pipe):
@ -11,12 +12,12 @@ def apply_cache_dit(pipe):
try:
import cache_dit
except Exception as e:
shared.log.error(f'Cache-DiT: {e}')
logger.log.error(f'Cache-DiT: {e}')
return
_, supported = cache_dit.supported_pipelines()
supported = [s.replace('*', '') for s in supported]
if not any(pipe.__class__.__name__.startswith(s) for s in supported):
shared.log.error(f'Cache-DiT: pipeline={pipe.__class__.__name__} unsupported')
logger.log.error(f'Cache-DiT: pipeline={pipe.__class__.__name__} unsupported')
return
if getattr(pipe, 'has_cache_dit', False):
@ -38,7 +39,7 @@ def apply_cache_dit(pipe):
calibrator_config = cache_dit.FoCaCalibratorConfig()
else:
calibrator_config = None
shared.log.info(f'Apply Cache-DiT: config="{cache_config.strify()}" calibrator="{calibrator_config.strify() if calibrator_config else "None"}"')
logger.log.info(f'Apply Cache-DiT: config="{cache_config.strify()}" calibrator="{calibrator_config.strify() if calibrator_config else "None"}"')
try:
cache_dit.enable_cache(
pipe,
@ -47,7 +48,7 @@ def apply_cache_dit(pipe):
)
shared.sd_model.has_cache_dit = True
except Exception as e:
shared.log.error(f'Cache-DiT: {e}')
logger.log.error(f'Cache-DiT: {e}')
return
@ -57,7 +58,7 @@ def unapply_cache_dir(pipe):
try:
import cache_dit
# stats = cache_dit.summary(pipe)
# shared.log.critical(f'Unapply Cache-DiT: {stats}')
# logger.log.critical(f'Unapply Cache-DiT: {stats}')
cache_dit.disable_cache(pipe)
pipe.has_cache_dit = False
except Exception:

View File

@ -5,6 +5,7 @@ import threading
import time
import cProfile
from modules import shared, progress, errors, timer
from modules import logger
queue_lock = threading.Lock()
@ -14,7 +15,7 @@ debug = os.environ.get('SD_QUEUE_DEBUG', None) is not None
def get_lock():
if debug:
fn = f'{sys._getframe(3).f_code.co_name}:{sys._getframe(2).f_code.co_name}:{sys._getframe(1).f_code.co_name}' # pylint: disable=protected-access
errors.log.debug(f'Queue: fn={fn} lock={queue_lock.locked()}')
logger.log.debug(f'Queue: fn={fn} lock={queue_lock.locked()}')
return queue_lock
@ -41,8 +42,8 @@ def wrap_gradio_gpu_call(func, extra_outputs=None, name=None):
res = func(*args, **kwargs)
progress.record_results(id_task, res)
except Exception as e:
shared.log.error(f"Exception: {e}")
shared.log.error(f"Arguments: args={str(args)[:10240]} kwargs={str(kwargs)[:10240]}")
logger.log.error(f"Exception: {e}")
logger.log.error(f"Arguments: args={str(args)[:10240]} kwargs={str(kwargs)[:10240]}")
errors.display(e, 'gradio call')
res = extra_outputs or []
res.append(f"<div class='error'>{html.escape(str(e))}</div>")
@ -69,7 +70,7 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False, name=None):
res = func(*args, **kwargs)
if res is None:
msg = "No result returned from function"
shared.log.warning(msg)
logger.log.warning(msg)
res = extra_outputs_array or []
res.append(f"<div class='error'>{html.escape(msg)}</div>")
else:

View File

@ -1,6 +1,7 @@
import time
from PIL import Image
from modules import shared
from modules import logger
def caption(image):
@ -9,21 +10,21 @@ def caption(image):
if isinstance(image, dict) and 'name' in image:
image = Image.open(image['name'])
if image is None:
shared.log.error('Caption: no image provided')
logger.log.error('Caption: no image provided')
return ''
t0 = time.time()
if shared.opts.caption_default_type == 'OpenCLiP':
shared.log.info(f'Caption: type={shared.opts.caption_default_type} clip="{shared.opts.caption_openclip_model}" blip="{shared.opts.caption_openclip_blip_model}" mode="{shared.opts.caption_openclip_mode}"')
logger.log.info(f'Caption: type={shared.opts.caption_default_type} clip="{shared.opts.caption_openclip_model}" blip="{shared.opts.caption_openclip_blip_model}" mode="{shared.opts.caption_openclip_mode}"')
from modules.caption import openclip
openclip.load_captioner(clip_model=shared.opts.caption_openclip_model, blip_model=shared.opts.caption_openclip_blip_model)
openclip.update_caption_params()
prompt = openclip.caption(image, mode=shared.opts.caption_openclip_mode)
if shared.opts.caption_offload:
openclip.unload_clip_model()
shared.log.debug(f'Caption: time={time.time()-t0:.2f} answer="{prompt}"')
logger.log.debug(f'Caption: time={time.time()-t0:.2f} answer="{prompt}"')
return prompt
elif shared.opts.caption_default_type == 'Tagger':
shared.log.info(f'Caption: type={shared.opts.caption_default_type} model="{shared.opts.waifudiffusion_model}"')
logger.log.info(f'Caption: type={shared.opts.caption_default_type} model="{shared.opts.waifudiffusion_model}"')
from modules.caption import tagger
prompt = tagger.tag(
image=image,
@ -37,14 +38,14 @@ def caption(image):
use_spaces=shared.opts.tagger_use_spaces,
escape_brackets=shared.opts.tagger_escape_brackets,
)
shared.log.debug(f'Caption: time={time.time()-t0:.2f} answer="{prompt}"')
logger.log.debug(f'Caption: time={time.time()-t0:.2f} answer="{prompt}"')
return prompt
elif shared.opts.caption_default_type == 'VLM':
shared.log.info(f'Caption: type={shared.opts.caption_default_type} vlm="{shared.opts.caption_vlm_model}" prompt="{shared.opts.caption_vlm_prompt}"')
logger.log.info(f'Caption: type={shared.opts.caption_default_type} vlm="{shared.opts.caption_vlm_model}" prompt="{shared.opts.caption_vlm_prompt}"')
from modules.caption import vqa
prompt = vqa.caption(image=image, model_name=shared.opts.caption_vlm_model, question=shared.opts.caption_vlm_prompt, prompt=None, system_prompt=shared.opts.caption_vlm_system)
shared.log.debug(f'Caption: time={time.time()-t0:.2f} answer="{prompt}"')
logger.log.debug(f'Caption: time={time.time()-t0:.2f} answer="{prompt}"')
return prompt
else:
shared.log.error(f'Caption: type="{shared.opts.caption_default_type}" unknown')
logger.log.error(f'Caption: type="{shared.opts.caption_default_type}" unknown')
return ''

View File

@ -5,6 +5,7 @@ import torch
import numpy as np
from PIL import Image
from modules import modelloader, devices, shared, paths
from modules import logger
re_special = re.compile(r'([\\()])')
load_lock = threading.Lock()
@ -19,7 +20,7 @@ class DeepDanbooru:
if self.model is not None:
return
model_path = os.path.join(paths.models_path, "DeepDanbooru")
shared.log.debug(f'Caption load: module=DeepDanbooru folder="{model_path}"')
logger.log.debug(f'Caption load: module=DeepDanbooru folder="{model_path}"')
files = modelloader.load_models(
model_path=model_path,
model_url='https://github.com/AUTOMATIC1111/TorchDeepDanbooru/releases/download/v1/model-resnet_custom_v3.pt',
@ -139,14 +140,14 @@ def load_model(model_name: str = None) -> bool: # pylint: disable=unused-argumen
model.load()
return model.model is not None
except Exception as e:
shared.log.error(f'DeepBooru load: {e}')
logger.log.error(f'DeepBooru load: {e}')
return False
def unload_model():
"""Unload the DeepBooru model and free memory."""
if model.model is not None:
shared.log.debug('DeepBooru unload')
logger.log.debug('DeepBooru unload')
model.model.to(devices.cpu)
model.model = None
devices.torch_gc(force=True)
@ -166,14 +167,14 @@ def tag(image, **kwargs) -> str:
import time
t0 = time.time()
jobid = shared.state.begin('DeepBooru Tag')
shared.log.info(f'DeepBooru: image_size={image.size if image else None}')
logger.log.info(f'DeepBooru: image_size={image.size if image else None}')
try:
result = model.tag(image, **kwargs)
shared.log.debug(f'DeepBooru: complete time={time.time()-t0:.2f} tags={len(result.split(", ")) if result else 0}')
logger.log.debug(f'DeepBooru: complete time={time.time()-t0:.2f} tags={len(result.split(", ")) if result else 0}')
except Exception as e:
result = f"Exception {type(e)}"
shared.log.error(f'DeepBooru: {e}')
logger.log.error(f'DeepBooru: {e}')
shared.state.end(jobid)
return result
@ -264,18 +265,18 @@ def batch(
image_files = unique_files
if not image_files:
shared.log.warning('DeepBooru batch: no images found')
logger.log.warning('DeepBooru batch: no images found')
return ''
t0 = time.time()
jobid = shared.state.begin('DeepBooru Batch')
shared.log.info(f'DeepBooru batch: images={len(image_files)} write={save_output} append={save_append} recursive={recursive}')
logger.log.info(f'DeepBooru batch: images={len(image_files)} write={save_output} append={save_append} recursive={recursive}')
results = []
model.start()
# Progress bar
pbar = rp.Progress(rp.TextColumn('[cyan]DeepBooru:'), rp.BarColumn(), rp.MofNCompleteColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=shared.console)
pbar = rp.Progress(rp.TextColumn('[cyan]DeepBooru:'), rp.BarColumn(), rp.MofNCompleteColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=logger.console)
with pbar:
task = pbar.add_task(total=len(image_files), description='starting...')
@ -283,7 +284,7 @@ def batch(
pbar.update(task, advance=1, description=str(img_path.name))
try:
if shared.state.interrupted:
shared.log.info('DeepBooru batch: interrupted')
logger.log.info('DeepBooru batch: interrupted')
break
image = Image.open(img_path)
@ -296,12 +297,12 @@ def batch(
results.append(f'{img_path.name}: {tags_str[:100]}...' if len(tags_str) > 100 else f'{img_path.name}: {tags_str}')
except Exception as e:
shared.log.error(f'DeepBooru batch: file="{img_path}" error={e}')
logger.log.error(f'DeepBooru batch: file="{img_path}" error={e}')
results.append(f'{img_path.name}: ERROR - {e}')
model.stop()
elapsed = time.time() - t0
shared.log.info(f'DeepBooru batch: complete images={len(results)} time={elapsed:.1f}s')
logger.log.info(f'DeepBooru batch: complete images={len(results)} time={elapsed:.1f}s')
shared.state.end(jobid)
return '\n'.join(results)

View File

@ -13,6 +13,7 @@ import sys
import importlib
from transformers import AutoModelForCausalLM
from modules import shared, devices, paths, sd_models
from modules import logger
# model_path = "deepseek-ai/deepseek-vl2-small"
@ -32,11 +33,11 @@ def load(repo: str):
"""Load DeepSeek VL2 model (experimental)."""
global vl_gpt, vl_chat_processor, loaded_repo # pylint: disable=global-statement
if not shared.cmd_opts.experimental:
shared.log.error(f'Caption: type=vlm model="DeepSeek VL2" repo="{repo}" is experimental-only')
logger.log.error(f'Caption: type=vlm model="DeepSeek VL2" repo="{repo}" is experimental-only')
return False
folder = os.path.join(paths.script_path, 'repositories', 'deepseek-vl2')
if not os.path.exists(folder):
shared.log.error(f'Caption: type=vlm model="DeepSeek VL2" repo="{repo}" deepseek-vl2 repo not found')
logger.log.error(f'Caption: type=vlm model="DeepSeek VL2" repo="{repo}" deepseek-vl2 repo not found')
return False
if vl_gpt is None or loaded_repo != repo:
# GLOBAL PATCHES (not reverted): DeepSeek VL2 requires attrdict and uses LlamaFlashAttention2
@ -58,7 +59,7 @@ def load(repo: str):
vl_gpt.eval() # required: trust_remote_code model
loaded_repo = repo
devices.torch_gc()
shared.log.info(f'Caption: type=vlm model="DeepSeek VL2" repo="{repo}"')
logger.log.info(f'Caption: type=vlm model="DeepSeek VL2" repo="{repo}"')
sd_models.move_model(vl_gpt, devices.device)
return True
@ -67,14 +68,14 @@ def unload():
"""Release DeepSeek VL2 model from GPU/memory."""
global vl_gpt, vl_chat_processor, loaded_repo # pylint: disable=global-statement
if vl_gpt is not None:
shared.log.debug(f'DeepSeek unload: model="{loaded_repo}"')
logger.log.debug(f'DeepSeek unload: model="{loaded_repo}"')
sd_models.move_model(vl_gpt, devices.cpu, force=True)
vl_gpt = None
vl_chat_processor = None
loaded_repo = None
devices.torch_gc(force=True)
else:
shared.log.debug('DeepSeek unload: no model loaded')
logger.log.debug('DeepSeek unload: no model loaded')
def predict(question, image, repo):

View File

@ -3,6 +3,7 @@
from dataclasses import dataclass
from transformers import AutoProcessor, LlavaForConditionalGeneration
from modules import shared, devices, sd_models, model_quant
from modules import logger
"""
@ -63,7 +64,7 @@ def load(repo: str = None):
if llava_model is None or opts.repo != repo:
opts.repo = repo
llava_model = None
shared.log.info(f'Caption: type=vlm model="JoyCaption" {str(opts)}')
logger.log.info(f'Caption: type=vlm model="JoyCaption" {str(opts)}')
processor = AutoProcessor.from_pretrained(repo, max_pixels=1024*1024, cache_dir=shared.opts.hfcache_dir)
quant_args = model_quant.create_config(module='LLM')
llava_model = LlavaForConditionalGeneration.from_pretrained(
@ -80,13 +81,13 @@ def unload():
"""Release JoyCaption model from GPU/memory."""
global llava_model, processor # pylint: disable=global-statement
if llava_model is not None:
shared.log.debug(f'JoyCaption unload: model="{opts.repo}"')
logger.log.debug(f'JoyCaption unload: model="{opts.repo}"')
sd_models.move_model(llava_model, devices.cpu, force=True)
llava_model = None
processor = None
devices.torch_gc(force=True)
else:
shared.log.debug('JoyCaption unload: no model loaded')
logger.log.debug('JoyCaption unload: no model loaded')
def predict(question: str, image, vqa_model: str = None) -> str:

View File

@ -17,6 +17,7 @@ import einops
from einops.layers.torch import Rearrange
import huggingface_hub
from modules import shared, devices, sd_models
from modules import logger
from modules.image import convert
@ -1049,7 +1050,7 @@ def load():
model.eval() # required: custom loader, not from_pretrained
with open(os.path.join(folder, 'top_tags.txt'), encoding='utf8') as f:
tags = [line.strip() for line in f.readlines() if line.strip()]
shared.log.info(f'Caption: type=vlm model="JoyTag" repo="{MODEL_REPO}" tags={len(tags)}')
logger.log.info(f'Caption: type=vlm model="JoyTag" repo="{MODEL_REPO}" tags={len(tags)}')
sd_models.move_model(model, devices.device)
@ -1057,13 +1058,13 @@ def unload():
"""Release JoyTag model from GPU/memory."""
global model, tags # pylint: disable=global-statement
if model is not None:
shared.log.debug('JoyTag unload')
logger.log.debug('JoyTag unload')
sd_models.move_model(model, devices.cpu, force=True)
model = None
tags = None
devices.torch_gc(force=True)
else:
shared.log.debug('JoyTag unload: no model loaded')
logger.log.debug('JoyTag unload: no model loaded')
def predict(image: Image.Image):

View File

@ -9,6 +9,7 @@ import collections
import transformers
from PIL import Image
from modules import shared, devices, sd_models
from modules import logger
from modules.caption import vqa_detection
@ -17,7 +18,7 @@ debug_enabled = os.environ.get('SD_CAPTION_DEBUG', None) is not None
def debug(*args, **kwargs):
if debug_enabled:
shared.log.trace(*args, **kwargs)
logger.log.trace(*args, **kwargs)
# Global state
@ -47,7 +48,7 @@ def load_model(repo: str):
global moondream3_model, loaded # pylint: disable=global-statement
if moondream3_model is None or loaded != repo:
shared.log.debug(f'Caption load: vlm="{repo}"')
logger.log.debug(f'Caption load: vlm="{repo}"')
moondream3_model = None
moondream3_model = transformers.AutoModelForCausalLM.from_pretrained(
@ -410,18 +411,18 @@ def clear_cache():
cache_size = len(image_cache)
image_cache.clear()
debug(f'VQA caption: handler=moondream3 cleared image cache cache_size_was={cache_size}')
shared.log.debug(f'Moondream3: Cleared image cache ({cache_size} entries)')
logger.log.debug(f'Moondream3: Cleared image cache ({cache_size} entries)')
def unload():
"""Release Moondream 3 model from GPU/memory."""
global moondream3_model, loaded # pylint: disable=global-statement
if moondream3_model is not None:
shared.log.debug(f'Moondream3 unload: model="{loaded}"')
logger.log.debug(f'Moondream3 unload: model="{loaded}"')
sd_models.move_model(moondream3_model, devices.cpu, force=True)
moondream3_model = None
loaded = None
clear_cache()
devices.torch_gc(force=True)
else:
shared.log.debug('Moondream3 unload: no model loaded')
logger.log.debug('Moondream3 unload: no model loaded')

View File

@ -6,10 +6,11 @@ import re
import gradio as gr
from PIL import Image
from modules import devices, shared, errors
from modules import logger
debug_enabled = os.environ.get('SD_CAPTION_DEBUG', None) is not None
debug_log = shared.log.trace if debug_enabled else lambda *args, **kwargs: None
debug_log = logger.log.trace if debug_enabled else lambda *args, **kwargs: None
# Per-request overrides for API calls
_clip_overrides = None
@ -107,7 +108,7 @@ def refresh_clip_models():
global clip_models # pylint: disable=global-statement
import open_clip
models = sorted(open_clip.list_pretrained())
shared.log.debug(f'Caption: pkg=openclip version={open_clip.__version__} models={len(models)}')
logger.log.debug(f'Caption: pkg=openclip version={open_clip.__version__} models={len(models)}')
clip_models = ['/'.join(x) for x in models]
return clip_models
@ -141,7 +142,7 @@ def load_captioner(clip_model, blip_model):
t0 = time.time()
device = devices.get_optimal_device()
cache_path = shared.opts.clip_models_path
shared.log.info(f'CLIP load: clip="{clip_model}" blip="{blip_model}" device={device}')
logger.log.info(f'CLIP load: clip="{clip_model}" blip="{blip_model}" device={device}')
debug_log(f'CLIP load: cache_path="{cache_path}" max_length={shared.opts.caption_openclip_max_length} chunk_size={shared.opts.caption_openclip_chunk_size} flavor_count={shared.opts.caption_openclip_flavor_count} offload={shared.opts.caption_offload}')
caption_model, caption_processor = _load_blip_model(blip_model, device)
captioner_config = clip_interrogator.Config(
@ -163,18 +164,18 @@ def load_captioner(clip_model, blip_model):
if blip_model.startswith('blip2-'):
_apply_blip2_fix(ci.caption_model, ci.caption_processor)
shared.log.debug(f'CLIP load: time={time.time()-t0:.2f}')
logger.log.debug(f'CLIP load: time={time.time()-t0:.2f}')
elif clip_model != ci.config.clip_model_name or blip_model != ci.config.caption_model_name:
t0 = time.time()
if clip_model != ci.config.clip_model_name:
shared.log.info(f'CLIP load: clip="{clip_model}" reloading')
logger.log.info(f'CLIP load: clip="{clip_model}" reloading')
debug_log(f'CLIP load: previous clip="{ci.config.clip_model_name}"')
ci.config.clip_model_name = clip_model
ci.config.clip_model = None
ci.load_clip_model()
ci.clip_offloaded = True # Reset flag so _prepare_clip() will move model to device
if blip_model != ci.config.caption_model_name:
shared.log.info(f'CLIP load: blip="{blip_model}" reloading')
logger.log.info(f'CLIP load: blip="{blip_model}" reloading')
debug_log(f'CLIP load: previous blip="{ci.config.caption_model_name}"')
ci.config.caption_model_name = blip_model
caption_model, caption_processor = _load_blip_model(blip_model, ci.device)
@ -183,14 +184,14 @@ def load_captioner(clip_model, blip_model):
ci.caption_offloaded = True # Reset flag so _prepare_caption() will move model to device
if blip_model.startswith('blip2-'):
_apply_blip2_fix(ci.caption_model, ci.caption_processor)
shared.log.debug(f'CLIP load: time={time.time()-t0:.2f}')
logger.log.debug(f'CLIP load: time={time.time()-t0:.2f}')
else:
debug_log(f'CLIP: models already loaded clip="{clip_model}" blip="{blip_model}"')
def unload_clip_model():
if ci is not None and shared.opts.caption_offload:
shared.log.debug('CLIP unload: offloading models to CPU')
logger.log.debug('CLIP unload: offloading models to CPU')
# Direct .to() instead of sd_models.move_model — models are from clip_interrogator, not transformers
if ci.caption_model is not None and hasattr(ci.caption_model, 'to'):
ci.caption_model.to(devices.cpu)
@ -237,7 +238,7 @@ def caption_image(image, clip_model, blip_model, mode, overrides=None):
global _clip_overrides # pylint: disable=global-statement
jobid = shared.state.begin('Caption CLiP')
t0 = time.time()
shared.log.info(f'CLIP: mode="{mode}" clip="{clip_model}" blip="{blip_model}" image_size={image.size if image else None}')
logger.log.info(f'CLIP: mode="{mode}" clip="{clip_model}" blip="{blip_model}" image_size={image.size if image else None}')
if overrides:
debug_log(f'CLIP: overrides={overrides}')
try:
@ -255,10 +256,10 @@ def caption_image(image, clip_model, blip_model, mode, overrides=None):
if shared.opts.caption_offload:
unload_clip_model()
devices.torch_gc()
shared.log.debug(f'CLIP: complete time={time.time()-t0:.2f}')
logger.log.debug(f'CLIP: complete time={time.time()-t0:.2f}')
except Exception as e:
prompt = f"Exception {type(e)}"
shared.log.error(f'CLIP: {e}')
logger.log.error(f'CLIP: {e}')
errors.display(e, 'Caption')
finally:
# Clear per-request overrides
@ -278,10 +279,10 @@ def caption_batch(batch_files, batch_folder, batch_str, clip_model, blip_model,
from modules.files_cache import list_files
files += list(list_files(batch_str, ext_filter=['.png', '.jpg', '.jpeg', '.webp', '.jxl'], recursive=recursive))
if len(files) == 0:
shared.log.warning('CLIP batch: no images found')
logger.log.warning('CLIP batch: no images found')
return ''
t0 = time.time()
shared.log.info(f'CLIP batch: mode="{mode}" images={len(files)} clip="{clip_model}" blip="{blip_model}" write={write} append={append}')
logger.log.info(f'CLIP batch: mode="{mode}" images={len(files)} clip="{clip_model}" blip="{blip_model}" write={write} append={append}')
debug_log(f'CLIP batch: recursive={recursive} files={files[:5]}{"..." if len(files) > 5 else ""}')
jobid = shared.state.begin('Caption batch')
prompts = []
@ -292,14 +293,14 @@ def caption_batch(batch_files, batch_folder, batch_str, clip_model, blip_model,
writer = BatchWriter(os.path.dirname(files[0]), mode=file_mode)
debug_log(f'CLIP batch: writing to "{os.path.dirname(files[0])}" mode="{file_mode}"')
import rich.progress as rp
pbar = rp.Progress(rp.TextColumn('[cyan]Caption:'), rp.BarColumn(), rp.MofNCompleteColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=shared.console)
pbar = rp.Progress(rp.TextColumn('[cyan]Caption:'), rp.BarColumn(), rp.MofNCompleteColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=logger.console)
with pbar:
task = pbar.add_task(total=len(files), description='starting...')
for file in files:
pbar.update(task, advance=1, description=file)
try:
if shared.state.interrupted:
shared.log.info('CLIP batch: interrupted')
logger.log.info('CLIP batch: interrupted')
break
image = Image.open(file).convert('RGB')
prompt = caption(image, mode)
@ -307,20 +308,20 @@ def caption_batch(batch_files, batch_folder, batch_str, clip_model, blip_model,
if write:
writer.add(file, prompt)
except OSError as e:
shared.log.error(f'CLIP batch: file="{file}" error={e}')
logger.log.error(f'CLIP batch: file="{file}" error={e}')
if write:
writer.close()
ci.config.quiet = False
unload_clip_model()
shared.state.end(jobid)
shared.log.info(f'CLIP batch: complete images={len(prompts)} time={time.time()-t0:.2f}')
logger.log.info(f'CLIP batch: complete images={len(prompts)} time={time.time()-t0:.2f}')
return '\n\n'.join(prompts)
def analyze_image(image, clip_model, blip_model):
t0 = time.time()
shared.log.info(f'CLIP analyze: clip="{clip_model}" blip="{blip_model}" image_size={image.size if image else None}')
logger.log.info(f'CLIP analyze: clip="{clip_model}" blip="{blip_model}" image_size={image.size if image else None}')
load_captioner(clip_model, blip_model)
image = image.convert('RGB')
image_features = ci.image_to_features(image)
@ -335,7 +336,7 @@ def analyze_image(image, clip_model, blip_model):
movement_ranks = dict(sorted(zip(top_movements, ci.similarities(image_features, top_movements), strict=False), key=lambda x: x[1], reverse=True))
trending_ranks = dict(sorted(zip(top_trendings, ci.similarities(image_features, top_trendings), strict=False), key=lambda x: x[1], reverse=True))
flavor_ranks = dict(sorted(zip(top_flavors, ci.similarities(image_features, top_flavors), strict=False), key=lambda x: x[1], reverse=True))
shared.log.debug(f'CLIP analyze: complete time={time.time()-t0:.2f}')
logger.log.debug(f'CLIP analyze: complete time={time.time()-t0:.2f}')
# Format labels as text
def format_category(name, ranks):

View File

@ -2,6 +2,7 @@
# Provides a common interface for the Booru Tags tab
from modules import shared
from modules import logger
DEEPBOORU_MODEL = "DeepBooru"
@ -27,7 +28,7 @@ def save_tags_to_file(img_path, tags_str: str, save_append: bool) -> bool:
f.write(tags_str)
return True
except Exception as e:
shared.log.error(f'Tagger batch: failed to save file="{img_path}" error={e}')
logger.log.error(f'Tagger batch: failed to save file="{img_path}" error={e}')
return False

View File

@ -9,6 +9,7 @@ import transformers
import transformers.dynamic_module_utils
from PIL import Image
from modules import shared, devices, errors, model_quant, sd_models, sd_models_compile, ui_symbols
from modules import logger
from modules.caption import vqa_detection
@ -17,7 +18,7 @@ debug_enabled = os.environ.get('SD_CAPTION_DEBUG', None) is not None
def debug(*args, **kwargs):
if debug_enabled:
shared.log.trace(*args, **kwargs)
logger.log.trace(*args, **kwargs)
vlm_default = "Alibaba Qwen 2.5 VL 3B"
vlm_models = {
@ -455,15 +456,15 @@ class VQA:
"""Release VLM model from GPU/memory, including external handlers."""
if self.model is not None:
model_name = self.loaded
shared.log.debug(f'VQA unload: unloading model="{model_name}"')
logger.log.debug(f'VQA unload: unloading model="{model_name}"')
sd_models.move_model(self.model, devices.cpu, force=True)
self.model = None
self.processor = None
self.loaded = None
devices.torch_gc(force=True, reason='vqa unload')
shared.log.debug(f'VQA unload: model="{model_name}" unloaded')
logger.log.debug(f'VQA unload: model="{model_name}" unloaded')
else:
shared.log.debug('VQA unload: no internal model loaded')
logger.log.debug('VQA unload: no internal model loaded')
# External handlers manage their own module-level globals and are not covered by self.model
from modules.caption import moondream3, joycaption, joytag, deepseek
moondream3.unload()
@ -475,14 +476,14 @@ class VQA:
"""Load VLM model into memory for the specified model name."""
model_name = model_name or shared.opts.caption_vlm_model
if not model_name:
shared.log.warning('VQA load: no model specified')
logger.log.warning('VQA load: no model specified')
return
repo = vlm_models.get(model_name)
if repo is None:
shared.log.error(f'VQA load: unknown model="{model_name}"')
logger.log.error(f'VQA load: unknown model="{model_name}"')
return
shared.log.debug(f'VQA load: pre-loading model="{model_name}" repo="{repo}"')
logger.log.debug(f'VQA load: pre-loading model="{model_name}" repo="{repo}"')
# Dispatch to appropriate loader (same logic as caption)
repo_lower = repo.lower()
@ -515,34 +516,34 @@ class VQA:
elif 'moondream3' in repo_lower:
from modules.caption import moondream3
moondream3.load_model(repo)
shared.log.info(f'VQA load: model="{model_name}" loaded (external handler)')
logger.log.info(f'VQA load: model="{model_name}" loaded (external handler)')
return
elif 'joytag' in repo_lower:
from modules.caption import joytag
joytag.load()
shared.log.info(f'VQA load: model="{model_name}" loaded (external handler)')
logger.log.info(f'VQA load: model="{model_name}" loaded (external handler)')
return
elif 'joycaption' in repo_lower:
from modules.caption import joycaption
joycaption.load(repo)
shared.log.info(f'VQA load: model="{model_name}" loaded (external handler)')
logger.log.info(f'VQA load: model="{model_name}" loaded (external handler)')
return
elif 'deepseek' in repo_lower:
from modules.caption import deepseek
deepseek.load(repo)
shared.log.info(f'VQA load: model="{model_name}" loaded (external handler)')
logger.log.info(f'VQA load: model="{model_name}" loaded (external handler)')
return
else:
shared.log.warning(f'VQA load: no pre-loader for model="{model_name}"')
logger.log.warning(f'VQA load: no pre-loader for model="{model_name}"')
return
sd_models.move_model(self.model, devices.device)
shared.log.info(f'VQA load: model="{model_name}" loaded')
logger.log.info(f'VQA load: model="{model_name}" loaded')
def _load_fastvlm(self, repo: str):
"""Load FastVLM model and tokenizer."""
if self.model is None or self.loaded != repo:
shared.log.debug(f'Caption load: vlm="{repo}"')
logger.log.debug(f'Caption load: vlm="{repo}"')
quant_args = model_quant.create_config(module='LLM')
self.model = None
self.processor = transformers.AutoTokenizer.from_pretrained(repo, trust_remote_code=True, cache_dir=shared.opts.hfcache_dir)
@ -596,7 +597,7 @@ class VQA:
def _load_qwen(self, repo: str):
"""Load Qwen VL model and processor."""
if self.model is None or self.loaded != repo:
shared.log.debug(f'Caption load: vlm="{repo}"')
logger.log.debug(f'Caption load: vlm="{repo}"')
self.model = None
if 'Qwen3-VL' in repo or 'Qwen3VL' in repo:
cls_name = transformers.Qwen3VLForConditionalGeneration
@ -720,7 +721,7 @@ class VQA:
def _load_gemma(self, repo: str):
"""Load Gemma 3 model and processor."""
if self.model is None or self.loaded != repo:
shared.log.debug(f'Caption load: vlm="{repo}"')
logger.log.debug(f'Caption load: vlm="{repo}"')
self.model = None
if '3n' in repo:
cls = transformers.Gemma3nForConditionalGeneration # pylint: disable=no-member
@ -834,7 +835,7 @@ class VQA:
def _load_paligemma(self, repo: str):
"""Load PaliGemma model and processor."""
if self.model is None or self.loaded != repo:
shared.log.debug(f'Caption load: vlm="{repo}"')
logger.log.debug(f'Caption load: vlm="{repo}"')
self.processor = transformers.PaliGemmaProcessor.from_pretrained(repo, cache_dir=shared.opts.hfcache_dir)
self.model = None
self.model = transformers.PaliGemmaForConditionalGeneration.from_pretrained(
@ -864,7 +865,7 @@ class VQA:
def _load_ovis(self, repo: str):
"""Load Ovis model (requires flash-attn)."""
if self.model is None or self.loaded != repo:
shared.log.debug(f'Caption load: vlm="{repo}"')
logger.log.debug(f'Caption load: vlm="{repo}"')
self.model = None
# Ovis remote code calls AutoConfig.register("aimv2", ...) at module scope
# without exist_ok=True, which fails on reload or when the type is already registered
@ -889,7 +890,7 @@ class VQA:
try:
import flash_attn # pylint: disable=unused-import
except Exception:
shared.log.error(f'Caption: vlm="{repo}" flash-attn is not available')
logger.log.error(f'Caption: vlm="{repo}" flash-attn is not available')
return ''
self._load_ovis(repo)
sd_models.move_model(self.model, devices.device)
@ -921,7 +922,7 @@ class VQA:
def _load_smol(self, repo: str):
"""Load SmolVLM model and processor."""
if self.model is None or self.loaded != repo:
shared.log.debug(f'Caption load: vlm="{repo}"')
logger.log.debug(f'Caption load: vlm="{repo}"')
self.model = None
quant_args = model_quant.create_config(module='LLM')
self.model = transformers.AutoModelForVision2Seq.from_pretrained(
@ -1016,7 +1017,7 @@ class VQA:
def _load_git(self, repo: str):
"""Load Microsoft GIT model and processor."""
if self.model is None or self.loaded != repo:
shared.log.debug(f'Caption load: vlm="{repo}"')
logger.log.debug(f'Caption load: vlm="{repo}"')
self.model = None
self.model = transformers.GitForCausalLM.from_pretrained(
repo,
@ -1047,7 +1048,7 @@ class VQA:
def _load_blip(self, repo: str):
"""Load Salesforce BLIP model and processor."""
if self.model is None or self.loaded != repo:
shared.log.debug(f'Caption load: vlm="{repo}"')
logger.log.debug(f'Caption load: vlm="{repo}"')
self.model = None
self.model = transformers.BlipForQuestionAnswering.from_pretrained(
repo,
@ -1072,7 +1073,7 @@ class VQA:
def _load_vilt(self, repo: str):
"""Load ViLT model and processor."""
if self.model is None or self.loaded != repo:
shared.log.debug(f'Caption load: vlm="{repo}"')
logger.log.debug(f'Caption load: vlm="{repo}"')
self.model = None
self.model = transformers.ViltForQuestionAnswering.from_pretrained(
repo,
@ -1099,7 +1100,7 @@ class VQA:
def _load_pix(self, repo: str):
"""Load Pix2Struct model and processor."""
if self.model is None or self.loaded != repo:
shared.log.debug(f'Caption load: vlm="{repo}"')
logger.log.debug(f'Caption load: vlm="{repo}"')
self.model = None
self.model = transformers.Pix2StructForConditionalGeneration.from_pretrained(
repo,
@ -1127,7 +1128,7 @@ class VQA:
def _load_moondream(self, repo: str):
"""Load Moondream 2 model and tokenizer."""
if self.model is None or self.loaded != repo:
shared.log.debug(f'Caption load: vlm="{repo}"')
logger.log.debug(f'Caption load: vlm="{repo}"')
self.model = None
self.model = transformers.AutoModelForCausalLM.from_pretrained(
repo,
@ -1225,7 +1226,7 @@ class VQA:
effective_revision = revision_from_repo
if self.model is None or self.loaded != cache_key:
shared.log.debug(f'Caption load: vlm="{repo_name}" revision="{effective_revision}" path="{shared.opts.hfcache_dir}"')
logger.log.debug(f'Caption load: vlm="{repo_name}" revision="{effective_revision}" path="{shared.opts.hfcache_dir}"')
transformers.dynamic_module_utils.get_imports = get_imports
self.model = None
quant_args = model_quant.create_config(module='LLM')
@ -1354,7 +1355,7 @@ class VQA:
if image.mode != 'RGB':
image = image.convert('RGB')
if image is None:
shared.log.error(f'VQA caption: model="{model_name}" error="No input image provided"')
logger.log.error(f'VQA caption: model="{model_name}" error="No input image provided"')
self._generation_overrides = None
shared.state.end(jobid)
return 'Error: No input image provided. Please upload or select an image.'
@ -1363,7 +1364,7 @@ class VQA:
if question == "Use Prompt":
# Use content from Prompt field directly - requires user input
if not prompt or len(prompt.strip()) < 2:
shared.log.error(f'VQA caption: model="{model_name}" error="Please enter a prompt"')
logger.log.error(f'VQA caption: model="{model_name}" error="Please enter a prompt"')
self._generation_overrides = None
shared.state.end(jobid)
return 'Error: Please enter a question or instruction in the Prompt field.'
@ -1374,7 +1375,7 @@ class VQA:
if raw_mapping in ("POINT_MODE", "DETECT_MODE"):
# These modes require user input in the prompt field
if not prompt or len(prompt.strip()) < 2:
shared.log.error(f'VQA caption: model="{model_name}" error="Please specify what to find in the prompt field"')
logger.log.error(f'VQA caption: model="{model_name}" error="Please specify what to find in the prompt field"')
self._generation_overrides = None
shared.state.end(jobid)
return 'Error: Please specify what to find in the prompt field (e.g., "the red car" or "faces").'
@ -1387,12 +1388,12 @@ class VQA:
try:
if model_name is None:
shared.log.error(f'Caption: type=vlm model="{model_name}" no model selected')
logger.log.error(f'Caption: type=vlm model="{model_name}" no model selected')
shared.state.end(jobid)
return ''
vqa_model = vlm_models.get(model_name, None)
if vqa_model is None:
shared.log.error(f'Caption: type=vlm model="{model_name}" unknown')
logger.log.error(f'Caption: type=vlm model="{model_name}" unknown')
shared.state.end(jobid)
return ''
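Every early return above has to remember to call shared.state.end(jobid), which is the classic shape for a try/finally wrapper. A sketch of one way to make the bracket unforgettable, assuming only the begin(name) -> jobid / end(jobid) interface visible in this diff:

from contextlib import contextmanager

@contextmanager
def job(state, name):
    jobid = state.begin(name)
    try:
        yield jobid
    finally:
        state.end(jobid)  # runs on return, exception, or generator close

Call sites could then read: with job(shared.state, 'Caption batch'): ... and drop the per-branch end calls.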
@ -1484,7 +1485,7 @@ class VQA:
debug(f'VQA caption: handler={handler} response_after_clean="{answer}" has_annotation={self.last_annotated_image is not None}')
t1 = time.time()
if not quiet:
shared.log.debug(f'Caption: type=vlm model="{model_name}" repo="{vqa_model}" args={get_kwargs()} time={t1-t0:.2f}')
logger.log.debug(f'Caption: type=vlm model="{model_name}" repo="{vqa_model}" args={get_kwargs()} time={t1-t0:.2f}')
self._generation_overrides = None # Clear per-request overrides
shared.state.end(jobid)
return answer
@ -1518,7 +1519,7 @@ class VQA:
from modules.files_cache import list_files
files += list(list_files(batch_str, ext_filter=['.png', '.jpg', '.jpeg', '.webp', '.jxl'], recursive=recursive))
if len(files) == 0:
shared.log.warning('Caption batch: type=vlm no images')
logger.log.warning('Caption batch: type=vlm no images')
return ''
jobid = shared.state.begin('Caption batch')
prompts = []
@ -1529,7 +1530,7 @@ class VQA:
shared.opts.caption_offload = False
try:
import rich.progress as rp
pbar = rp.Progress(rp.TextColumn('[cyan]Caption:'), rp.BarColumn(), rp.MofNCompleteColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=shared.console)
pbar = rp.Progress(rp.TextColumn('[cyan]Caption:'), rp.BarColumn(), rp.MofNCompleteColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=logger.console)
with pbar:
task = pbar.add_task(total=len(files), description='starting...')
for file in files:
@ -1547,7 +1548,7 @@ class VQA:
if write:
writer.add(file, result)
except Exception as e:
shared.log.error(f'Caption batch: {e}')
logger.log.error(f'Caption batch: {e}')
if write:
writer.close()
finally:

View File

@ -8,11 +8,12 @@ import threading
import numpy as np
from PIL import Image
from modules import shared, devices, errors
from modules import logger
# Debug logging - enable with SD_CAPTION_DEBUG environment variable
debug_enabled = os.environ.get('SD_CAPTION_DEBUG', None) is not None
debug_log = shared.log.trace if debug_enabled else lambda *args, **kwargs: None
debug_log = logger.log.trace if debug_enabled else lambda *args, **kwargs: None
re_special = re.compile(r'([\\()])')
load_lock = threading.Lock()
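The debug_log binding above is this commit's standard opt-in tracing idiom: the name is resolved once at import time to either the real logging method or a do-nothing lambda, so when the flag is off a call site costs one empty function call and never touches the logging machinery. A standalone sketch (the project binds logger.log.trace, a custom level; stock logging offers .debug, used here):

import os
import logging

log = logging.getLogger('sd')
debug_enabled = os.environ.get('SD_CAPTION_DEBUG', None) is not None
debug_log = log.debug if debug_enabled else lambda *args, **kwargs: None  # bound once

debug_log('emitted only when SD_CAPTION_DEBUG is set')

Note the arguments are still evaluated either way; the idiom removes the handler and filter overhead, not the f-string formatting cost.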
@ -56,7 +57,7 @@ class WaifuDiffusionTagger:
if model_name is None:
model_name = shared.opts.waifudiffusion_model
if model_name not in WAIFUDIFFUSION_MODELS:
shared.log.error(f'WaifuDiffusion: unknown model "{model_name}"')
logger.log.error(f'WaifuDiffusion: unknown model "{model_name}"')
return False
with load_lock:
@ -71,7 +72,7 @@ class WaifuDiffusionTagger:
repo_id = WAIFUDIFFUSION_MODELS[model_name]
t0 = time.time()
shared.log.info(f'WaifuDiffusion load: model="{model_name}" repo="{repo_id}"')
logger.log.info(f'WaifuDiffusion load: model="{model_name}" repo="{repo_id}"')
try:
# Download only ONNX model and tags CSV (skip safetensors/msgpack variants)
@ -86,7 +87,7 @@ class WaifuDiffusionTagger:
# Load ONNX model
model_file = os.path.join(self.model_path, "model.onnx")
if not os.path.exists(model_file):
shared.log.error(f'WaifuDiffusion load: model file not found: {model_file}')
logger.log.error(f'WaifuDiffusion load: model file not found: {model_file}')
return False
import onnxruntime as ort
@ -104,12 +105,12 @@ class WaifuDiffusionTagger:
self._load_tags()
load_time = time.time() - t0
shared.log.debug(f'WaifuDiffusion load: time={load_time:.2f} tags={len(self.tags)}')
logger.log.debug(f'WaifuDiffusion load: time={load_time:.2f} tags={len(self.tags)}')
debug_log(f'WaifuDiffusion load: input_name={self.session.get_inputs()[0].name} output_name={self.session.get_outputs()[0].name}')
return True
except Exception as e:
shared.log.error(f'WaifuDiffusion load: failed error={e}')
logger.log.error(f'WaifuDiffusion load: failed error={e}')
errors.display(e, 'WaifuDiffusion load')
self.unload()
return False
@ -120,7 +121,7 @@ class WaifuDiffusionTagger:
csv_path = os.path.join(self.model_path, "selected_tags.csv")
if not os.path.exists(csv_path):
shared.log.error(f'WaifuDiffusion load: tags file not found: {csv_path}')
logger.log.error(f'WaifuDiffusion load: tags file not found: {csv_path}')
return
self.tags = []
@ -141,7 +142,7 @@ class WaifuDiffusionTagger:
def unload(self):
"""Unload the model and free resources."""
if self.session is not None:
shared.log.debug(f'WaifuDiffusion unload: model="{self.model_name}"')
logger.log.debug(f'WaifuDiffusion unload: model="{self.model_name}"')
self.session = None
self.tags = None
self.tag_categories = None
@ -240,7 +241,7 @@ class WaifuDiffusionTagger:
if isinstance(image, dict) and 'name' in image:
image = Image.open(image['name'])
if image is None:
shared.log.error('WaifuDiffusion predict: no image provided')
logger.log.error('WaifuDiffusion predict: no image provided')
return ''
# Load model if needed
@ -374,19 +375,19 @@ def tag(image: Image.Image, model_name: str = None, **kwargs) -> str:
"""
t0 = time.time()
jobid = shared.state.begin('WaifuDiffusion Tag')
shared.log.info(f'WaifuDiffusion: model="{model_name or tagger.model_name or shared.opts.waifudiffusion_model}" image_size={image.size if image else None}')
logger.log.info(f'WaifuDiffusion: model="{model_name or tagger.model_name or shared.opts.waifudiffusion_model}" image_size={image.size if image else None}')
try:
if model_name and model_name != tagger.model_name:
tagger.load(model_name)
result = tagger.predict(image, **kwargs)
shared.log.debug(f'WaifuDiffusion: complete time={time.time()-t0:.2f} tags={len(result.split(", ")) if result else 0}')
logger.log.debug(f'WaifuDiffusion: complete time={time.time()-t0:.2f} tags={len(result.split(", ")) if result else 0}')
# Offload model if setting enabled
if shared.opts.caption_offload:
tagger.unload()
except Exception as e:
result = f"Exception {type(e)}"
shared.log.error(f'WaifuDiffusion: {e}')
logger.log.error(f'WaifuDiffusion: {e}')
errors.display(e, 'WaifuDiffusion Tag')
shared.state.end(jobid)
@ -479,19 +480,19 @@ def batch(
image_files = unique_files
if not image_files:
shared.log.warning('WaifuDiffusion batch: no images found')
logger.log.warning('WaifuDiffusion batch: no images found')
return ''
t0 = time.time()
jobid = shared.state.begin('WaifuDiffusion Batch')
shared.log.info(f'WaifuDiffusion batch: model="{tagger.model_name}" images={len(image_files)} write={save_output} append={save_append} recursive={recursive}')
logger.log.info(f'WaifuDiffusion batch: model="{tagger.model_name}" images={len(image_files)} write={save_output} append={save_append} recursive={recursive}')
debug_log(f'WaifuDiffusion batch: files={[str(f) for f in image_files[:5]]}{"..." if len(image_files) > 5 else ""}')
results = []
# Progress bar
import rich.progress as rp
pbar = rp.Progress(rp.TextColumn('[cyan]WaifuDiffusion:'), rp.BarColumn(), rp.MofNCompleteColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=shared.console)
pbar = rp.Progress(rp.TextColumn('[cyan]WaifuDiffusion:'), rp.BarColumn(), rp.MofNCompleteColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=logger.console)
with pbar:
task = pbar.add_task(total=len(image_files), description='starting...')
@ -499,7 +500,7 @@ def batch(
pbar.update(task, advance=1, description=str(img_path.name))
try:
if shared.state.interrupted:
shared.log.info('WaifuDiffusion batch: interrupted')
logger.log.info('WaifuDiffusion batch: interrupted')
break
image = Image.open(img_path)
@ -512,11 +513,11 @@ def batch(
results.append(f'{img_path.name}: {tags_str[:100]}...' if len(tags_str) > 100 else f'{img_path.name}: {tags_str}')
except Exception as e:
shared.log.error(f'WaifuDiffusion batch: file="{img_path}" error={e}')
logger.log.error(f'WaifuDiffusion batch: file="{img_path}" error={e}')
results.append(f'{img_path.name}: ERROR - {e}')
elapsed = time.time() - t0
shared.log.info(f'WaifuDiffusion batch: complete images={len(results)} time={elapsed:.1f}s')
logger.log.info(f'WaifuDiffusion batch: complete images={len(results)} time={elapsed:.1f}s')
shared.state.end(jobid)
return '\n'.join(results)
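Both batch loops in this file build the same rich progress bar, now handed logger.console instead of shared.console; sharing the application's single Console instance is what lets rich interleave log lines with a live bar instead of tearing the output. A reduced sketch, assuming only that modules.logger exposes a rich Console named console and a logger named log:

import rich.progress as rp
from modules.logger import console, log

files = ['a.png', 'b.png']  # illustrative
pbar = rp.Progress(
    rp.TextColumn('[cyan]Tagging:'),
    rp.BarColumn(),
    rp.MofNCompleteColumn(),
    rp.TimeElapsedColumn(),
    console=console,  # the shared console keeps bar and log output coherent
)
with pbar:
    task = pbar.add_task(total=len(files), description='starting...')
    for f in files:
        pbar.update(task, advance=1, description=f)
        log.debug(f'processed file={f}')  # with a RichHandler on the same console, this renders above the bar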

View File

@ -1,6 +1,7 @@
# reference: <https://github.com/WeichenFan/CFG-Zero-star>
from modules import shared, processing, sd_models
from modules import logger
orig_pipeline = None
@ -48,7 +49,7 @@ def apply(p: processing.StableDiffusionProcessing):
from modules.cfgzero.hunyuan_t2v_pipeline import HunyuanVideoCFGZeroPipeline
shared.sd_model = sd_models.switch_pipe(HunyuanVideoCFGZeroPipeline, shared.sd_model)
shared.log.debug(f'Apply CFGZero: cls={cls} init={shared.opts.cfgzero_enabled} star={shared.opts.cfgzero_star} steps={shared.opts.cfgzero_steps}')
logger.log.debug(f'Apply CFGZero: cls={cls} init={shared.opts.cfgzero_enabled} star={shared.opts.cfgzero_star} steps={shared.opts.cfgzero_steps}')
p.task_args['use_zero_init'] = shared.opts.cfgzero_enabled
p.task_args['use_cfg_zero_star'] = shared.opts.cfgzero_star
p.task_args['zero_steps'] = int(shared.opts.cfgzero_steps)

View File

@ -3,6 +3,7 @@ import json
import rich.progress as p
from PIL import Image
from modules import shared, errors, paths
from modules import logger
pbar = None
@ -13,15 +14,15 @@ def save_video_frame(filepath: str):
try:
frames, fps, duration, w, h, codec, frame = video.get_video_params(filepath, capture=True)
except Exception as e:
shared.log.error(f'Video: file={filepath} {e}')
logger.log.error(f'Video: file={filepath} {e}')
return None
if frame is not None:
basename = os.path.splitext(filepath)
thumb = f'{basename[0]}.thumb.jpg'
shared.log.debug(f'Video: file={filepath} frames={frames} fps={fps} size={w}x{h} codec={codec} duration={duration} thumb={thumb}')
logger.log.debug(f'Video: file={filepath} frames={frames} fps={fps} size={w}x{h} codec={codec} duration={duration} thumb={thumb}')
frame.save(thumb)
else:
shared.log.error(f'Video: file={filepath} no frames found')
logger.log.error(f'Video: file={filepath} no frames found')
return frame
@ -33,11 +34,11 @@ def download_civit_meta(model_path: str, model_id):
try:
data = r.json()
shared.writefile(data, filename=fn, mode='w', silent=True)
shared.log.info(f'CivitAI download: id={model_id} url={url} file="{fn}"')
logger.log.info(f'CivitAI download: id={model_id} url={url} file="{fn}"')
return r.status_code, len(data), '' # code/size/note
except Exception as e:
errors.display(e, 'civitai meta')
shared.log.error(f'CivitAI meta: id={model_id} url={url} file="{fn}" {e}')
logger.log.error(f'CivitAI meta: id={model_id} url={url} file="{fn}" {e}')
return r.status_code, '', str(e)
return r.status_code, '', ''
@ -52,7 +53,7 @@ def download_civit_preview(model_path: str, preview_url: str):
is_video = preview_file.lower().endswith('.mp4')
is_json = preview_file.lower().endswith('.json')
if is_json:
shared.log.warning(f'CivitAI download: url="{preview_url}" skip json')
logger.log.warning(f'CivitAI download: url="{preview_url}" skip json')
return 500, '', 'expected preview image, got json'
if os.path.exists(preview_file):
return 304, '', 'already exists'
@ -64,7 +65,7 @@ def download_civit_preview(model_path: str, preview_url: str):
img = None
jobid = shared.state.begin('Download CivitAI')
if pbar is None:
pbar = p.Progress(p.TextColumn('[cyan]Download'), p.DownloadColumn(), p.BarColumn(), p.TaskProgressColumn(), p.TimeRemainingColumn(), p.TimeElapsedColumn(), p.TransferSpeedColumn(), p.TextColumn('[yellow]{task.description}'), console=shared.console)
pbar = p.Progress(p.TextColumn('[cyan]Download'), p.DownloadColumn(), p.BarColumn(), p.TaskProgressColumn(), p.TimeRemainingColumn(), p.TimeElapsedColumn(), p.TransferSpeedColumn(), p.TextColumn('[yellow]{task.description}'), console=logger.console)
try:
with open(preview_file, 'wb') as f:
with pbar:
@ -81,13 +82,13 @@ def download_civit_preview(model_path: str, preview_url: str):
else:
img = Image.open(preview_file)
except Exception as e:
shared.log.error(f'CivitAI download error: url={preview_url} file="{preview_file}" written={written} {e}')
logger.log.error(f'CivitAI download error: url={preview_url} file="{preview_file}" written={written} {e}')
shared.state.end(jobid)
return 500, '', str(e)
shared.state.end(jobid)
if img is None:
return 500, '', 'image is none'
shared.log.info(f'CivitAI download: url={preview_url} file="{preview_file}" size={total_size} image={img.size}')
logger.log.info(f'CivitAI download: url={preview_url} file="{preview_file}" size={total_size} image={img.size}')
img.close()
return 200, str(total_size), '' # code/size/note
@ -135,17 +136,17 @@ def download_civit_model_thread(model_name: str, model_url: str, model_path: str
res = f'Model download: name="{model_name}" url="{model_url}" path="{model_path}" temp="{temp_file}"'
if os.path.isfile(model_file):
res += ' already exists'
shared.log.warning(res)
logger.log.warning(res)
return res
res += f' size={round((starting_pos + total_size)/1024/1024, 2)}Mb'
shared.log.info(res)
logger.log.info(res)
jobid = shared.state.begin('Download CivitAI')
block_size = 16384 # 16KB blocks
written = starting_pos
global pbar # pylint: disable=global-statement
if pbar is None:
pbar = p.Progress(p.TextColumn('[cyan]{task.description}'), p.DownloadColumn(), p.BarColumn(), p.TaskProgressColumn(), p.TimeRemainingColumn(), p.TimeElapsedColumn(), p.TransferSpeedColumn(), p.TextColumn('[cyan]{task.fields[name]}'), console=shared.console)
pbar = p.Progress(p.TextColumn('[cyan]{task.description}'), p.DownloadColumn(), p.BarColumn(), p.TaskProgressColumn(), p.TimeRemainingColumn(), p.TimeElapsedColumn(), p.TransferSpeedColumn(), p.TextColumn('[cyan]{task.fields[name]}'), console=logger.console)
with pbar:
task = pbar.add_task(description="Download starting", total=starting_pos+total_size, name=model_name)
try:
@ -153,7 +154,7 @@ def download_civit_model_thread(model_name: str, model_url: str, model_path: str
for data in r.iter_content(block_size):
if written == 0:
try: # check if response is JSON message instead of bytes
shared.log.error(f'Model download: response={json.loads(data.decode("utf-8"))}')
logger.log.error(f'Model download: response={json.loads(data.decode("utf-8"))}')
raise ValueError('response: type=json expected=bytes')
except Exception: # this is good
pass
@ -164,14 +165,14 @@ def download_civit_model_thread(model_name: str, model_url: str, model_path: str
os.remove(temp_file)
raise ValueError(f'removed invalid download: bytes={written}')
except Exception as e:
shared.log.error(f'{res} {e}')
logger.log.error(f'{res} {e}')
finally:
pbar.stop_task(task)
pbar.remove_task(task)
if starting_pos+total_size != written:
shared.log.warning(f'{res} written={round(written/1024/1024)}Mb incomplete download')
logger.log.warning(f'{res} written={round(written/1024/1024)}Mb incomplete download')
elif os.path.exists(temp_file):
shared.log.debug(f'Model download complete: temp="{temp_file}" path="{model_file}"')
logger.log.debug(f'Model download complete: temp="{temp_file}" path="{model_file}"')
os.rename(temp_file, model_file)
shared.state.end(jobid)
if os.path.exists(model_file):
@ -183,7 +184,7 @@ def download_civit_model_thread(model_name: str, model_url: str, model_path: str
def download_civit_model(model_url: str, model_name: str = '', model_path: str = '', model_type: str = '', token: str = None):
import threading
if model_url is None or len(model_url) == 0:
shared.log.error('Model download: no url provided')
logger.log.error('Model download: no url provided')
return
thread = threading.Thread(target=download_civit_model_thread, args=(model_name, model_url, model_path, model_type, token))
thread.start()
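download_civit_model_thread combines three ideas worth isolating: resume from starting_pos, stream in fixed-size blocks, and sniff the first chunk for a JSON error body that some endpoints return in place of model bytes. A compact sketch of that flow using plain requests (URL handling, sizes, and names are illustrative):

import json
import os
import requests

def download(url, temp_file, model_file, starting_pos=0, block_size=16384):
    headers = {'Range': f'bytes={starting_pos}-'} if starting_pos else {}
    written = starting_pos
    with requests.get(url, headers=headers, stream=True, timeout=60) as r:
        with open(temp_file, 'ab' if starting_pos else 'wb') as f:
            for data in r.iter_content(block_size):
                if written == starting_pos:  # first chunk only
                    try:
                        msg = json.loads(data.decode('utf-8'))
                    except Exception:
                        msg = None  # not JSON: the expected binary payload
                    if msg is not None:
                        raise ValueError(f'expected bytes, got json: {msg}')
                f.write(data)
                written += len(data)
    os.rename(temp_file, model_file)  # promote only after a complete write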

View File

@ -2,7 +2,8 @@ from dataclasses import dataclass
import os
import json
import time
from installer import install, log
from installer import install
from modules.logger import log
full_dct = False

View File

@ -10,7 +10,8 @@ os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import cv2
import numpy as np
from PIL import Image
from installer import installed, pip, log
from installer import installed, pip
from modules.logger import log
from modules.control.util import HWC3, resize_image
from .draw import draw_bodypose, draw_handpose, draw_facepose
checked_ok = False

View File

@ -9,7 +9,8 @@ checked_ok = False
def check_dependencies():
global checked_ok # pylint: disable=global-statement
from installer import installed, install, log
from installer import installed, install
from modules.logger import log
packages = [('mediapipe', 'mediapipe')]
for pkg in packages:
if not installed(pkg[1], reload=True, quiet=True):

View File

@ -5,13 +5,14 @@ import numpy as np
from PIL import Image
from modules.processing_class import StableDiffusionProcessingControl
from modules import shared, images, masking, sd_models
from modules import logger
from modules.timer import process as process_timer
from modules.control import util
from modules.control import processors as control_processors
debug = os.environ.get('SD_CONTROL_DEBUG', None) is not None
debug_log = shared.log.trace if debug else lambda *args, **kwargs: None
debug_log = logger.log.trace if debug else lambda *args, **kwargs: None
processors = [
'None',
'OpenPose',
@ -137,7 +138,7 @@ def preprocess_image(
except Exception:
pass
if any(img is None for img in processed_images):
shared.log.error('Control: one or more processed images are None')
logger.log.error('Control: one or more processed images are None')
processed_images = [img for img in processed_images if img is not None]
if len(processed_images) > 1 and len(active_process) != len(active_model):
processed_image = [np.array(i) for i in processed_images]
@ -155,7 +156,7 @@ def preprocess_image(
debug_log(f'Control: inputs match: input={len(processed_images)} models={len(selected_models)}')
p.init_images = processed_images
elif isinstance(selected_models, list) and len(processed_images) != len(selected_models):
shared.log.error(f'Control: number of inputs does not match: input={len(processed_images)} models={len(selected_models)}')
logger.log.error(f'Control: number of inputs does not match: input={len(processed_images)} models={len(selected_models)}')
elif selected_models is not None:
p.init_images = processed_image
else:
@ -170,7 +171,7 @@ def preprocess_image(
p.task_args['ref_image'] = p.ref_image
debug_log(f'Control: process=None image={p.ref_image}')
if p.ref_image is None:
shared.log.error('Control: reference mode without image')
logger.log.error('Control: reference mode without image')
elif unit_type == 'controlnet' and has_models:
if input_type == 0: # Control only
if 'control_image' in possible:
@ -198,7 +199,7 @@ def preprocess_image(
p.task_args['strength'] = p.denoising_strength
elif input_type == 2: # Separate init image
if init_image is None:
shared.log.warning('Control: separate init image not provided')
logger.log.warning('Control: separate init image not provided')
init_image = input_image
if 'inpaint_image' in possible: # flex
p.task_args['inpaint_image'] = p.init_images[0] if isinstance(p.init_images, list) else p.init_images
@ -254,7 +255,7 @@ def preprocess_image(
p.init_images = [input_image]
elif input_type == 2:
if init_image is None:
shared.log.warning('Control: separate init image not provided')
logger.log.warning('Control: separate init image not provided')
init_image = input_image
p.init_images = [init_image]

View File

@ -2,7 +2,7 @@ import os
import time
import numpy as np
from PIL import Image
from installer import log
from modules.logger import log
from modules.errors import display
from modules import devices, images

View File

@ -13,6 +13,7 @@ from modules.control.units import t2iadapter # TencentARC T2I-Adapter
from modules.control.units import reference # ControlNet-Reference
from modules.control.processor import preprocess_image
from modules import devices, shared, errors, processing, images, sd_models, sd_vae, scripts_manager, masking
from modules import logger
from modules.processing_class import StableDiffusionProcessingControl
from modules.ui_common import infotext_to_html
from modules.api import script
@ -21,7 +22,7 @@ from modules.paths import resolve_output_path
debug = os.environ.get('SD_CONTROL_DEBUG', None) is not None
debug_log = shared.log.trace if debug else lambda *args, **kwargs: None
debug_log = logger.log.trace if debug else lambda *args, **kwargs: None
pipe = None
instance = None
original_pipeline = None
@ -36,7 +37,7 @@ def restore_pipeline():
if (original_pipeline is not None) and (original_pipeline.__class__.__name__ != shared.sd_model.__class__.__name__):
if debug:
fn = f'{sys._getframe(2).f_code.co_name}:{sys._getframe(1).f_code.co_name}' # pylint: disable=protected-access
shared.log.trace(f'Control restored pipeline: class={shared.sd_model.__class__.__name__} to={original_pipeline.__class__.__name__} fn={fn}')
logger.log.trace(f'Control restored pipeline: class={shared.sd_model.__class__.__name__} to={original_pipeline.__class__.__name__} fn={fn}')
shared.sd_model = original_pipeline
pipe = None
instance = None
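The debug branch above names its caller with sys._getframe, which is a cheap way to see which code path triggered a pipeline restore without threading a parameter through every call. Isolated, the trick looks like this (frame depths are relative, so the helper must be called from the code being traced):

import sys

def caller_chain():
    # 'outer:inner' for the two frames above this helper, e.g. 'control_run:restore_pipeline'
    outer = sys._getframe(2).f_code.co_name  # pylint: disable=protected-access
    inner = sys._getframe(1).f_code.co_name  # pylint: disable=protected-access
    return f'{outer}:{inner}'

This is CPython-specific, but so is the original; on other interpreters sys._getframe may be missing.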
@ -45,7 +46,7 @@ def restore_pipeline():
def terminate(msg):
restore_pipeline()
shared.log.error(f'Control terminated: {msg}')
logger.log.error(f'Control terminated: {msg}')
return msg
@ -67,7 +68,7 @@ def set_pipe(p, has_models, unit_type, selected_models, active_model, active_str
pipe = None
if has_models and not has_inputs(inits) and not has_inputs(inputs):
if not any(has_inputs(u.override) for u in active_units if u.enabled): # check overrides
shared.log.error('Control: no input images')
logger.log.error('Control: no input images')
return pipe
if has_models:
p.ops.append('control')
@ -85,7 +86,7 @@ def set_pipe(p, has_models, unit_type, selected_models, active_model, active_str
instance = t2iadapter.AdapterPipeline(selected_models, shared.sd_model)
pipe = instance.pipeline
if inits is not None:
shared.log.warning('Control: T2I-Adapter does not support separate init image')
logger.log.warning('Control: T2I-Adapter does not support separate init image')
elif unit_type == 'controlnet' and has_models:
p.extra_generation_params["Control type"] = 'ControlNet'
if shared.sd_model_type == 'f1':
@ -108,14 +109,14 @@ def set_pipe(p, has_models, unit_type, selected_models, active_model, active_str
instance = xs.ControlNetXSPipeline(selected_models, shared.sd_model)
pipe = instance.pipeline
if inits is not None:
shared.log.warning('Control: ControlNet-XS does not support separate init image')
logger.log.warning('Control: ControlNet-XS does not support separate init image')
elif unit_type == 'lite' and has_models:
p.extra_generation_params["Control type"] = 'ControlLLLite'
p.controlnet_conditioning_scale = control_conditioning
instance = lite.ControlLLitePipeline(shared.sd_model)
pipe = instance.pipeline
if inits is not None:
shared.log.warning('Control: ControlLLLite does not support separate init image')
logger.log.warning('Control: ControlLLLite does not support separate init image')
elif unit_type == 'reference' and has_models:
p.extra_generation_params["Control type"] = 'Reference'
p.extra_generation_params["Control attention"] = p.attention
@ -127,7 +128,7 @@ def set_pipe(p, has_models, unit_type, selected_models, active_model, active_str
instance = reference.ReferencePipeline(shared.sd_model)
pipe = instance.pipeline
if inits is not None:
shared.log.warning('Control: ControlNet-XS does not support separate init image')
logger.log.warning('Control: ControlNet-XS does not support separate init image')
else: # run in txt2img/img2img mode
if len(active_strength) > 0:
p.strength = active_strength[0]
@ -166,7 +167,7 @@ def check_active(p, unit_type, units):
active_strength.append(float(u.strength))
p.adapter_conditioning_factor = u.factor
active_units.append(u)
shared.log.debug(f'Control T2I-Adapter unit: i={num_units} process="{u.process.processor_id}" model="{u.adapter.model_id}" strength={u.strength} factor={u.factor}')
logger.log.debug(f'Control T2I-Adapter unit: i={num_units} process="{u.process.processor_id}" model="{u.adapter.model_id}" strength={u.strength} factor={u.factor}')
elif unit_type == 'controlnet' and (u.controlnet.model is not None or is_unified_model()):
active_process.append(u.process)
active_model.append(u.controlnet)
@ -182,7 +183,7 @@ def check_active(p, unit_type, units):
p.is_tile = p.is_tile or 'tile' in u.mode.lower()
p.control_tile = u.tile
p.extra_generation_params["Control mode"] = u.mode
shared.log.debug(f'Control unit: i={num_units} type=ControlNet process="{u.process.processor_id}" model="{u.controlnet.model_id}" strength={u.strength} guess={u.guess} start={u.start} end={u.end} mode={u.mode}')
logger.log.debug(f'Control unit: i={num_units} type=ControlNet process="{u.process.processor_id}" model="{u.controlnet.model_id}" strength={u.strength} guess={u.guess} start={u.start} end={u.end} mode={u.mode}')
elif unit_type == 'xs' and u.controlnet.model is not None:
active_process.append(u.process)
active_model.append(u.controlnet)
@ -190,13 +191,13 @@ def check_active(p, unit_type, units):
active_start.append(float(u.start))
active_end.append(float(u.end))
active_units.append(u)
shared.log.debug(f'Control unit: i={num_units} type=ControlNetXS process={u.process.processor_id} model={u.controlnet.model_id} strength={u.strength} guess={u.guess} start={u.start} end={u.end}')
logger.log.debug(f'Control unit: i={num_units} type=ControlNetXS process={u.process.processor_id} model={u.controlnet.model_id} strength={u.strength} guess={u.guess} start={u.start} end={u.end}')
elif unit_type == 'lite' and u.controlnet.model is not None:
active_process.append(u.process)
active_model.append(u.controlnet)
active_strength.append(float(u.strength))
active_units.append(u)
shared.log.debug(f'Control unit: i={num_units} type=ControlLLite process={u.process.processor_id} model={u.controlnet.model_id} strength={u.strength} guess={u.guess} start={u.start} end={u.end}')
logger.log.debug(f'Control unit: i={num_units} type=ControlLLite process={u.process.processor_id} model={u.controlnet.model_id} strength={u.strength} guess={u.guess} start={u.start} end={u.end}')
elif unit_type == 'reference':
p.override = u.override
p.attention = u.attention
@ -204,12 +205,12 @@ def check_active(p, unit_type, units):
p.adain_weight = float(u.adain_weight)
p.fidelity = u.fidelity
active_units.append(u)
shared.log.debug('Control Reference unit')
logger.log.debug('Control Reference unit')
else:
if u.process.processor_id is not None:
active_process.append(u.process)
active_units.append(u)
shared.log.debug(f'Control unit: i={num_units} type=Process process={u.process.processor_id}')
logger.log.debug(f'Control unit: i={num_units} type=Process process={u.process.processor_id}')
active_strength.append(float(u.strength))
debug_log(f'Control active: process={len(active_process)} model={len(active_model)}')
return active_process, active_model, active_strength, active_start, active_end, active_units
@ -320,7 +321,7 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
input_type = 1 # inpaint always requires control_image
if sampler_index is None:
shared.log.warning('Sampler: invalid')
logger.log.warning('Sampler: invalid')
sampler_index = 0
if hr_sampler_index is None:
hr_sampler_index = sampler_index
@ -427,13 +428,13 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
# TODO modernui: monkey-patch for missing tabs.select event
if p.selected_scale_tab_before == 0 and p.resize_name_before != 'None' and p.scale_by_before != 1 and inputs is not None and len(inputs) > 0:
shared.log.debug('Control: override resize mode=before')
logger.log.debug('Control: override resize mode=before')
p.selected_scale_tab_before = 1
if p.selected_scale_tab_after == 0 and p.resize_name_after != 'None' and p.scale_by_after != 1:
shared.log.debug('Control: override resize mode=after')
logger.log.debug('Control: override resize mode=after')
p.selected_scale_tab_after = 1
if p.selected_scale_tab_mask == 0 and p.resize_name_mask != 'None' and p.scale_by_mask != 1:
shared.log.debug('Control: override resize mode=mask')
logger.log.debug('Control: override resize mode=mask')
p.selected_scale_tab_mask = 1
# hires/refine defined outside of main init
@ -449,7 +450,7 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
p_extra_args = {}
if shared.sd_model is None:
shared.log.warning('Aborted: op=control model not loaded')
logger.log.warning('Aborted: op=control model not loaded')
return [], '', '', 'Error: model not loaded'
unit_type = unit_type.strip().lower() if unit_type is not None else ''
@ -491,7 +492,7 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
if isinstance(inputs, str) and os.path.exists(inputs): # only video, the rest is a list
if input_type == 2: # separate init image
if isinstance(inits, str) and inits != inputs:
shared.log.warning('Control: separate init video not supported for video input')
logger.log.warning('Control: separate init video not supported for video input')
input_type = 1
try:
video = cv2.VideoCapture(inputs)
@ -507,7 +508,7 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
if status:
shared.state.frame_count = 1 + frames // (video_skip_frames + 1)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
shared.log.debug(f'Control: input video: path={inputs} frames={frames} fps={fps} size={w}x{h} codec={codec}')
logger.log.debug(f'Control: input video: path={inputs} frames={frames} fps={fps} size={w}x{h} codec={codec}')
except Exception as e:
if is_generator:
yield terminate(f'Video open failed: path={inputs} {e}')
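The video branch relies on cv2.VideoCapture properties to size the job before processing any frames. A minimal standalone probe covering the fields logged above (the fourcc-to-codec decode is the usual little-endian unpacking):

import cv2

def probe_video(path):
    video = cv2.VideoCapture(path)
    try:
        frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = video.get(cv2.CAP_PROP_FPS)
        w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = int(video.get(cv2.CAP_PROP_FOURCC))
        codec = ''.join(chr((fourcc >> (8 * i)) & 0xFF) for i in range(4))
        status, frame = video.read()
        if status:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV reads BGR
        return frames, fps, w, h, codec, frame if status else None
    finally:
        video.release()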
@ -539,7 +540,7 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
try:
input_image = Image.open(input_image)
except Exception as e:
shared.log.error(f'Control: image open failed: path={input_image} type=control error={e}')
logger.log.error(f'Control: image open failed: path={input_image} type=control error={e}')
continue
# match init input
if input_type == 1:
@ -553,7 +554,7 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
try:
init_image = Image.open(inits[i])
except Exception as e:
shared.log.error(f'Control: image open failed: path={inits[i]} type=init error={e}')
logger.log.error(f'Control: image open failed: path={inits[i]} type=init error={e}')
continue
else:
debug_log(f'Control Init image: {i % len(inits) + 1} of {len(inits)}')
@ -576,7 +577,7 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
and getattr(p, 'init_images', None) is None \
and getattr(p, 'image', None) is None:
if is_generator:
shared.log.debug(f'Control args: {p.task_args}')
logger.log.debug(f'Control args: {p.task_args}')
yield terminate(f'Mode={p.extra_generation_params.get("Control type", None)} input image is none')
return [], '', '', 'Error: Input image is none'
if unit_type == 'lite':
@ -666,7 +667,7 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
debug_log(f'Control: pipeline units={len(active_model)} process={len(active_process)} outputs={len(output_images)}')
except Exception as e:
shared.log.error(f'Control: type={unit_type} units={len(active_model)} {e}')
logger.log.error(f'Control: type={unit_type} units={len(active_model)} {e}')
errors.display(e, 'Control')
if len(output_images) == 0:

View File

@ -1,6 +1,7 @@
import math
from PIL import Image, ImageChops, ImageDraw
from modules import shared, errors, images
from modules import logger
FONT_SIZE=48
@ -9,17 +10,17 @@ FONT_SIZE=48
def test_processors(image):
from modules.control import processors
if image is None:
shared.log.error('Image not loaded')
logger.log.error('Image not loaded')
return None, None, None
res = []
for processor_id in processors.list_models():
if shared.state.interrupted:
continue
shared.log.info(f'Testing processor: {processor_id}')
logger.log.info(f'Testing processor: {processor_id}')
processor = processors.Processor(processor_id)
output = image
if processor is None:
shared.log.error(f'Processor load failed: id="{processor_id}"')
logger.log.error(f'Processor load failed: id="{processor_id}"')
processor_id = f'{processor_id} error'
else:
output = processor(image)
@ -29,7 +30,7 @@ def test_processors(image):
output = output.resize(image.size, Image.Resampling.LANCZOS)
if output.mode != image.mode:
output = output.convert(image.mode)
shared.log.debug(f'Testing processor: input={image} mode={image.mode} output={output} mode={output.mode}')
logger.log.debug(f'Testing processor: input={image} mode={image.mode} output={output} mode={output.mode}')
diff = ImageChops.difference(image, output)
if not diff.getbbox():
processor_id = f'{processor_id} null'
@ -44,7 +45,7 @@ def test_processors(image):
w, h = 256, 256
size = (cols * w + cols, rows * h + rows)
grid = Image.new('RGB', size=size, color='black')
shared.log.info(f'Test processors: images={len(res)} grid={grid}')
logger.log.info(f'Test processors: images={len(res)} grid={grid}')
for i, image in enumerate(res):
x = (i % cols * w) + (i % cols)
y = (i // cols * h) + (i // cols)
@ -59,7 +60,7 @@ def test_controlnets(prompt, negative, image):
from modules import devices, sd_models
from modules.control.units import controlnet
if image is None:
shared.log.error('Image not loaded')
logger.log.error('Image not loaded')
return None, None, None
res = []
for model_id in controlnet.list_models():
@ -71,9 +72,9 @@ def test_controlnets(prompt, negative, image):
if model_id != 'None':
controlnet = controlnet.ControlNet(model_id=model_id, device=devices.device, dtype=devices.dtype)
if controlnet is None:
shared.log.error(f'ControlNet load failed: id="{model_id}"')
logger.log.error(f'ControlNet load failed: id="{model_id}"')
continue
shared.log.info(f'Testing ControlNet: {model_id}')
logger.log.info(f'Testing ControlNet: {model_id}')
pipe = controlnet.ControlNetPipeline(controlnet=controlnet.model, pipeline=shared.sd_model)
pipe.pipeline.to(device=devices.device, dtype=devices.dtype)
sd_models.set_diffuser_options(pipe)
@ -95,7 +96,7 @@ def test_controlnets(prompt, negative, image):
w, h = 256, 256
size = (cols * w + cols, rows * h + rows)
grid = Image.new('RGB', size=size, color='black')
shared.log.info(f'Test ControlNets: images={len(res)} grid={grid}')
logger.log.info(f'Test ControlNets: images={len(res)} grid={grid}')
for i, image in enumerate(res):
x = (i % cols * w) + (i % cols)
y = (i // cols * h) + (i // cols)
@ -110,7 +111,7 @@ def test_adapters(prompt, negative, image):
from modules import devices, sd_models
from modules.control.units import t2iadapter
if image is None:
shared.log.error('Image not loaded')
logger.log.error('Image not loaded')
return None, None, None
res = []
for model_id in t2iadapter.list_models():
@ -122,9 +123,9 @@ def test_adapters(prompt, negative, image):
if model_id != 'None':
adapter = t2iadapter.Adapter(model_id=model_id, device=devices.device, dtype=devices.dtype)
if adapter is None:
shared.log.error(f'Adapter load failed: id="{model_id}"')
logger.log.error(f'Adapter load failed: id="{model_id}"')
continue
shared.log.info(f'Testing Adapter: {model_id}')
logger.log.info(f'Testing Adapter: {model_id}')
pipe = t2iadapter.AdapterPipeline(adapter=adapter.model, pipeline=shared.sd_model)
pipe.pipeline.to(device=devices.device, dtype=devices.dtype)
sd_models.set_diffuser_options(pipe)
@ -147,7 +148,7 @@ def test_adapters(prompt, negative, image):
w, h = 256, 256
size = (cols * w + cols, rows * h + rows)
grid = Image.new('RGB', size=size, color='black')
shared.log.info(f'Test Adapters: images={len(res)} grid={grid}')
logger.log.info(f'Test Adapters: images={len(res)} grid={grid}')
for i, image in enumerate(res):
x = (i % cols * w) + (i % cols)
y = (i // cols * h) + (i // cols)
@ -162,7 +163,7 @@ def test_xs(prompt, negative, image):
from modules import devices, sd_models
from modules.control.units import xs
if image is None:
shared.log.error('Image not loaded')
logger.log.error('Image not loaded')
return None, None, None
res = []
for model_id in xs.list_models():
@ -174,9 +175,9 @@ def test_xs(prompt, negative, image):
if model_id != 'None':
xs = xs.ControlNetXS(model_id=model_id, device=devices.device, dtype=devices.dtype)
if xs is None:
shared.log.error(f'ControlNet-XS load failed: id="{model_id}"')
logger.log.error(f'ControlNet-XS load failed: id="{model_id}"')
continue
shared.log.info(f'Testing ControlNet-XS: {model_id}')
logger.log.info(f'Testing ControlNet-XS: {model_id}')
pipe = xs.ControlNetXSPipeline(controlnet=xs.model, pipeline=shared.sd_model)
pipe.pipeline.to(device=devices.device, dtype=devices.dtype)
sd_models.set_diffuser_options(pipe)
@ -198,7 +199,7 @@ def test_xs(prompt, negative, image):
w, h = 256, 256
size = (cols * w + cols, rows * h + rows)
grid = Image.new('RGB', size=size, color='black')
shared.log.info(f'Test ControlNet-XS: images={len(res)} grid={grid}')
logger.log.info(f'Test ControlNet-XS: images={len(res)} grid={grid}')
for i, image in enumerate(res):
x = (i % cols * w) + (i % cols)
y = (i // cols * h) + (i // cols)
@ -213,7 +214,7 @@ def test_lite(prompt, negative, image):
from modules import devices, sd_models
from modules.control.units import lite
if image is None:
shared.log.error('Image not loaded')
logger.log.error('Image not loaded')
return None, None, None
res = []
for model_id in lite.list_models():
@ -225,9 +226,9 @@ def test_lite(prompt, negative, image):
if model_id != 'None':
lite = lite.ControlLLLite(model_id=model_id, device=devices.device, dtype=devices.dtype)
if lite is None:
shared.log.error(f'Control-LLite load failed: id="{model_id}"')
logger.log.error(f'Control-LLite load failed: id="{model_id}"')
continue
shared.log.info(f'Testing ControlLLLite: {model_id}')
logger.log.info(f'Testing ControlLLLite: {model_id}')
pipe = lite.ControlLLitePipeline(pipeline=shared.sd_model)
pipe.apply(controlnet=lite.model, image=image, conditioning=1.0)
pipe.pipeline.to(device=devices.device, dtype=devices.dtype)
@ -250,7 +251,7 @@ def test_lite(prompt, negative, image):
w, h = 256, 256
size = (cols * w + cols, rows * h + rows)
grid = Image.new('RGB', size=size, color='black')
shared.log.info(f'Test ControlLLLite: images={len(res)} grid={grid}')
logger.log.info(f'Test ControlLLLite: images={len(res)} grid={grid}')
for i, image in enumerate(res):
x = (i % cols * w) + (i % cols)
y = (i // cols * h) + (i // cols)
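The paste coordinates repeated across these test helpers encode a one-pixel gutter: tile i lands at column i % cols and row i // cols, and the trailing + (i % cols) / + (i // cols) terms shift it one pixel per preceding gutter. Worked for i=5, cols=4, w=h=256: x = 1*256 + 1 = 257, y = 1*256 + 1 = 257. The same placement, written as a helper:

def tile_origin(i, cols, w, h, gap=1):
    # top-left corner of tile i in a grid with a fixed gap between tiles
    col, row = i % cols, i // cols
    return col * (w + gap), row * (h + gap)

assert tile_origin(5, 4, 256, 256) == (257, 257)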

View File

@ -1,6 +1,7 @@
import time
from PIL import Image
from modules import shared, processing, images, sd_models, sd_vae
from modules import logger
def get_tile(image: Image.Image, x: int, y: int, sx: int, sy: int) -> Image.Image:
@ -37,7 +38,7 @@ def run_tiling(p: processing.StableDiffusionProcessing, input_image: Image.Image
w, h = vae_scale_factor * (int(sx * init_image.width) // vae_scale_factor), vae_scale_factor * (int(sy * init_image.height) // vae_scale_factor)
init_upscaled = images.resize_image(resize_mode=1 if sx==sy else 5, im=init_image, width=w, height=h, context='add with forward')
t1 = time.time()
shared.log.debug(f'Control Tile: scale={sx}x{sy} resize={"fixed" if sx==sy else "context"} control={control_upscaled} init={init_upscaled} time={t1-t0:.3f}')
logger.log.debug(f'Control Tile: scale={sx}x{sy} resize={"fixed" if sx==sy else "context"} control={control_upscaled} init={init_upscaled} time={t1-t0:.3f}')
# stop processing from restoring pipeline on each iteration
orig_restore_pipeline = getattr(shared.sd_model, 'restore_pipeline', None)
@ -46,7 +47,7 @@ def run_tiling(p: processing.StableDiffusionProcessing, input_image: Image.Image
# run tiling
for x in range(sx):
for y in range(sy):
shared.log.info(f'Control Tile: tile={x+1}-{sx}/{y+1}-{sy} target={control_upscaled}')
logger.log.info(f'Control Tile: tile={x+1}-{sx}/{y+1}-{sy} target={control_upscaled}')
shared.sd_model = sd_models.set_diffuser_pipe(shared.sd_model, sd_models.DiffusersTaskType.IMAGE_2_IMAGE)
p.init_images = None
p.task_args['control_mode'] = p.control_mode
@ -70,5 +71,5 @@ def run_tiling(p: processing.StableDiffusionProcessing, input_image: Image.Image
if hasattr(shared.sd_model, 'restore_pipeline') and shared.sd_model.restore_pipeline is not None:
shared.sd_model.restore_pipeline()
t2 = time.time()
shared.log.debug(f'Control Tile: image={control_upscaled} time={t2-t0:.3f}')
logger.log.debug(f'Control Tile: image={control_upscaled} time={t2-t0:.3f}')
return processed
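The width/height computation at the top of run_tiling snaps the upscale target to a multiple of the VAE scale factor so the latent grid divides evenly; the idiom is floor-division then re-multiplication, and the parentheses are load-bearing, since a * b // a evaluates left to right and collapses back to b. A worked check:

vae_scale_factor = 8  # typical for SD-family VAEs (illustrative)

def snap_down(value, multiple):
    return multiple * (value // multiple)

assert snap_down(int(1.5 * 700), 8) == 1048  # 1050 rounded down to a multiple of 8
assert 8 * int(1.5 * 700) // 8 == 1050       # the unparenthesized form changes nothing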

View File

@ -1,6 +1,6 @@
from PIL import Image
import gradio as gr
from installer import log
from modules.logger import log
from modules.control import processors
from modules.control.units import controlnet
from modules.control.units import xs

View File

@ -3,7 +3,7 @@ import time
from typing import Union
import threading
from diffusers import StableDiffusionPipeline, StableDiffusionXLPipeline, T2IAdapter, MultiAdapter, StableDiffusionAdapterPipeline, StableDiffusionXLAdapterPipeline # pylint: disable=unused-import
from installer import log
from modules.logger import log
from modules import errors, sd_models
from modules.control.units import detect

View File

@ -1,3 +1,4 @@
from modules import logger
import platform
from typing import NamedTuple, Optional
from collections.abc import Callable
@ -108,9 +109,9 @@ def directml_override_opts():
if getattr(shared.opts, key) != item.value and (item.condition is None or item.condition(shared.opts)):
count += 1
setattr(shared.opts, key, item.value)
shared.log.warning(f'Overriding: {key}={item.value} {item.message if item.message is not None else ""}')
logger.log.warning(f'Overriding: {key}={item.value} {item.message if item.message is not None else ""}')
if count > 0:
shared.log.info(f'Options override: count={count}. If you want to keep them from overriding, run with --experimental argument.')
logger.log.info(f'Options override: count={count}. If you want to keep them from overriding, run with --experimental argument.')
_set_memory_provider()

View File

@ -1,7 +1,7 @@
import math
import torch
from modules.postprocess.realesrgan_model_arch import RealESRGANer
from installer import log
from modules.logger import log
# DML workaround: some of the contents of the output tensor turn to 0 after extended slices, so move it to the CPU.

View File

@ -1,6 +1,6 @@
import logging
import warnings
from installer import get_log, get_console, setup_logging, install_traceback
from modules.logger import get_log, get_console, setup_logging, install_traceback
from modules.errorlimiter import ErrorLimiterAbort
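This import is the heart of the commit: modules.errors, like everything else touched here, now takes its logging surface from modules/logger instead of installer. From the names used across this diff, the new module exposes at least get_log, get_console, setup_logging, install_traceback, and module-level log and console. A facade sketch implied by those imports (bodies are illustrative; only the names come from the diff):

# modules/logger.py -- shape of the facade, not the real implementation
import logging
from rich.console import Console

log = logging.getLogger('sd')
console = Console()

def get_log():
    return log

def get_console():
    return console

def setup_logging():
    if not log.handlers:  # attach handlers exactly once
        logging.basicConfig(level=logging.INFO)

def install_traceback(suppress=None):
    from rich.traceback import install as traceback_install
    traceback_install(console=console, suppress=suppress or [])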

View File

@ -3,6 +3,7 @@ import os
from datetime import datetime, timezone
import git
from modules import shared, errors
from modules import logger
from modules.paths import extensions_dir, extensions_builtin_dir
@ -98,7 +99,7 @@ def temp_disable_extensions():
shared.opts.data['theme_type'] = 'None'
shared.opts.data['gradio_theme'] = theme_name
else:
shared.log.error(f'UI theme invalid: theme="{theme_name}" available={["standard/*", "modern/*", "none/*"]} fallback="standard/black-teal"')
logger.log.error(f'UI theme invalid: theme="{theme_name}" available={["standard/*", "modern/*", "none/*"]} fallback="standard/black-teal"')
shared.opts.data['theme_type'] = 'Standard'
shared.opts.data['gradio_theme'] = 'black-teal'
@ -155,7 +156,7 @@ class Extension:
try:
self.status = 'unknown'
if len(repo.remotes) == 0:
shared.log.debug(f"Extension: no remotes info repo={self.name}")
logger.log.debug(f"Extension: no remotes info repo={self.name}")
return
self.git_name = repo.remotes.origin.url.split('.git')[0].split('/')[-1]
self.description = repo.description
@ -172,7 +173,7 @@ class Extension:
self.commit_hash = head.hexsha
self.version = f"<p>{self.commit_hash[:8]}</p><p>{format_dt(ts2utc(self.commit_date))}</p>"
except Exception as ex:
shared.log.error(f"Extension: failed reading data from git repo={self.name}: {ex}")
logger.log.error(f"Extension: failed reading data from git repo={self.name}: {ex}")
self.remote = None
def list_files(self, subdir, extension):
@ -190,7 +191,7 @@ class Extension:
priority = str(f.read().strip())
res.append(scripts_manager.ScriptFile(self.path, filename, os.path.join(dirpath, filename), priority))
if priority != '50':
shared.log.debug(f'Extension priority override: {os.path.dirname(dirpath)}:{priority}')
logger.log.debug(f'Extension priority override: {os.path.dirname(dirpath)}:{priority}')
res = [x for x in res if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)]
return res
@ -233,7 +234,7 @@ def list_extensions():
if not os.path.isdir(extensions_dir):
return
if shared.opts.disable_all_extensions == "all" or shared.opts.disable_all_extensions == "user":
shared.log.warning(f"Option set: Disable extensions: {shared.opts.disable_all_extensions}")
logger.log.warning(f"Option set: Disable extensions: {shared.opts.disable_all_extensions}")
extension_paths = []
extension_names = []
extension_folders = [extensions_builtin_dir] if shared.cmd_opts.safe else [extensions_builtin_dir, extensions_dir]
@ -245,7 +246,7 @@ def list_extensions():
if not os.path.isdir(path):
continue
if extension_dirname in extension_names:
shared.log.info(f'Skipping conflicting extension: {path}')
logger.log.info(f'Skipping conflicting extension: {path}')
continue
extension_names.append(extension_dirname)
extension_paths.append((extension_dirname, path, dirname == extensions_builtin_dir))
@ -256,4 +257,4 @@ def list_extensions():
enabled = dirname.lower() not in disabled_extensions
extension = Extension(name=dirname, path=path, enabled=enabled, is_builtin=is_builtin)
extensions.append(extension)
shared.log.debug(f'Extensions: disabled={[e.name for e in extensions if not e.enabled]}')
logger.log.debug(f'Extensions: disabled={[e.name for e in extensions if not e.enabled]}')
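The Extension metadata above comes from GitPython: the remote URL yields the canonical name, the HEAD commit yields hash and date, and any git failure simply clears the remote. A defensive read of the same fields, reduced to essentials:

import git

def repo_summary(path):
    name, short_hash = None, None
    try:
        repo = git.Repo(path)
        if repo.remotes:  # extensions without remotes carry no upstream name
            name = repo.remotes.origin.url.split('.git')[0].split('/')[-1]
        short_hash = repo.head.commit.hexsha[:8]
    except Exception:
        pass  # mirror the diff's behaviour: failures degrade to missing metadata
    return name, short_hash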

View File

@ -2,6 +2,7 @@ import re
import inspect
from collections import defaultdict
from modules import errors, shared
from modules import logger
extra_network_registry = {}
@ -89,14 +90,14 @@ def activate(p, extra_network_data=None, step=0, include=None, exclude=None):
stepwise = stepwise or is_stepwise(extra_network_args)
functional = shared.opts.lora_functional
if shared.opts.lora_force_diffusers and stepwise:
shared.log.warning("Network load: type=LoRA method=composable loader=diffusers not compatible")
logger.log.warning("Network load: type=LoRA method=composable loader=diffusers not compatible")
stepwise = False
shared.opts.data['lora_functional'] = stepwise or functional
for extra_network_name, extra_network_args in extra_network_data.items():
extra_network = extra_network_registry.get(extra_network_name, None)
if extra_network is None:
errors.log.warning(f"Skipping unknown extra network: {extra_network_name}")
logger.log.warning(f"Skipping unknown extra network: {extra_network_name}")
continue
try:
signature = list(inspect.signature(extra_network.activate).parameters)
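The signature inspection that closes this hunk suggests activate adapts its call to whatever parameters each handler declares, so older extra-network implementations keep working. In isolation the pattern looks like this:

import inspect

def call_with_supported_kwargs(fn, *args, **kwargs):
    params = inspect.signature(fn).parameters
    accepted = {k: v for k, v in kwargs.items() if k in params}  # drop undeclared kwargs
    return fn(*args, **accepted)

def activate_v1(p):          # an older handler (illustrative)
    return f'v1 {p}'

def activate_v2(p, step=0):  # a newer handler (illustrative)
    return f'v2 {p} step={step}'

assert call_with_supported_kwargs(activate_v1, 'x', step=3) == 'v1 x'
assert call_with_supported_kwargs(activate_v2, 'x', step=3) == 'v2 x step=3'

A handler declaring **kwargs would need the parameters checked for VAR_KEYWORD as well; the sketch keeps to the named-parameter case shown above.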

View File

@ -9,6 +9,7 @@ import gradio as gr
import safetensors.torch
from modules.merging import merge, merge_utils, modules_sdxl
from modules import shared, images, sd_models, sd_vae, sd_samplers, devices
from modules import logger
def run_pnginfo(image):
@ -35,7 +36,7 @@ def run_modelmerger(id_task, **kwargs): # pylint: disable=unused-argument
try:
import tensordict # pylint: disable=unused-import
except Exception as e:
shared.log.error(f"Merge: {e}")
logger.log.error(f"Merge: {e}")
return [*[gr.update() for _ in range(4)], "tensordict not available"]
jobid = shared.state.begin('Merge')
@ -74,7 +75,7 @@ def run_modelmerger(id_task, **kwargs): # pylint: disable=unused-argument
assert len(alpha) == 26 or len(alpha) == 20, "Alpha Block Weights are wrong length (26 or 20 for SDXL)"
kwargs["alpha"] = alpha
except KeyError as ke:
shared.log.warning(f"Merge: Malformed manual block weight: {ke}")
logger.log.warning(f"Merge: Malformed manual block weight: {ke}")
elif kwargs.get("alpha_preset", None) or kwargs.get("alpha", None):
kwargs["alpha"] = kwargs.get("alpha_preset", kwargs["alpha"])
@ -91,7 +92,7 @@ def run_modelmerger(id_task, **kwargs): # pylint: disable=unused-argument
assert len(beta) == 26 or len(beta) == 20, "Beta Block Weights are wrong length (26 or 20 for SDXL)"
kwargs["beta"] = beta
except KeyError as ke:
shared.log.warning(f"Merge: Malformed manual block weight: {ke}")
logger.log.warning(f"Merge: Malformed manual block weight: {ke}")
elif kwargs.get("beta_preset", None) or kwargs.get("beta", None):
kwargs["beta"] = kwargs.get("beta_preset", kwargs["beta"])
@ -123,7 +124,7 @@ def run_modelmerger(id_task, **kwargs): # pylint: disable=unused-argument
bake_in_vae_filename = sd_vae.vae_dict.get(kwargs.get("bake_in_vae", None), None)
if bake_in_vae_filename is not None:
shared.log.info(f"Merge VAE='{bake_in_vae_filename}'")
logger.log.info(f"Merge VAE='{bake_in_vae_filename}'")
shared.state.textinfo = 'Merge VAE'
vae_dict = sd_vae.load_vae_dict(bake_in_vae_filename)
for key in vae_dict.keys():
@ -179,7 +180,7 @@ def run_modelmerger(id_task, **kwargs): # pylint: disable=unused-argument
torch.save(theta_0, output_modelname)
t1 = time.time()
shared.log.info(f"Merge complete: saved='{output_modelname}' time={t1-t0:.2f}")
logger.log.info(f"Merge complete: saved='{output_modelname}' time={t1-t0:.2f}")
sd_models.list_models()
created_model = next((ckpt for ckpt in sd_models.checkpoints_list.values() if ckpt.name == filename), None)
if created_model:
@ -200,9 +201,9 @@ def run_model_modules(model_type:str, model_name:str, custom_name:str,
def msg(text, err:bool=False):
nonlocal status
if err:
shared.log.error(f'Modules merge: {text}')
logger.log.error(f'Modules merge: {text}')
else:
shared.log.info(f'Modules merge: {text}')
logger.log.info(f'Modules merge: {text}')
status += text + '<br>'
return status

View File

@ -2,9 +2,10 @@ import os
import gradio as gr
from PIL import Image
from modules import scripts_manager, processing, shared, images
from modules import logger
debug = shared.log.trace if os.environ.get('SD_FACE_DEBUG', None) is not None else lambda *args, **kwargs: None
debug = logger.log.trace if os.environ.get('SD_FACE_DEBUG', None) is not None else lambda *args, **kwargs: None
class Script(scripts_manager.Script):
@ -34,7 +35,7 @@ class Script(scripts_manager.Script):
raise ValueError(f'Face: unknown input: {file}')
init_images.append(image)
except Exception as e:
shared.log.warning(f'Face: failed to load image: {e}')
logger.log.warning(f'Face: failed to load image: {e}')
return init_images
def mode_change(self, mode):
@ -109,10 +110,10 @@ class Script(scripts_manager.Script):
if mode == 'None':
return None
if input_images is None or len(input_images) == 0:
shared.log.error('Face: no init images')
logger.log.error('Face: no init images')
return None
if shared.sd_model_type != 'sd' and shared.sd_model_type != 'sdxl':
shared.log.error('Face: base model not supported')
logger.log.error('Face: base model not supported')
return None
input_images = input_images.copy()
@ -142,7 +143,7 @@ class Script(scripts_manager.Script):
photo_maker(p, app=app, input_images=input_images, model=pm_model, trigger=pm_trigger, strength=pm_strength, start=pm_start)
elif mode == 'InstantID':
if hasattr(p, 'init_images') and p.init_images is not None and len(p.init_images) > 0:
shared.log.warning('Face: InstantID with init image not supported')
logger.log.warning('Face: InstantID with init image not supported')
input_images += p.init_images
from modules.face.insightface import get_app
app = get_app('antelopev2')
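The `SD_FACE_DEBUG` gate rewired above recurs throughout these files. A standalone sketch of the idiom, assuming `logger.log` provides a custom `.trace()` method as the call sites imply:

    import os
    from modules import logger

    # Resolve the debug callable once at import time: either the trace-level
    # logger method or a no-op lambda that swallows all arguments.
    debug = logger.log.trace if os.environ.get('SD_FACE_DEBUG', None) is not None else lambda *args, **kwargs: None

    debug('Face: loaded images=2')  # emitted only when SD_FACE_DEBUG is set

Note that the arguments are still evaluated either way; the gate only skips the logging call itself, so expensive values should not be computed inline.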

View File

@ -6,6 +6,7 @@ import diffusers
import huggingface_hub as hf
from PIL import Image
from modules import processing, shared, devices, extra_networks, sd_hijack_freeu, script_callbacks, ipadapter, token_merge
from modules import logger
from modules.sd_hijack_hypertile import context_hypertile_vae, context_hypertile_unet
@ -21,7 +22,7 @@ FACEID_MODELS = {
faceid_model_weights = None
faceid_model_name = None
debug = shared.log.trace if os.environ.get("SD_FACE_DEBUG", None) is not None else lambda *args, **kwargs: None
debug = logger.log.trace if os.environ.get("SD_FACE_DEBUG", None) is not None else lambda *args, **kwargs: None
def hijack_load_ip_adapter(self):
@ -42,7 +43,7 @@ def face_id(
):
global faceid_model_weights, faceid_model_name # pylint: disable=global-statement
if source_images is None or len(source_images) == 0:
shared.log.warning('FaceID: no input images')
logger.log.warning('FaceID: no input images')
return None
from insightface.utils import face_align
@ -57,7 +58,7 @@ def face_id(
IPAdapterFaceID as IPAdapterFaceIDPortrait,
)
except Exception as e:
shared.log.error(f"FaceID incorrect version of ip_adapter: {e}")
logger.log.error(f"FaceID incorrect version of ip_adapter: {e}")
return None
processed_images = []
@ -80,13 +81,13 @@ def face_id(
basename, _ext = os.path.splitext(filename)
model_path = hf.hf_hub_download(repo_id=folder, filename=filename, cache_dir=shared.opts.hfcache_dir)
if model_path is None:
shared.log.error(f'FaceID download failed: model={model} file="{ip_ckpt}"')
logger.log.error(f'FaceID download failed: model={model} file="{ip_ckpt}"')
return None
if faceid_model_weights is None or faceid_model_name != model or not cache:
shared.log.debug(f'FaceID load: model={model} file="{ip_ckpt}"')
logger.log.debug(f'FaceID load: model={model} file="{ip_ckpt}"')
faceid_model_weights = torch.load(model_path, map_location="cpu")
else:
shared.log.debug(f'FaceID cached: model={model} file="{ip_ckpt}"')
logger.log.debug(f'FaceID cached: model={model} file="{ip_ckpt}"')
if "XL Plus" in model and shared.sd_model_type == 'sd':
image_encoder_path = "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
@ -148,7 +149,7 @@ def face_id(
torch_dtype=devices.dtype,
)
else:
shared.log.error(f'FaceID model not supported: model="{model}" class={shared.sd_model.__class__.__name__}')
logger.log.error(f'FaceID model not supported: model="{model}" class={shared.sd_model.__class__.__name__}')
return None
if override:
@ -171,15 +172,15 @@ def face_id(
np_image = cv2.cvtColor(np.array(source_image), cv2.COLOR_RGB2BGR)
faces = app.get(np_image)
if len(faces) == 0:
shared.log.error("FaceID: no faces found")
logger.log.error("FaceID: no faces found")
break
face_embeds.append(torch.from_numpy(faces[0].normed_embedding).unsqueeze(0))
face_images.append(face_align.norm_crop(np_image, landmark=faces[0].kps, image_size=224))
shared.log.debug(f'FaceID face: i={i+1} score={faces[0].det_score:.2f} gender={"female" if faces[0].gender==0 else "male"} age={faces[0].age} bbox={faces[0].bbox}')
logger.log.debug(f'FaceID face: i={i+1} score={faces[0].det_score:.2f} gender={"female" if faces[0].gender==0 else "male"} age={faces[0].age} bbox={faces[0].bbox}')
p.extra_generation_params[f"FaceID {i+1}"] = f'{faces[0].det_score:.2f} {"female" if faces[0].gender==0 else "male"} {faces[0].age}y'
if len(face_embeds) == 0:
shared.log.error("FaceID: no faces found")
logger.log.error("FaceID: no faces found")
return None
face_embeds = torch.cat(face_embeds, dim=0)
@ -198,7 +199,7 @@ def face_id(
ip_model_dict["shortcut"] = shortcut
if "Plus" in model:
ip_model_dict["s_scale"] = structure
shared.log.debug(f"FaceID args: {ip_model_dict}")
logger.log.debug(f"FaceID args: {ip_model_dict}")
if "Plus" in model:
ip_model_dict["face_image"] = face_images
ip_model_dict["faceid_embeds"] = face_embeds # overwrite placeholder

View File

@ -4,9 +4,10 @@ import numpy as np
import huggingface_hub as hf
from PIL import Image
from modules import processing, shared, devices
from modules import logger
debug = shared.log.trace if os.environ.get('SD_FACE_DEBUG', None) is not None else lambda *args, **kwargs: None
debug = logger.log.trace if os.environ.get('SD_FACE_DEBUG', None) is not None else lambda *args, **kwargs: None
insightface_app = None
swapper = None
@ -17,19 +18,19 @@ def face_swap(p: processing.StableDiffusionProcessing, app, input_images: list[I
import insightface.model_zoo
repo_id = 'ezioruan/inswapper_128.onnx'
model_path = hf.hf_hub_download(repo_id=repo_id, filename='inswapper_128.onnx', cache_dir=shared.opts.hfcache_dir)
shared.log.debug(f'FaceSwap load: repo="{repo_id}" path="{model_path}"')
logger.log.debug(f'FaceSwap load: repo="{repo_id}" path="{model_path}"')
# model_path = hf.hf_hub_download(repo_id='somanchiu/reswapper', filename='reswapper_256-1567500_originalInswapperClassCompatible.onnx', cache_dir=shared.opts.hfcache_dir)
try:
router: insightface.model_zoo.model_zoo.INSwapper = insightface.model_zoo.model_zoo.ModelRouter(model_path)
swapper = router.get_model()
except Exception as e:
shared.log.error(f'FaceSwap load: {e}')
logger.log.error(f'FaceSwap load: {e}')
return None
np_image = cv2.cvtColor(np.array(source_image), cv2.COLOR_RGB2BGR)
faces = app.get(np_image)
if faces is None or len(faces) == 0:
shared.log.warning('FaceSwap: No faces detected')
logger.log.warning('FaceSwap: No faces detected')
return None
source_face = faces[0]
processed_images = []

View File

@ -4,11 +4,12 @@ import torch
import numpy as np
import huggingface_hub as hf
from modules import shared, processing, sd_models, devices
from modules import logger
REPO_ID = "InstantX/InstantID"
controlnet_model = None
debug = shared.log.trace if os.environ.get('SD_FACE_DEBUG', None) is not None else lambda *args, **kwargs: None
debug = logger.log.trace if os.environ.get('SD_FACE_DEBUG', None) is not None else lambda *args, **kwargs: None
def instant_id(p: processing.StableDiffusionProcessing, app, source_images, strength=1.0, conditioning=0.5, cache=True): # pylint: disable=arguments-differ
@ -18,12 +19,12 @@ def instant_id(p: processing.StableDiffusionProcessing, app, source_images, stre
# prepare pipeline
if source_images is None or len(source_images) == 0:
shared.log.warning('InstantID: no input images')
logger.log.warning('InstantID: no input images')
return None
c = shared.sd_model.__class__.__name__ if shared.sd_loaded else ''
if c not in ['StableDiffusionXLPipeline', 'StableDiffusionXLInstantIDPipeline']:
shared.log.warning(f'InstantID invalid base model: current={c} required=StableDiffusionXLPipeline')
logger.log.warning(f'InstantID invalid base model: current={c} required=StableDiffusionXLPipeline')
return None
# prepare face emb
@ -35,9 +36,9 @@ def instant_id(p: processing.StableDiffusionProcessing, app, source_images, stre
face_embeds.append(torch.from_numpy(face['embedding']))
face_images.append(draw_kps(source_image, face['kps']))
p.extra_generation_params[f"InstantID {i+1}"] = f'{faces[0].det_score:.2f} {"female" if faces[0].gender==0 else "male"} {faces[0].age}y'
shared.log.debug(f'InstantID face: score={face.det_score:.2f} gender={"female" if face.gender==0 else "male"} age={face.age} bbox={face.bbox}')
logger.log.debug(f'InstantID face: score={face.det_score:.2f} gender={"female" if face.gender==0 else "male"} age={face.age} bbox={face.bbox}')
shared.log.debug(f'InstantID loading: model={REPO_ID}')
logger.log.debug(f'InstantID loading: model={REPO_ID}')
face_adapter = hf.hf_hub_download(repo_id=REPO_ID, filename="ip-adapter.bin")
if controlnet_model is None or not cache:
controlnet_model = ControlNetModel.from_pretrained(REPO_ID, subfolder="ControlNetModel", torch_dtype=devices.dtype, cache_dir=shared.opts.diffusers_dir)
@ -72,7 +73,7 @@ def instant_id(p: processing.StableDiffusionProcessing, app, source_images, stre
p.task_args['image'] = face_images[0]
p.task_args['controlnet_conditioning_scale'] = float(conditioning)
p.task_args['ip_adapter_scale'] = float(strength)
shared.log.debug(f"InstantID args: {p.task_args}")
logger.log.debug(f"InstantID args: {p.task_args}")
p.task_args['prompt'] = p.all_prompts[0] if p.all_prompts else p.prompt
p.task_args['negative_prompt'] = p.all_negative_prompts[0] if p.all_negative_prompts else p.negative_prompt
p.task_args['image_embeds'] = face_embeds[0] # overwrite placeholder

View File

@ -3,6 +3,7 @@ import numpy as np
import torch
import huggingface_hub as hf
from modules import shared, processing, sd_models, devices
from modules import logger
original_pipeline = None
@ -21,16 +22,16 @@ def photo_maker(p: processing.StableDiffusionProcessing, app, model: str, input_
# prepare pipeline
if len(input_images) == 0:
shared.log.warning('PhotoMaker: no input images')
logger.log.warning('PhotoMaker: no input images')
return None
if len(trigger) == 0:
shared.log.warning('PhotoMaker: no trigger word')
logger.log.warning('PhotoMaker: no trigger word')
return None
c = shared.sd_model.__class__.__name__ if shared.sd_loaded else ''
if c != 'StableDiffusionXLPipeline':
shared.log.warning(f'PhotoMaker invalid base model: current={c} required=StableDiffusionXLPipeline')
logger.log.warning(f'PhotoMaker invalid base model: current={c} required=StableDiffusionXLPipeline')
return None
# validate prompt
@ -42,10 +43,10 @@ def photo_maker(p: processing.StableDiffusionProcessing, app, model: str, input_
prompt_ids2 = shared.sd_model.tokenizer_2.encode(p.all_prompts[0])
for t in trigger_ids:
if prompt_ids1.count(t) != 1:
shared.log.error(f'PhotoMaker: trigger word not matched in prompt: {trigger} ids={trigger_ids} prompt={p.all_prompts[0]} ids={prompt_ids1}')
logger.log.error(f'PhotoMaker: trigger word not matched in prompt: {trigger} ids={trigger_ids} prompt={p.all_prompts[0]} ids={prompt_ids1}')
return None
if prompt_ids2.count(t) != 1:
shared.log.error(f'PhotoMaker: trigger word not matched in prompt: {trigger} ids={trigger_ids} prompt={p.all_prompts[0]} ids={prompt_ids1}')
logger.log.error(f'PhotoMaker: trigger word not matched in prompt: {trigger} ids={trigger_ids} prompt={p.all_prompts[0]} ids={prompt_ids1}')
return None
# create new pipeline
@ -70,7 +71,7 @@ def photo_maker(p: processing.StableDiffusionProcessing, app, model: str, input_
repo_id, fn = 'TencentARC/PhotoMaker', 'photomaker-v1.bin'
photomaker_path = hf.hf_hub_download(repo_id=repo_id, filename=fn, repo_type="model", cache_dir=shared.opts.hfcache_dir)
shared.log.debug(f'PhotoMaker: model="{model}" uri="{repo_id}/{fn}" images={len(input_images)} trigger={trigger} args={p.task_args}')
logger.log.debug(f'PhotoMaker: model="{model}" uri="{repo_id}/{fn}" images={len(input_images)} trigger={trigger} args={p.task_args}')
# load photomaker adapter
shared.sd_model.load_photomaker_adapter(
@ -90,7 +91,7 @@ def photo_maker(p: processing.StableDiffusionProcessing, app, model: str, input_
faces = app.get(cv2.cvtColor(np.array(source_image), cv2.COLOR_RGB2BGR))
face = sorted(faces, key=lambda x: (x['bbox'][2]-x['bbox'][0])*(x['bbox'][3]-x['bbox'][1]))[-1] # only use the face with the maximum area
id_embed_list.append(torch.from_numpy(face['embedding']))
shared.log.debug(f'PhotoMaker: face={i+1} score={face.det_score:.2f} gender={"female" if face.gender==0 else "male"} age={face.age} bbox={face.bbox}')
logger.log.debug(f'PhotoMaker: face={i+1} score={face.det_score:.2f} gender={"female" if face.gender==0 else "male"} age={face.age} bbox={face.bbox}')
p.task_args['id_embeds'] = torch.stack(id_embed_list).to(device=devices.device, dtype=devices.dtype)
# run processing

View File

@ -5,6 +5,7 @@ import numpy as np
import huggingface_hub as hf
from PIL import Image
from modules import processing, shared, devices
from modules import logger
RESWAPPER_REPO = 'somanchiu/reswapper'
RESWAPPER_MODELS = {
@ -15,7 +16,7 @@ RESWAPPER_MODELS = {
}
reswapper_model = None
reswapper_name = None
debug = shared.log.trace if os.environ.get("SD_FACE_DEBUG", None) is not None else lambda *args, **kwargs: None
debug = logger.log.trace if os.environ.get("SD_FACE_DEBUG", None) is not None else lambda *args, **kwargs: None
dtype = devices.dtype
def get_model(model_name: str):
@ -30,12 +31,12 @@ def get_model(model_name: str):
reswapper_model = reswapper_model.to(device=devices.device, dtype=dtype)
reswapper_model.eval()
reswapper_name = model_name
shared.log.info(f'ReSwapper: model="{model_name}" url="{url}" cls={reswapper_model.__class__.__name__}')
logger.log.info(f'ReSwapper: model="{model_name}" url="{url}" cls={reswapper_model.__class__.__name__}')
if reswapper_model is None:
shared.log.error(f'ReSwapper: model="{model_name}" fn="{fn}" url="{url}" failed to load model')
logger.log.error(f'ReSwapper: model="{model_name}" fn="{fn}" url="{url}" failed to load model')
return reswapper_model
except Exception as e:
shared.log.error(f'ReSwapper: model="{model_name}" fn="{fn}" url="{url}" {e}')
logger.log.error(f'ReSwapper: model="{model_name}" fn="{fn}" url="{url}" {e}')
return reswapper_model
@ -49,7 +50,7 @@ def reswapper(
):
from modules.face import reswapper_utils as utils
if source_images is None or len(source_images) == 0:
shared.log.warning('ReSwapper: no input images')
logger.log.warning('ReSwapper: no input images')
return None
processed_images = []
@ -67,22 +68,22 @@ def reswapper(
source_np = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
source_faces = app.get(source_np)
if len(source_faces) == 0:
shared.log.error(f"ReSwapper: image={x+1} no source faces found")
logger.log.error(f"ReSwapper: image={x+1} no source faces found")
return source_images
if len(source_faces) != len(target_images):
shared.log.warning(f"ReSwapper: image={x+1} source-faces={len(source_faces)} target-images={len(target_images)}")
logger.log.warning(f"ReSwapper: image={x+1} source-faces={len(source_faces)} target-images={len(target_images)}")
for y, source_face in enumerate(source_faces):
target_image = target_images[y] if y < len(target_images) else target_images[-1]
target_image = target_image.convert('RGB')
target_np = cv2.cvtColor(np.array(target_image), cv2.COLOR_RGB2BGR)
target_faces = app.get(target_np)
if len(target_faces) != 1:
shared.log.error(f"ReSwapper: image={x+1} source-faces={y+1} target-faces={len(target_faces)} must be exactly one")
logger.log.error(f"ReSwapper: image={x+1} source-faces={y+1} target-faces={len(target_faces)} must be exactly one")
return source_images
target_face = target_faces[0]
source_str = f'score:{source_face.det_score:.2f} gender:{"female" if source_face.gender==0 else "male"} age:{source_face.age}'
target_str = f'score:{target_face.det_score:.2f} gender:{"female" if target_face.gender==0 else "male"} age:{target_face.age}'
shared.log.debug(f'ReSwapper image={x+1} face={y+1} source="{source_str}" target="{target_str}"')
logger.log.debug(f'ReSwapper image={x+1} face={y+1} source="{source_str}" target="{target_str}"')
source_latent = utils.getLatent(source_face)
source_tensor = torch.from_numpy(source_latent).to(device=devices.device, dtype=dtype)

View File

@ -4,7 +4,7 @@ from collections import UserDict
from dataclasses import dataclass, field
from typing import Union
from collections.abc import Callable, Iterator
from installer import log
from modules.logger import log
do_cache_folders = os.environ.get('SD_NO_CACHE', None) is None

View File

@ -1,6 +1,7 @@
from pydantic import BaseModel, Field # pylint: disable=no-name-in-module
from fastapi.exceptions import HTTPException
from modules import shared
from modules import logger
class ReqFramepack(BaseModel):
@ -60,7 +61,7 @@ def framepack_post(request: ReqFramepack):
else:
init_image = None
except Exception as e:
shared.log.error(f"API FramePack: id={task_id} cannot decode init image: {e}")
logger.log.error(f"API FramePack: id={task_id} cannot decode init image: {e}")
raise HTTPException(status_code=500, detail=str(e)) from e
try:
@ -69,12 +70,12 @@ def framepack_post(request: ReqFramepack):
else:
end_image = None
except Exception as e:
shared.log.error(f"API FramePack: id={task_id} cannot decode end image: {e}")
logger.log.error(f"API FramePack: id={task_id} cannot decode end image: {e}")
raise HTTPException(status_code=500, detail=str(e)) from e
del request.init_image
del request.end_image
shared.log.trace(f"API FramePack: id={task_id} init={init_image.shape} end={end_image.shape if end_image else None} {request}")
logger.log.trace(f"API FramePack: id={task_id} init={init_image.shape} end={end_image.shape if end_image else None} {request}")
generator = run_framepack(
_ui_state=None,

View File

@ -1,3 +1,4 @@
from modules import logger
DEFAULT_PROMPT_TEMPLATE = { # hunyuanvideo reference prompt template
"template": (
"<|start_header_id|>system<|end_header_id|>\n\nDescribe the video by detailing the following aspects: "
@ -71,4 +72,4 @@ def set_prompt_template(prompt, system_prompt:str=None, optimized_prompt:bool=Tr
"crop_start": tokens_system,
}
tokens_user = inputs['length'].item() - int(shared.sd_model.tokenizer.bos_token_id is not None) - int(shared.sd_model.tokenizer.eos_token_id is not None)
shared.log.trace(f'FramePack prompt: system={tokens_system} user={tokens_user} optimized={optimized_prompt} unmodified={unmodified_prompt} mode={mode}')
logger.log.trace(f'FramePack prompt: system={tokens_system} user={tokens_user} optimized={optimized_prompt} unmodified={unmodified_prompt} mode={mode}')

View File

@ -1,6 +1,7 @@
import os
import time
from modules import shared, devices, errors, sd_models, sd_checkpoint, model_quant
from modules import logger
models = {
@ -42,9 +43,9 @@ def set_model(receipe: str=None):
k, v = line.split(':', 1)
k = k.strip()
if k not in default_model.keys():
shared.log.warning(f'FramePack recipe: key={k} invalid')
logger.log.warning(f'FramePack recipe: key={k} invalid')
model[k] = split_url(v)
shared.log.debug(f'FramePack recipe: set {k}={model[k]}')
logger.log.debug(f'FramePack recipe: set {k}={model[k]}')
def get_model():
@ -57,7 +58,7 @@ def get_model():
def reset_model():
global model # pylint: disable=global-statement
model = default_model.copy()
shared.log.debug('FramePack recipe: reset')
logger.log.debug('FramePack recipe: reset')
return ''
@ -79,7 +80,7 @@ def load_model(variant:str=None, pipeline:str=None, text_encoder:str=None, text_
model['image_encoder'] = split_url(image_encoder)
if transformer is not None:
model['transformer'] = split_url(transformer)
# shared.log.trace(f'FramePack load: {model}')
# logger.log.trace(f'FramePack load: {model}')
try:
import diffusers
@ -137,7 +138,7 @@ def load_model(variant:str=None, pipeline:str=None, text_encoder:str=None, text_
os.environ.pop('HF_HUB_OFFLINE', None)
os.unsetenv('HF_HUB_OFFLINE')
shared.log.debug(f'FramePack load: module=llm {model["text_encoder"]}')
logger.log.debug(f'FramePack load: module=llm {model["text_encoder"]}')
load_args, quant_args = model_quant.get_dit_args({}, module='TE', device_map=True)
text_encoder = LlamaModel.from_pretrained(model["text_encoder"]["repo"], subfolder=model["text_encoder"]["subfolder"], cache_dir=shared.opts.hfcache_dir, **load_args, **quant_args, **offline_config)
tokenizer = LlamaTokenizerFast.from_pretrained(model["tokenizer"]["repo"], subfolder=model["tokenizer"]["subfolder"], cache_dir=shared.opts.hfcache_dir, **offline_config)
@ -145,14 +146,14 @@ def load_model(variant:str=None, pipeline:str=None, text_encoder:str=None, text_
text_encoder.eval()
sd_models.move_model(text_encoder, devices.cpu)
shared.log.debug(f'FramePack load: module=te {model["text_encoder_2"]}')
logger.log.debug(f'FramePack load: module=te {model["text_encoder_2"]}')
text_encoder_2 = CLIPTextModel.from_pretrained(model["text_encoder_2"]["repo"], subfolder=model["text_encoder_2"]["subfolder"], torch_dtype=devices.dtype, cache_dir=shared.opts.hfcache_dir, **offline_config)
tokenizer_2 = CLIPTokenizer.from_pretrained(model["pipeline"]["repo"], subfolder='tokenizer_2', cache_dir=shared.opts.hfcache_dir, **offline_config)
text_encoder_2.requires_grad_(False)
text_encoder_2.eval()
sd_models.move_model(text_encoder_2, devices.cpu)
shared.log.debug(f'FramePack load: module=vae {model["vae"]}')
logger.log.debug(f'FramePack load: module=vae {model["vae"]}')
vae = AutoencoderKLHunyuanVideo.from_pretrained(model["vae"]["repo"], subfolder=model["vae"]["subfolder"], torch_dtype=devices.dtype, cache_dir=shared.opts.hfcache_dir, **offline_config)
vae.requires_grad_(False)
vae.eval()
@ -160,14 +161,14 @@ def load_model(variant:str=None, pipeline:str=None, text_encoder:str=None, text_
vae.enable_tiling()
sd_models.move_model(vae, devices.cpu)
shared.log.debug(f'FramePack load: module=encoder {model["feature_extractor"]} model={model["image_encoder"]}')
logger.log.debug(f'FramePack load: module=encoder {model["feature_extractor"]} model={model["image_encoder"]}')
feature_extractor = SiglipImageProcessor.from_pretrained(model["feature_extractor"]["repo"], subfolder=model["feature_extractor"]["subfolder"], cache_dir=shared.opts.hfcache_dir, **offline_config)
image_encoder = SiglipVisionModel.from_pretrained(model["image_encoder"]["repo"], subfolder=model["image_encoder"]["subfolder"], torch_dtype=devices.dtype, cache_dir=shared.opts.hfcache_dir, **offline_config)
image_encoder.requires_grad_(False)
image_encoder.eval()
sd_models.move_model(image_encoder, devices.cpu)
shared.log.debug(f'FramePack load: module=transformer {model["transformer"]}')
logger.log.debug(f'FramePack load: module=transformer {model["transformer"]}')
dit_repo = model["transformer"]["repo"]
load_args, quant_args = model_quant.get_dit_args({}, module='Model', device_map=True)
transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained(dit_repo, subfolder=model["transformer"]["subfolder"], cache_dir=shared.opts.hfcache_dir, **load_args, **quant_args, **offline_config)
@ -194,12 +195,12 @@ def load_model(variant:str=None, pipeline:str=None, text_encoder:str=None, text_
t1 = time.time()
diffusers.loaders.peft._SET_ADAPTER_SCALE_FN_MAPPING['HunyuanVideoTransformer3DModelPacked'] = lambda model_cls, weights: weights # pylint: disable=protected-access
shared.log.info(f'FramePack load: model={shared.sd_model.__class__.__name__} variant="{variant}" type={shared.sd_model_type} time={t1-t0:.2f}')
logger.log.info(f'FramePack load: model={shared.sd_model.__class__.__name__} variant="{variant}" type={shared.sd_model_type} time={t1-t0:.2f}')
sd_models.apply_balanced_offload(shared.sd_model)
devices.torch_gc(force=True, reason='load')
except Exception as e:
shared.log.error(f'FramePack load: {e}')
logger.log.error(f'FramePack load: {e}')
errors.display(e, 'FramePack')
shared.state.end()
return None
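The messages above keep the project's `Context: key=value` convention, quoting string values. A hypothetical helper, not part of this commit, that renders kwargs in that style:

    from modules import logger

    def fmt(context: str, **kwargs) -> str:
        # Quote strings, print everything else bare: key="value" count=2
        parts = [f'{k}="{v}"' if isinstance(v, str) else f'{k}={v}' for k, v in kwargs.items()]
        return f'{context}: ' + ' '.join(parts)

    logger.log.debug(fmt('FramePack load', module='vae', time=1.23))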

View File

@ -1,6 +1,7 @@
import torch
import einops
from modules import shared, devices
from modules import logger
latent_rgb_factors = [ # from comfyui
@ -45,7 +46,7 @@ def vae_decode_tiny(latents):
if taesd is None:
from modules.vae import sd_vae_taesd
taesd, _variant = sd_vae_taesd.get_model(variant='TAE HunyuanVideo')
shared.log.debug(f'Video VAE: type=Tiny cls={taesd.__class__.__name__} latents={latents.shape}')
logger.log.debug(f'Video VAE: type=Tiny cls={taesd.__class__.__name__} latents={latents.shape}')
with devices.inference_context():
taesd = taesd.to(device=devices.device, dtype=devices.dtype)
latents = latents.transpose(1, 2) # pipe produces NCTHW and tae wants NTCHW

View File

@ -2,6 +2,7 @@ import time
import torch
import rich.progress as rp
from modules import shared, errors, devices, sd_models, timer, memstats
from modules import logger
from modules.framepack import framepack_vae # pylint: disable=wrong-import-order
from modules.framepack import framepack_hijack # pylint: disable=wrong-import-order
from modules.video_models.video_save import save_video # pylint: disable=wrong-import-order
@ -50,7 +51,7 @@ def worker(
timer.process.reset()
memstats.reset_stats()
if stream is None or shared.state.interrupted or shared.state.skipped:
shared.log.error('FramePack: stream is None')
logger.log.error('FramePack: stream is None')
stream.output_queue.push(('end', None))
return
@ -78,7 +79,7 @@ def worker(
image_encoder = shared.sd_model.image_processor
transformer = shared.sd_model.transformer
sd_models.apply_balanced_offload(shared.sd_model)
pbar = rp.Progress(rp.TextColumn('[cyan]Video'), rp.BarColumn(), rp.MofNCompleteColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=shared.console)
pbar = rp.Progress(rp.TextColumn('[cyan]Video'), rp.BarColumn(), rp.MofNCompleteColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=logger.console)
task = pbar.add_task('starting', total=steps * len(latent_paddings))
t_last = time.time()
if not is_f1:
@ -89,7 +90,7 @@ def worker(
pbar.update(task, description=f'text encode section={i}')
t0 = time.time()
torch.manual_seed(seed)
# shared.log.debug(f'FramePack: section={i} prompt="{prompt}"')
# logger.log.debug(f'FramePack: section={i} prompt="{prompt}"')
shared.state.textinfo = 'Text encode'
stream.output_queue.push(('progress', (None, 'Text encoding...')))
sd_models.apply_balanced_offload(shared.sd_model)
@ -110,7 +111,7 @@ def worker(
def latents_encode(input_image, end_image):
jobid = shared.state.begin('VAE Encode')
pbar.update(task, description='image encode')
# shared.log.debug(f'FramePack: image encode init={input_image.shape} end={end_image.shape if end_image is not None else None}')
# logger.log.debug(f'FramePack: image encode init={input_image.shape} end={end_image.shape if end_image is not None else None}')
t0 = time.time()
torch.manual_seed(seed)
stream.output_queue.push(('progress', (None, 'VAE encoding...')))
@ -135,7 +136,7 @@ def worker(
def vision_encode(input_image, end_image):
pbar.update(task, description='vision encode')
# shared.log.debug(f'FramePack: vision encode init={input_image.shape} end={end_image.shape if end_image is not None else None}')
# logger.log.debug(f'FramePack: vision encode init={input_image.shape} end={end_image.shape if end_image is not None else None}')
t0 = time.time()
shared.state.textinfo = 'Vision encode'
stream.output_queue.push(('progress', (None, 'Vision encoding...')))
@ -165,7 +166,7 @@ def worker(
stream.output_queue.push(('end', None))
raise AssertionError('Interrupted...')
if shared.state.paused:
shared.log.debug('Sampling paused')
logger.log.debug('Sampling paused')
while shared.state.paused:
if shared.state.interrupted or shared.state.skipped:
raise AssertionError('Interrupted...')
@ -215,7 +216,7 @@ def worker(
sammplejob = shared.state.begin('Sample')
lattent_padding_loop += 1
# shared.log.trace(f'FramePack: op=sample section={lattent_padding_loop}/{len(latent_paddings)} frames={total_generated_frames}/{num_frames*len(latent_paddings)} window={latent_window_size} size={num_frames}')
# logger.log.trace(f'FramePack: op=sample section={lattent_padding_loop}/{len(latent_paddings)} frames={total_generated_frames}/{num_frames*len(latent_paddings)} window={latent_window_size} size={num_frames}')
if is_f1:
is_first_section, is_last_section = False, False
else:
@ -329,7 +330,7 @@ def worker(
)
except AssertionError:
shared.log.info('FramePack: interrupted')
logger.log.info('FramePack: interrupted')
if shared.opts.keep_incomplete:
save_video(
p=None,
@ -349,11 +350,11 @@ def worker(
metadata=metadata,
)
except Exception as e:
shared.log.error(f'FramePack: {e}')
logger.log.error(f'FramePack: {e}')
errors.display(e, 'FramePack')
sd_models.apply_balanced_offload(shared.sd_model)
stream.output_queue.push(('end', None))
t1 = time.time()
shared.log.info(f'Processed: frames={total_generated_frames} fps={total_generated_frames/(t1-t0):.2f} its={(shared.state.sampling_step)/(t1-t0):.2f} time={t1-t0:.2f} timers={timer.process.dct()} memory={memstats.memory_stats()}')
logger.log.info(f'Processed: frames={total_generated_frames} fps={total_generated_frames/(t1-t0):.2f} its={(shared.state.sampling_step)/(t1-t0):.2f} time={t1-t0:.2f} timers={timer.process.dct()} memory={memstats.memory_stats()}')
shared.state.end(videojob)
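Rebinding the progress bar from `shared.console` to `logger.console` keeps Rich rendering the bar on the same console the log handler writes to, so bar redraws and log lines do not fight over the terminal. A minimal sketch of that binding, assuming `logger.console` is the module-level `rich.console.Console`:

    import rich.progress as rp
    from modules import logger

    pbar = rp.Progress(
        rp.TextColumn('[cyan]Video'),
        rp.BarColumn(),
        rp.TaskProgressColumn(),
        rp.TimeElapsedColumn(),
        console=logger.console,  # share the logger's console to avoid clashing output
    )
    with pbar:
        task = pbar.add_task('starting', total=100)
        pbar.update(task, advance=10, description='text encode section=0')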

View File

@ -6,6 +6,7 @@ import torch
import gradio as gr
from PIL import Image
from modules import shared, processing, timer, paths, extra_networks, progress, ui_video_vlm, call_queue
from modules import logger
from modules.video_models.video_utils import check_av
from modules.framepack import framepack_install # pylint: disable=wrong-import-order
from modules.framepack import framepack_load # pylint: disable=wrong-import-order
@ -41,7 +42,7 @@ def prepare_image(image, resolution):
image = resize_and_center_crop(image, target_height=scaled_h, target_width=scaled_w)
h0, w0, _c = image.shape
shared.log.debug(f'FramePack prepare: input="{w}x{h}" resized="{w0}x{h0}" resolution={resolution} scale={scale_factor}')
logger.log.debug(f'FramePack prepare: input="{w}x{h}" resized="{w0}x{h0}" resolution={resolution} scale={scale_factor}')
return image
@ -60,7 +61,7 @@ def interpolate_prompts(prompts, steps):
for i in range(steps):
prompt_index = int(i / factor)
interpolated_prompts[i] = prompts[prompt_index]
# shared.log.trace(f'FramePack interpolate: section={i} prompt="{interpolated_prompts[i]}"')
# logger.log.trace(f'FramePack interpolate: section={i} prompt="{interpolated_prompts[i]}"')
return interpolated_prompts
@ -108,7 +109,7 @@ def load_model(variant, attention):
def unload_model():
shared.log.debug('FramePack unload')
logger.log.debug('FramePack unload')
framepack_load.unload_model()
yield gr.update(), gr.update(), 'Model unloaded'
@ -149,8 +150,8 @@ def run_framepack(task_id, _ui_state, init_image, end_image, start_weight, end_w
torch.manual_seed(seed)
num_sections = len(framepack_worker.get_latent_paddings(mp4_fps, mp4_interpolate, latent_ws, duration, variant))
num_frames = (latent_ws * 4 - 3) * num_sections + 1
shared.log.info(f'FramePack start: mode={mode} variant="{variant}" frames={num_frames} sections={num_sections} resolution={resolution} seed={seed} duration={duration} teacache={use_teacache} thres={shared.opts.teacache_thresh} cfgzero={use_cfgzero}')
shared.log.info(f'FramePack params: steps={steps} start={start_weight} end={end_weight} vision={vision_weight} scale={cfg_scale} distilled={cfg_distilled} rescale={cfg_rescale} shift={shift}')
logger.log.info(f'FramePack start: mode={mode} variant="{variant}" frames={num_frames} sections={num_sections} resolution={resolution} seed={seed} duration={duration} teacache={use_teacache} thres={shared.opts.teacache_thresh} cfgzero={use_cfgzero}')
logger.log.info(f'FramePack params: steps={steps} start={start_weight} end={end_weight} vision={vision_weight} scale={cfg_scale} distilled={cfg_distilled} rescale={cfg_rescale} shift={shift}')
init_image = prepare_image(init_image, resolution)
if end_image is not None:
end_image = prepare_image(end_image, resolution)

View File

@ -4,6 +4,7 @@
# Attribution-ShareAlike 4.0 International Licence
from modules import logger
import torch
import numpy as np
from tqdm.auto import trange
@ -24,7 +25,7 @@ def test_solver():
_x = torch.linalg.solve(a, b)
return True
except Exception as e:
shared.log.debug(f'FramePack: solver=cpu {e}')
logger.log.debug(f'FramePack: solver=cpu {e}')
return False

View File

@ -5,6 +5,7 @@ import os
from PIL import Image
import gradio as gr
from modules import shared, gr_tempdir, script_callbacks, images
from modules import logger
from modules.infotext import parse, mapping # pylint: disable=unused-import
@ -12,7 +13,7 @@ type_of_gr_update = type(gr.update())
paste_fields: dict[str, dict] = {}
field_names = {}
registered_param_bindings: list[ParamBinding] = []
debug = shared.log.trace if os.environ.get('SD_PASTE_DEBUG', None) is not None else lambda *args, **kwargs: None
debug = logger.log.trace if os.environ.get('SD_PASTE_DEBUG', None) is not None else lambda *args, **kwargs: None
debug('Trace: PASTE')
parse_generation_parameters = parse # compatibility
infotext_to_setting_name_mapping = mapping # compatibility
@ -50,7 +51,7 @@ def image_from_url_text(filedata):
if is_in_right_dir:
filename = filename.rsplit('?', 1)[0]
if not os.path.exists(filename):
shared.log.error(f'Image file not found: {filename}')
logger.log.error(f'Image file not found: {filename}')
image = Image.new('RGB', (512, 512))
image.info['parameters'] = f'Image file not found: {filename}'
return image
@ -59,14 +60,14 @@ def image_from_url_text(filedata):
image.info['parameters'] = geninfo
return image
else:
shared.log.warning(f'File access denied: {filename}')
logger.log.warning(f'File access denied: {filename}')
return None
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if not isinstance(filedata, str):
shared.log.warning('Incorrect filedata received')
logger.log.warning('Incorrect filedata received')
return None
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
@ -88,7 +89,7 @@ def add_paste_fields(tabname: str, init_img: gr.Image | gr.HTML | None, fields:
try:
field_names[tabname] = [f[1] for f in fields if f[1] is not None and not callable(f[1])] if fields is not None else [] # tuple (component, label)
except Exception as e:
shared.log.error(f"Paste fields: tab={tabname} fields={fields} {e}")
logger.log.error(f"Paste fields: tab={tabname} fields={fields} {e}")
field_names[tabname] = []
# Build param_aliases automatically from component labels and elem_ids
@ -261,11 +262,11 @@ def connect_paste(button, local_paste_fields, input_comp, override_settings_comp
if os.path.exists(params_path):
with open(params_path, encoding="utf8") as file:
prompt = file.read()
shared.log.debug(f'Prompt parse: type="params" prompt="{prompt}"')
logger.log.debug(f'Prompt parse: type="params" prompt="{prompt}"')
else:
prompt = ''
else:
shared.log.debug(f'Prompt parse: type="current" prompt="{prompt}"')
logger.log.debug(f'Prompt parse: type="current" prompt="{prompt}"')
params = parse(prompt)
script_callbacks.infotext_pasted_callback(prompt, params)
res = []
@ -306,10 +307,10 @@ def connect_paste(button, local_paste_fields, input_comp, override_settings_comp
res.append(gr.update(value=val))
applied[key] = val
except Exception as e:
shared.log.error(f'Paste param: key="{key}" value="{v}" error="{e}"')
logger.log.error(f'Paste param: key="{key}" value="{v}" error="{e}"')
res.append(gr.update())
list_applied = [{k: v} for k, v in applied.items() if not callable(v) and not callable(k)]
shared.log.debug(f"Prompt restore: apply={list_applied} skip={skipped}")
logger.log.debug(f"Prompt restore: apply={list_applied} skip={skipped}")
return res
if override_settings_component is not None:
@ -338,7 +339,7 @@ def connect_paste(button, local_paste_fields, input_comp, override_settings_comp
vals[param_name] = v
vals_pairs = [f"{k}: {v}" for k, v in vals.items()]
if len(vals_pairs) > 0:
shared.log.debug(f'Settings overrides: {vals_pairs}')
logger.log.debug(f'Settings overrides: {vals_pairs}')
return gr.Dropdown.update(value=vals_pairs, choices=vals_pairs, visible=len(vals_pairs) > 0)
local_paste_fields = local_paste_fields + [(override_settings_component, paste_settings)]

View File

@ -1,3 +1,4 @@
from modules import logger
import os
import time
import torch
@ -22,7 +23,7 @@ def install_gguf():
transformers.utils.import_utils._gguf_version = ver # pylint: disable=protected-access
diffusers.utils.import_utils._is_gguf_available = True # pylint: disable=protected-access
diffusers.utils.import_utils._gguf_version = ver # pylint: disable=protected-access
shared.log.debug(f'Load GGUF: version={ver}')
logger.log.debug(f'Load GGUF: version={ver}')
return gguf

View File

@ -3,6 +3,7 @@ from PIL import Image
import gradio as gr
import gradio.processing_utils
from modules import scripts_manager, patches, gr_tempdir
from modules import logger
hijacked = False
@ -35,7 +36,7 @@ def process_kanvas(self, x): # only used when kanvas overrides gr.Image object
# mask = Image.merge("RGB", [alpha, alpha, alpha])
mask = mask.convert('L')
t1 = time.time()
errors.log.debug(f'Kanvas: image={image} mask={mask} time={t1-t0:.2f}')
logger.log.debug(f'Kanvas: image={image} mask={mask} time={t1-t0:.2f}')
if image is None:
return None
if mask is None:

View File

@ -4,10 +4,11 @@ from collections import namedtuple
from pathlib import Path
from PIL import Image, PngImagePlugin
from modules import shared, errors, paths
from modules import logger
Savedfile = namedtuple("Savedfile", ["name"])
debug = errors.log.trace if os.environ.get('SD_PATH_DEBUG', None) is not None else lambda *args, **kwargs: None
debug = logger.log.trace if os.environ.get('SD_PATH_DEBUG', None) is not None else lambda *args, **kwargs: None
def register_tmp_file(gradio, filename):
@ -77,13 +78,13 @@ def pil_to_temp_file(self, img: Image, dir: str, format="png") -> str: # pylint:
use_metadata = True
if not os.path.exists(folder):
os.makedirs(folder, exist_ok=True)
shared.log.debug(f'Created temp folder: path="{folder}"')
logger.log.debug(f'Created temp folder: path="{folder}"')
with tempfile.NamedTemporaryFile(delete=False, suffix=".png", dir=folder) as tmp:
name = tmp.name
img.save(name, pnginfo=(metadata if use_metadata else None))
img.already_saved_as = name
size = os.path.getsize(name)
shared.log.debug(f'Save temp: image="{name}" width={img.width} height={img.height} size={size}')
logger.log.debug(f'Save temp: image="{name}" width={img.width} height={img.height} size={size}')
shared.state.image_history += 1
params = ', '.join([f'{k}: {v}' for k, v in img.info.items()])
params = params[12:] if params.startswith('parameters: ') else params
@ -105,7 +106,7 @@ def cleanup_tmpdr():
temp_dir = shared.opts.temp_dir
if temp_dir == "" or not os.path.isdir(temp_dir):
temp_dir = os.path.join(paths.temp_dir, "gradio")
shared.log.debug(f'Temp folder: path="{temp_dir}"')
logger.log.debug(f'Temp folder: path="{temp_dir}"')
if not os.path.isdir(temp_dir):
return
for root, _dirs, files in os.walk(temp_dir, topdown=False):

View File

@ -1,7 +1,9 @@
from modules import logger
import hashlib
import os.path
from rich import progress, errors
from installer import log, console
from installer import console
from modules.logger import log
from modules.json_helpers import readfile, writefile
from modules.paths import data_path
@ -81,7 +83,7 @@ def sha256(filename, title, use_addnet_hash=False):
if use_addnet_hash:
if progress_ok:
try:
with progress.open(filename, 'rb', description=f'[cyan]Calculating hash: [yellow]{filename}', auto_refresh=True, console=shared.console) as f:
with progress.open(filename, 'rb', description=f'[cyan]Calculating hash: [yellow]{filename}', auto_refresh=True, console=logger.console) as f:
sha256_value = addnet_hash_safetensors(f)
except errors.LiveError:
log.warning('Hash: attempting to use function in a thread')
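`rich.progress.open` starts a live display, and Rich raises `rich.errors.LiveError` when another live display already owns the console, which is why the threaded fallback above stays in place. A condensed sketch of that structure, with the hashing body stubbed out by a hypothetical `compute_sha256` helper:

    from rich import progress, errors
    from modules import logger
    from modules.logger import log

    def hash_with_progress(filename: str) -> str:
        try:
            with progress.open(filename, 'rb', description=f'[cyan]Calculating hash: [yellow]{filename}', console=logger.console) as f:
                return compute_sha256(f)  # hypothetical hashing helper
        except errors.LiveError:  # console already hosts a live display
            log.warning('Hash: attempting to use function in a thread')
            with open(filename, 'rb') as f:
                return compute_sha256(f)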

View File

@ -2,12 +2,13 @@
import time
from modules import shared
from modules import logger
from modules.hidiffusion import hidiffusion
def apply(p, model_type):
if model_type not in ['sd', 'sdxl'] and p.hidiffusion:
shared.log.warning(f'HiDiffusion: class={shared.sd_model.__class__.__name__} not supported')
logger.log.warning(f'HiDiffusion: class={shared.sd_model.__class__.__name__} not supported')
return
unapply()
pipe = shared.sd_model.pipe if hasattr(shared.sd_model, 'pipe') else shared.sd_model
@ -34,9 +35,9 @@ def apply(p, model_type):
hidiffusion.apply_hidiffusion(pipe, apply_raunet=shared.opts.hidiffusion_raunet, apply_window_attn=shared.opts.hidiffusion_attn, model_type=model_type, steps=p.steps)
p.extra_generation_params['HiDiffusion'] = f'{shared.opts.hidiffusion_raunet}/{shared.opts.hidiffusion_attn}/{shared.opts.hidiffusion_steps > 0}:{shared.opts.hidiffusion_steps}'
t1 = time.time()
shared.log.debug(f'Applying HiDiffusion: raunet={shared.opts.hidiffusion_raunet} attn={shared.opts.hidiffusion_attn} aggressive={shared.opts.hidiffusion_steps > 0}:{shared.opts.hidiffusion_steps} t1={shared.opts.hidiffusion_t1} t2={shared.opts.hidiffusion_t2} time={t1-t0:.2f} type={shared.sd_model_type} width={p.width} height={p.height}')
logger.log.debug(f'Applying HiDiffusion: raunet={shared.opts.hidiffusion_raunet} attn={shared.opts.hidiffusion_attn} aggressive={shared.opts.hidiffusion_steps > 0}:{shared.opts.hidiffusion_steps} t1={shared.opts.hidiffusion_t1} t2={shared.opts.hidiffusion_t2} time={t1-t0:.2f} type={shared.sd_model_type} width={p.width} height={p.height}')
elif hasattr(pipe, 'unet') and getattr(pipe.unet, 'hidiffusion', False):
shared.log.warning('HiDiffusion: model reload recommended')
logger.log.warning('HiDiffusion: model reload recommended')
def unapply():

View File

@ -7,6 +7,7 @@ import datetime
from collections import deque
import torch
from modules import shared, devices
from modules import logger
class Item:
@ -40,7 +41,7 @@ class History:
@property
def list(self):
shared.log.info(f'History: items={self.count}/{shared.opts.latent_history} size={self.size}')
logger.log.info(f'History: items={self.count}/{shared.opts.latent_history} size={self.size}')
return [item.name for item in self.latents]
@property
@ -51,7 +52,7 @@ class History:
else:
current_index = 0
item = self.latents[current_index]
shared.log.debug(f'History get: index={current_index} time={item.ts} shape={list(item.latent.shape)} dtype={item.latent.dtype} count={self.count}')
logger.log.debug(f'History get: index={current_index} time={item.ts} shape={list(item.latent.shape)} dtype={item.latent.dtype} count={self.count}')
return item.latent.to(devices.device), current_index
def find(self, name):
@ -74,7 +75,7 @@ class History:
def clear(self):
self.latents.clear()
# shared.log.debug(f'History clear: count={self.count}')
# logger.log.debug(f'History clear: count={self.count}')
def load(self):
pass

View File

@ -2,7 +2,7 @@ import sys
import torch
import numpy as np
from PIL import Image
from installer import log
from modules.logger import log
def to_tensor(image: Image.Image | np.ndarray):

View File

@ -3,6 +3,7 @@ from collections import namedtuple
import numpy as np
from PIL import Image, ImageFont, ImageDraw
from modules import shared, script_callbacks
from modules import logger
Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])
@ -21,7 +22,7 @@ def check_grid_size(imgs):
mp = round(mp / 1000000)
ok = mp <= shared.opts.img_max_size_mp
if not ok:
shared.log.warning(f'Maximum image size exceeded: size={mp} maximum={shared.opts.img_max_size_mp} MPixels')
logger.log.warning(f'Maximum image size exceeded: size={mp} maximum={shared.opts.img_max_size_mp} MPixels')
return ok

View File

@ -4,6 +4,7 @@ import json
import piexif
from PIL import Image, ExifTags
from modules import shared, errors, sd_samplers
from modules import logger
from modules.image.watermark import get_watermark
@ -57,7 +58,7 @@ def parse_comfy_metadata(data: dict):
prompt = parse_prompt()
if len(workflow) > 0 or len(prompt) > 0:
parsed = f'App: ComfyUI{workflow}{prompt}'
shared.log.info(f'Image metadata: {parsed}')
logger.log.info(f'Image metadata: {parsed}')
return parsed
return ''
@ -79,7 +80,7 @@ def parse_invoke_metadata(data: dict):
metadata = parse_metadtaa()
if len(metadata) > 0:
parsed = f'App: InvokeAI{metadata}'
shared.log.info(f'Image metadata: {parsed}')
logger.log.info(f'Image metadata: {parsed}')
return parsed
return ''
@ -118,7 +119,7 @@ def read_info_from_image(image: Image.Image, watermark: bool = False) -> tuple[s
try:
exif = piexif.load(items["exif"])
except Exception as e:
shared.log.error(f'Error loading EXIF data: {e}')
logger.log.error(f'Error loading EXIF data: {e}')
exif = {}
for _key, subkey in exif.items():
if isinstance(subkey, dict):
@ -172,18 +173,18 @@ def image_data(data):
image = Image.open(io.BytesIO(data))
image.load()
info, _ = read_info_from_image(image)
errors.log.debug(f'Decoded object: image={image} metadata={info}')
logger.log.debug(f'Decoded object: image={image} metadata={info}')
return info, None
except Exception as e:
err1 = e
try:
if len(data) > 1024 * 10:
errors.log.warning(f'Error decoding object: data too long: {len(data)}')
logger.log.warning(f'Error decoding object: data too long: {len(data)}')
return gr.update(), None
info = data.decode('utf8')
errors.log.debug(f'Decoded object: data={len(data)} metadata={info}')
logger.log.debug(f'Decoded object: data={len(data)} metadata={info}')
return info, None
except Exception as e:
err2 = e
errors.log.error(f'Error decoding object: {err1 or err2}')
logger.log.error(f'Error decoding object: {err1 or err2}')
return gr.update(), None

View File

@ -8,10 +8,11 @@ import hashlib
import datetime
from pathlib import Path
from modules import shared, errors
from modules import logger
debug = os.environ.get('SD_NAMEGEN_DEBUG', None) is not None
debug_log = errors.log.trace if debug else lambda *args, **kwargs: None
debug_log = logger.log.trace if debug else lambda *args, **kwargs: None
re_nonletters = re.compile(r'[\s' + string.punctuation + ']+')
re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")
re_pattern_arg = re.compile(r"(.*)<([^>]*)>$")
@ -241,7 +242,7 @@ class FilenameGenerator:
fn = self.replacements.get(k, None)
debug_log(f'Namegen: key={k} value={fn(self)}')
except Exception as e:
shared.log.error(f'Namegen: key={k} {e}')
logger.log.error(f'Namegen: key={k} {e}')
errors.display(e, 'namegen')
for m in re_pattern.finditer(x):
text, pattern = m.groups()
@ -270,7 +271,7 @@ class FilenameGenerator:
except Exception as e:
replacement = None
errors.display(e, 'namegen')
shared.log.error(f'Filename apply pattern: {x} {e}')
logger.log.error(f'Filename apply pattern: {x} {e}')
if replacement == NOTHING:
continue
if replacement is not None:

View File

@ -4,6 +4,7 @@ import numpy as np
import torch
from PIL import Image
from modules import shared, upscaler
from modules import logger
from modules.image import sharpfin
@ -19,7 +20,7 @@ def resize_image(resize_mode: int, im: Image.Image | torch.Tensor, width: int, h
image = (255.0 * image).astype(np.uint8)
image = Image.fromarray(image)
except Exception as e:
shared.log.error(f"Image verification failed: {e}")
logger.log.error(f"Image verification failed: {e}")
return image
def latent(im, scale: float, selected_upscaler: upscaler.UpscalerData):
@ -50,8 +51,8 @@ def resize_image(resize_mode: int, im: Image.Image | torch.Tensor, width: int, h
else:
im = selected_upscaler.scaler.upscale(im, scale, selected_upscaler.name)
else:
shared.log.warning(f"Resize upscaler: invalid={upscaler_name} fallback={selected_upscaler.name}")
shared.log.debug(f"Resize upscaler: available={[u.name for u in shared.sd_upscalers]}")
logger.log.warning(f"Resize upscaler: invalid={upscaler_name} fallback={selected_upscaler.name}")
logger.log.debug(f"Resize upscaler: available={[u.name for u in shared.sd_upscalers]}")
if isinstance(im, Image.Image) and (im.width != w or im.height != h): # probably downsample after upscaler created larger image
im = sharpfin.resize(im, (w, h))
return im
@ -136,7 +137,7 @@ def resize_image(resize_mode: int, im: Image.Image | torch.Tensor, width: int, h
return res
im = verify_image(im)
if not isinstance(im, Image.Image):
shared.log.error(f'Resize image: image={type(im)} invalid type')
logger.log.error(f'Resize image: image={type(im)} invalid type')
return im
if (resize_mode == 0) or ((im.width == width) and (im.height == height)) or (width == 0 and height == 0): # none
res = im.copy()
@ -154,9 +155,9 @@ def resize_image(resize_mode: int, im: Image.Image | torch.Tensor, width: int, h
res = context_aware(im, width, height, context)
else:
res = im.copy()
shared.log.error(f'Invalid resize mode: {resize_mode}')
logger.log.error(f'Invalid resize mode: {resize_mode}')
t1 = time.time()
fn = f'{sys._getframe(2).f_code.co_name}:{sys._getframe(1).f_code.co_name}' # pylint: disable=protected-access
if im.width != width or im.height != height:
shared.log.debug(f'Resize image: source={im.width}:{im.height} target={width}:{height} mode="{shared.resize_modes[resize_mode]}" upscaler="{upscaler_name}" type={output_type} time={t1-t0:.2f} fn={fn}') # pylint: disable=protected-access
logger.log.debug(f'Resize image: source={im.width}:{im.height} target={width}:{height} mode="{shared.resize_modes[resize_mode]}" upscaler="{upscaler_name}" type={output_type} time={t1-t0:.2f} fn={fn}') # pylint: disable=protected-access
return np.array(res) if output_type == 'np' else res

View File

@ -6,13 +6,14 @@ import threading
import piexif.helper
from PIL import Image, PngImagePlugin
from modules import shared, script_callbacks, errors, paths
from modules import logger
from modules.image.grid import check_grid_size
from modules.image.namegen import FilenameGenerator
from modules.image.watermark import set_watermark
debug = errors.log.trace if os.environ.get('SD_PATH_DEBUG', None) is not None else lambda *args, **kwargs: None
debug_save = errors.log.trace if os.environ.get('SD_SAVE_DEBUG', None) is not None else lambda *args, **kwargs: None
debug = logger.log.trace if os.environ.get('SD_PATH_DEBUG', None) is not None else lambda *args, **kwargs: None
debug_save = logger.log.trace if os.environ.get('SD_SAVE_DEBUG', None) is not None else lambda *args, **kwargs: None
def sanitize_filename_part(text, replace_spaces=True):
@ -46,7 +47,7 @@ def atomically_save_image():
try:
image_format = Image.registered_extensions()[extension]
except Exception:
shared.log.warning(f'Save: unknown image format: {extension}')
logger.log.warning(f'Save: unknown image format: {extension}')
image_format = 'JPEG'
exifinfo = (exifinfo or "") if shared.opts.image_metadata else ""
# additional metadata saved in files
@ -54,9 +55,9 @@ def atomically_save_image():
try:
with open(filename_txt, "w", encoding="utf8") as file:
file.write(f"{exifinfo}\n")
shared.log.info(f'Save: text="{filename_txt}" len={len(exifinfo)}')
logger.log.info(f'Save: text="{filename_txt}" len={len(exifinfo)}')
except Exception as e:
shared.log.warning(f'Save failed: description={filename_txt} {e}')
logger.log.warning(f'Save failed: description={filename_txt} {e}')
# actual save
if image_format == 'PNG':
@ -67,7 +68,7 @@ def atomically_save_image():
save_args = { 'compress_level': 6, 'pnginfo': pnginfo_data if shared.opts.image_metadata else None }
elif image_format == 'JPEG':
if image.mode == 'RGBA':
shared.log.warning('Save: removing alpha channel')
logger.log.warning('Save: removing alpha channel')
image = image.convert("RGB")
elif image.mode == 'I;16':
image = image.point(lambda p: p * 0.0038910505836576).convert("L")
@ -97,11 +98,11 @@ def atomically_save_image():
debug_save(f'Save args: {save_args}')
image.save(fn, format=image_format, **save_args)
except Exception as e:
shared.log.error(f'Save failed: file="{fn}" format={image_format} args={save_args} {e}')
logger.log.error(f'Save failed: file="{fn}" format={image_format} args={save_args} {e}')
errors.display(e, 'Image save')
size = os.path.getsize(fn) if os.path.exists(fn) else 0
what = 'grid' if is_grid else 'image'
shared.log.info(f'Save: {what}="{fn}" type={image_format} width={image.width} height={image.height} size={size}')
logger.log.info(f'Save: {what}="{fn}" type={image_format} width={image.width} height={image.height} size={size}')
if shared.opts.save_log_fn != '' and len(exifinfo) > 0:
fn = os.path.join(paths.data_path, shared.opts.save_log_fn)
@ -114,7 +115,7 @@ def atomically_save_image():
entry = { 'id': idx, 'filename': filename, 'time': datetime.datetime.now().isoformat(), 'info': exifinfo }
entries.append(entry)
shared.writefile(entries, fn, mode='w', silent=True)
shared.log.info(f'Save: json="{fn}" records={len(entries)}')
logger.log.info(f'Save: json="{fn}" records={len(entries)}')
shared.state.outputs(filename)
shared.state.end(jobid)
save_queue.task_done()
@ -143,11 +144,11 @@ def save_image(image,
fn = f'{sys._getframe(2).f_code.co_name}:{sys._getframe(1).f_code.co_name}' # pylint: disable=protected-access
debug_save(f'Save: fn={fn}') # pylint: disable=protected-access
if image is None:
shared.log.warning('Image is none')
logger.log.warning('Image is none')
return None, None, None
if isinstance(image, list):
if len(image) > 1:
shared.log.warning(f'Save: images={image} multiple images provided, only the first one will be saved')
logger.log.warning(f'Save: images={image} multiple images provided, only the first one will be saved')
image = image[0]
if not check_grid_size([image]):
return None, None, None

View File

@ -10,7 +10,7 @@ Non-CUDA devices fall back to PIL/torch.nn.functional automatically.
import sys
import torch
from PIL import Image
from installer import log
from modules.logger import log
from modules.image.convert import to_tensor, to_pil

View File

@ -2,6 +2,7 @@ import random
import numpy as np
from PIL import Image
from modules import shared
from modules import logger
def set_watermark(image, wm_text: str | None = None, wm_image: Image.Image | None = None):
@ -10,7 +11,7 @@ def set_watermark(image, wm_text: str | None = None, wm_image: Image.Image | Non
try:
wm_image = Image.open(wm_image)
except Exception as e:
shared.log.warning(f'Set image watermark: image={wm_image} {e}')
logger.log.warning(f'Set image watermark: image={wm_image} {e}')
return image
if isinstance(wm_image, Image.Image):
if wm_image.mode != 'RGBA':
@ -39,9 +40,9 @@ def set_watermark(image, wm_text: str | None = None, wm_image: Image.Image | Non
b = int(rgba[2] * a + orig[2] * (1 - a))
if not a == 0:
image.putpixel((x+position[0], y+position[1]), (r, g, b))
shared.log.debug(f'Set image watermark: image={wm_image} position={position}')
logger.log.debug(f'Set image watermark: image={wm_image} position={position}')
except Exception as e:
shared.log.warning(f'Set image watermark: image={wm_image} {e}')
logger.log.warning(f'Set image watermark: image={wm_image} {e}')
if shared.opts.image_watermark_enabled and wm_text is not None: # invisible watermark
from imwatermark import WatermarkEncoder
@ -59,9 +60,9 @@ def set_watermark(image, wm_text: str | None = None, wm_image: Image.Image | Non
encoded = encoder.encode(data, wm_method)
image = Image.fromarray(encoded)
image.info = info
shared.log.debug(f'Set invisible watermark: {wm_text} method={wm_method} bits={wm_length}')
logger.log.debug(f'Set invisible watermark: {wm_text} method={wm_method} bits={wm_length}')
except Exception as e:
shared.log.warning(f'Set invisible watermark error: {wm_text} method={wm_method} bits={wm_length} {e}')
logger.log.warning(f'Set invisible watermark error: {wm_text} method={wm_method} bits={wm_length} {e}')
return image

View File

@ -4,13 +4,14 @@ import numpy as np
import filetype
from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError
from modules import scripts_manager, shared, processing, images, errors
from modules import logger
from modules.generation_parameters_copypaste import create_override_settings_dict
from modules.ui_common import plaintext_to_html
from modules.memstats import memory_stats
from modules.paths import resolve_output_path
debug = shared.log.trace if os.environ.get('SD_PROCESS_DEBUG', None) is not None else lambda *args, **kwargs: None
debug = logger.log.trace if os.environ.get('SD_PROCESS_DEBUG', None) is not None else lambda *args, **kwargs: None
debug('Trace: PROCESS')
@ -20,45 +21,45 @@ def validate_inputs(inputs):
if filetype.is_image(image):
outputs.append(image)
else:
shared.log.warning(f'Input skip: file="{image}" filetype={filetype.guess(image)}')
logger.log.warning(f'Input skip: file="{image}" filetype={filetype.guess(image)}')
return outputs
def process_batch(p, input_files, input_dir, output_dir, inpaint_mask_dir, args):
# shared.log.debug(f'batch: {input_files}|{input_dir}|{output_dir}|{inpaint_mask_dir}')
# logger.log.debug(f'batch: {input_files}|{input_dir}|{output_dir}|{inpaint_mask_dir}')
processing.fix_seed(p)
image_files = []
if input_files is not None and len(input_files) > 0:
image_files = [f.name for f in input_files]
image_files = validate_inputs(image_files)
shared.log.info(f'Process batch: input images={len(image_files)}')
logger.log.info(f'Process batch: input images={len(image_files)}')
elif os.path.isdir(input_dir):
image_files = [os.path.join(input_dir, f) for f in os.listdir(input_dir)]
image_files = validate_inputs(image_files)
shared.log.info(f'Process batch: input folder="{input_dir}" images={len(image_files)}')
logger.log.info(f'Process batch: input folder="{input_dir}" images={len(image_files)}')
is_inpaint_batch = False
if inpaint_mask_dir and os.path.isdir(inpaint_mask_dir):
inpaint_masks = [os.path.join(inpaint_mask_dir, f) for f in os.listdir(inpaint_mask_dir)]
inpaint_masks = validate_inputs(inpaint_masks)
is_inpaint_batch = len(inpaint_masks) > 0
shared.log.info(f'Process batch: mask folder="{input_dir}" images={len(inpaint_masks)}')
logger.log.info(f'Process batch: mask folder="{inpaint_mask_dir}" images={len(inpaint_masks)}')
p.do_not_save_grid = True
p.do_not_save_samples = True
p.default_prompt = p.prompt
if p.n_iter > 1:
p.n_iter = 1
shared.log.warning(f'Process batch: batch_count={p.n_iter} forced to 1')
logger.log.warning(f'Process batch: batch_count={p.n_iter} forced to 1')
shared.state.job_count = len(image_files) * p.n_iter
if shared.opts.batch_frame_mode: # SBM Frame mode is on, process each image in batch with same seed
window_size = p.batch_size
btcrept = 1
p.seed = [p.seed] * window_size # SBM MONKEYPATCH: Need to change processing to support a fixed seed value.
p.subseed = [p.subseed] * window_size # SBM MONKEYPATCH
shared.log.info(f"Process batch: inputs={len(image_files)} outputs={p.n_iter}x{len(image_files)} parallel={window_size}")
logger.log.info(f"Process batch: inputs={len(image_files)} outputs={p.n_iter}x{len(image_files)} parallel={window_size}")
else: # SBM Frame mode is off, standard operation of repeating same images with sequential seed.
window_size = 1
btcrept = p.batch_size
shared.log.info(f"Process batch: inputs={len(image_files)} outputs={p.n_iter*p.batch_size}x{len(image_files)}")
logger.log.info(f"Process batch: inputs={len(image_files)} outputs={p.n_iter*p.batch_size}x{len(image_files)}")
for i in range(0, len(image_files), window_size):
if shared.state.skipped:
shared.state.skipped = False
@ -86,11 +87,11 @@ def process_batch(p, input_files, input_dir, output_dir, inpaint_mask_dir, args)
p.all_negative_prompts = None
p.all_seeds = None
p.all_subseeds = None
shared.log.debug(f'Process batch: image="{image_file}" prompt={prompt_type} i={i+1}/{len(image_files)}')
logger.log.debug(f'Process batch: image="{image_file}" prompt={prompt_type} i={i+1}/{len(image_files)}')
except UnidentifiedImageError as e:
shared.log.error(f'Process batch: image="{image_file}" {e}')
logger.log.error(f'Process batch: image="{image_file}" {e}')
if len(batch_images) == 0:
shared.log.warning("Process batch: no images found in batch")
logger.log.warning("Process batch: no images found in batch")
continue
batch_images = batch_images * btcrept # Standard mode sends the same image per batchsize.
p.init_images = batch_images
@ -115,12 +116,12 @@ def process_batch(p, input_files, input_dir, output_dir, inpaint_mask_dir, args)
if processed is None:
processed = processing.process_images(p)
except Exception as e:
shared.log.error(f'Process batch: {e}')
logger.log.error(f'Process batch: {e}')
errors.display(e, 'batch')
processed = None
if processed is None or len(processed.images) == 0:
shared.log.warning(f'Process batch: i={i+1}/{len(image_files)} no images processed')
logger.log.warning(f'Process batch: i={i+1}/{len(image_files)} no images processed')
continue
for n, (image, image_file) in enumerate(itertools.zip_longest(processed.images, batch_image_files)):
@ -144,7 +145,7 @@ def process_batch(p, input_files, input_dir, output_dir, inpaint_mask_dir, args)
image.info[k] = v
images.save_image(image, path=output_dir, basename=basename, seed=None, prompt=None, extension=ext, info=info, grid=False, pnginfo_section_name="extras", existing_info=image.info, forced_filename=forced_filename)
processed = scripts_manager.scripts_img2img.after(p, processed, *args)
shared.log.debug(f'Processed: images={len(batch_image_files)} memory={memory_stats()} batch')
logger.log.debug(f'Processed: images={len(batch_image_files)} memory={memory_stats()} batch')
def img2img(id_task: str, state: str, mode: int,
@ -183,11 +184,11 @@ def img2img(id_task: str, state: str, mode: int,
debug(f'img2img: {id_task}')
if shared.sd_model is None:
shared.log.warning('Aborted: op=img model not loaded')
logger.log.warning('Aborted: op=img model not loaded')
return [], '', '', 'Error: model not loaded'
if sampler_index is None:
shared.log.warning('Sampler: invalid')
logger.log.warning('Sampler: invalid')
sampler_index = 0
mode = int(mode)
@ -230,7 +231,7 @@ def img2img(id_task: str, state: str, mode: int,
elif mode == 5: # process batch
pass # handled later
else:
shared.log.error(f'Image processing unknown mode: {mode}')
logger.log.error(f'Image processing unknown mode: {mode}')
if image is not None:
image = ImageOps.exif_transpose(image)

View File

@ -20,6 +20,7 @@ from hashlib import sha256
import functools
from modules import shared, devices, sd_models
from modules import logger
# importing openvino.runtime forces DeprecationWarning to "always"
@ -77,7 +78,7 @@ warned = False
def warn_once(msg):
global warned
if not warned:
shared.log.warning(msg)
logger.log.warning(msg)
warned = True
class OpenVINOGraphModule(torch.nn.Module):
@ -159,7 +160,7 @@ def cached_model_name(model_hash_str, device, args, cache_root, reversed = False
os.makedirs(model_cache_dir, exist_ok=True)
file_name = model_cache_dir + model_hash_str + "_" + device
except OSError as error:
shared.log.error(f"Cache directory {cache_root} cannot be created. Model caching is disabled. Error: {error}")
logger.log.error(f"Cache directory {cache_root} cannot be created. Model caching is disabled. Error: {error}")
return None
inputs_str = ""

View File

@ -13,6 +13,7 @@ from typing import TYPE_CHECKING
from PIL import Image
import transformers
from modules import processing, shared, devices, sd_models, errors, model_quant
from modules import logger
if TYPE_CHECKING:
from diffusers import DiffusionPipeline
@ -75,10 +76,10 @@ def get_adapters():
def get_images(input_images):
output_images = []
if input_images is None or len(input_images) == 0:
shared.log.error('IP adapter: no init images')
logger.log.error('IP adapter: no init images')
return None
if shared.sd_model_type not in ['sd', 'sdxl', 'sd3', 'f1']:
shared.log.error('IP adapter: base model not supported')
logger.log.error('IP adapter: base model not supported')
return None
if isinstance(input_images, str):
from modules.api.api import decode_base64_to_image
@ -102,7 +103,7 @@ def get_images(input_images):
pil_image.load()
output_images.append(pil_image)
else:
shared.log.error(f'IP adapter: unknown input: {image}')
logger.log.error(f'IP adapter: unknown input: {image}')
return output_images
@ -133,9 +134,9 @@ def crop_images(images, crops):
if len(cropped) == len(images[i]):
images[i] = cropped
else:
shared.log.error(f'IP adapter: failed to crop image: source={len(images[i])} faces={len(cropped)}')
logger.log.error(f'IP adapter: failed to crop image: source={len(images[i])} faces={len(cropped)}')
except Exception as e:
shared.log.error(f'IP adapter: failed to crop image: {e}')
logger.log.error(f'IP adapter: failed to crop image: {e}')
if shared.sd_model_type == 'sd3' and len(images) == 1:
return images[0]
return images
@ -148,7 +149,7 @@ def unapply(pipe, unload: bool = False): # pylint: disable=arguments-differ
if hasattr(pipe, 'set_ip_adapter_scale'):
pipe.set_ip_adapter_scale(0)
if unload:
shared.log.debug('IP adapter unload')
logger.log.debug('IP adapter unload')
pipe.unload_ip_adapter()
if hasattr(pipe, 'unet') and pipe.unet is not None:
module = pipe.unet
@ -187,7 +188,7 @@ def load_image_encoder(pipe: DiffusionPipeline, adapter_names: list[str]):
clip_repo = OPEN_ID
clip_subfolder = None
else:
shared.log.error(f'IP adapter: unknown model type: {adapter_name}')
logger.log.error(f'IP adapter: unknown model type: {adapter_name}')
return False
# load image encoder used by ip adapter
@ -200,10 +201,10 @@ def load_image_encoder(pipe: DiffusionPipeline, adapter_names: list[str]):
else:
if clip_subfolder is None:
image_encoder = transformers.CLIPVisionModelWithProjection.from_pretrained(clip_repo, torch_dtype=devices.dtype, cache_dir=shared.opts.hfcache_dir, use_safetensors=True, **offline_config)
shared.log.debug(f'IP adapter load: encoder="{clip_repo}" cls={pipe.image_encoder.__class__.__name__}')
logger.log.debug(f'IP adapter load: encoder="{clip_repo}" cls={pipe.image_encoder.__class__.__name__}')
else:
image_encoder = transformers.CLIPVisionModelWithProjection.from_pretrained(clip_repo, subfolder=clip_subfolder, torch_dtype=devices.dtype, cache_dir=shared.opts.hfcache_dir, use_safetensors=True, **offline_config)
shared.log.debug(f'IP adapter load: encoder="{clip_repo}/{clip_subfolder}" cls={pipe.image_encoder.__class__.__name__}')
logger.log.debug(f'IP adapter load: encoder="{clip_repo}/{clip_subfolder}" cls={pipe.image_encoder.__class__.__name__}')
sd_models.clear_caches()
image_encoder = model_quant.do_post_load_quant(image_encoder, allow=True)
if hasattr(pipe, 'register_modules'):
@ -212,7 +213,7 @@ def load_image_encoder(pipe: DiffusionPipeline, adapter_names: list[str]):
pipe.image_encoder = image_encoder
clip_loaded = f'{clip_repo}/{clip_subfolder}'
except Exception as e:
shared.log.error(f'IP adapter load: encoder="{clip_repo}/{clip_subfolder}" {e}')
logger.log.error(f'IP adapter load: encoder="{clip_repo}/{clip_subfolder}" {e}')
errors.display(e, 'IP adapter: type=encoder')
return False
shared.state.end(jobid)
@ -235,9 +236,9 @@ def load_feature_extractor(pipe):
else:
pipe.feature_extractor = feature_extractor
sd_models.apply_balanced_offload(pipe.feature_extractor)
shared.log.debug(f'IP adapter load: extractor={pipe.feature_extractor.__class__.__name__}')
logger.log.debug(f'IP adapter load: extractor={pipe.feature_extractor.__class__.__name__}')
except Exception as e:
shared.log.error(f'IP adapter load: extractor {e}')
logger.log.error(f'IP adapter load: extractor {e}')
errors.display(e, 'IP adapter: type=extractor')
return False
shared.state.end(jobid)
@ -268,14 +269,14 @@ def parse_params(p: processing.StableDiffusionProcessing, adapters: list, adapte
adapter_masks[i] = mask_processor.preprocess(adapter_masks[i], height=p.height, width=p.width)
adapter_masks = mask_processor.preprocess(adapter_masks, height=p.height, width=p.width)
if adapter_images is None:
shared.log.error('IP adapter: no image provided')
logger.log.error('IP adapter: no image provided')
return [], [], [], [], [], []
if len(adapters) < len(adapter_images):
adapter_images = adapter_images[:len(adapters)]
if len(adapters) < len(adapter_masks):
adapter_masks = adapter_masks[:len(adapters)]
if len(adapter_masks) > 0 and len(adapter_masks) != len(adapter_images):
shared.log.error('IP adapter: image and mask count mismatch')
logger.log.error('IP adapter: image and mask count mismatch')
return [], [], [], [], [], []
adapter_scales = get_scales(adapter_scales, adapter_images)
p.ip_adapter_scales = adapter_scales.copy()
@ -319,7 +320,7 @@ def apply(pipe, p: processing.StableDiffusionProcessing, adapter_names=None, ada
del p.ip_adapter_images
return False
if shared.sd_model_type not in ['sd', 'sdxl', 'sd3', 'f1']:
shared.log.error(f'IP adapter: model={shared.sd_model_type} class={pipe.__class__.__name__} not supported')
logger.log.error(f'IP adapter: model={shared.sd_model_type} class={pipe.__class__.__name__} not supported')
return False
adapter_images, adapter_masks, adapter_scales, adapter_crops, adapter_starts, adapter_ends = parse_params(p, adapters, adapter_scales, adapter_crops, adapter_starts, adapter_ends, adapter_images)
@ -328,7 +329,7 @@ def apply(pipe, p: processing.StableDiffusionProcessing, adapter_names=None, ada
if pipe is None:
return False
if len(adapter_images) == 0:
shared.log.error('IP adapter: no image provided')
logger.log.error('IP adapter: no image provided')
adapters = [] # unload adapter if previously loaded as it will cause runtime errors
if len(adapters) == 0:
unapply(pipe, getattr(p, 'ip_adapter_unload', False))
@ -336,7 +337,7 @@ def apply(pipe, p: processing.StableDiffusionProcessing, adapter_names=None, ada
del p.ip_adapter_images
return False
if not hasattr(pipe, 'load_ip_adapter'):
shared.log.error(f'IP adapter: pipeline not supported: {pipe.__class__.__name__}')
logger.log.error(f'IP adapter: pipeline not supported: {pipe.__class__.__name__}')
return False
if not load_image_encoder(pipe, adapter_names):
@ -383,16 +384,16 @@ def apply(pipe, p: processing.StableDiffusionProcessing, adapter_names=None, ada
from nunchaku.models.ip_adapter.diffusers_adapters import apply_IPA_on_pipe
apply_IPA_on_pipe(pipe, ip_adapter_scale=adapter_scales[0], repo_id=repos)
pipe = sd_models.apply_balanced_offload(pipe)
shared.log.debug(f'IP adapter load: engine=nunchaku scale={adapter_scales[0]} repo="{repos}"')
logger.log.debug(f'IP adapter load: engine=nunchaku scale={adapter_scales[0]} repo="{repos}"')
else:
shared.log.error('IP adapter: Nunchaku only supports single adapter')
logger.log.error('IP adapter: Nunchaku only supports single adapter')
p.task_args['ip_adapter_image'] = crop_images(adapter_images, adapter_crops)
if len(adapter_masks) > 0:
p.cross_attention_kwargs = { 'ip_adapter_masks': adapter_masks }
p.extra_generation_params["IP Adapter"] = ';'.join(ip_str)
t1 = time.time()
shared.log.info(f'IP adapter: {ip_str} image={adapter_images} mask={adapter_masks is not None} time={t1-t0:.2f}')
logger.log.info(f'IP adapter: {ip_str} image={adapter_images} mask={adapter_masks is not None} time={t1-t0:.2f}')
except Exception as e:
shared.log.error(f'IP adapter load: adapters={adapter_names} repo={repos} folders={subfolders} names={names} {e}')
logger.log.error(f'IP adapter load: adapters={adapter_names} repo={repos} folders={subfolders} names={names} {e}')
errors.display(e, 'IP adapter: type=adapter')
return True

View File

@ -5,7 +5,7 @@ import json
from typing import overload, Literal
import fasteners
import orjson
from installer import log
from modules.logger import log
locking_available = True # used by file read/write locking

View File

@ -7,7 +7,7 @@ from torch.hub import download_url_to_file, get_dir
from PIL import Image
from modules import devices
from modules.image import convert
from installer import log
from modules.logger import log
LAMA_MODEL_URL = "https://github.com/enesmsahin/simple-lama-inpainting/releases/download/v0.1.0/big-lama.pt"

View File

@ -1,3 +1,4 @@
from modules import logger
from modules import shared, sd_models, devices, attention
from .linfusion import LinFusion
from .attention import GeneralizedLinearAttention
@ -29,18 +30,18 @@ def apply(pipeline, pretrained: bool = True):
else:
model_path = detect(pipeline)
if model_path is None:
shared.log.error('LinFusion: unsupported model type')
logger.log.error('LinFusion: unsupported model type')
return
applied = LinFusion.from_pretrained(model_path, cache_dir=shared.opts.hfcache_dir).to(device=pipeline.unet.device, dtype=pipeline.unet.dtype)
applied.mount_to(unet=pipeline.unet)
shared.log.info(f'Applying LinFusion: class={applied.__class__.__name__} model="{model_path}" modules={len(applied.modules_dict)}')
logger.log.info(f'Applying LinFusion: class={applied.__class__.__name__} model="{model_path}" modules={len(applied.modules_dict)}')
def unapply(pipeline):
global applied # pylint: disable=global-statement
if applied is None:
return
# shared.log.debug('LinFusion: unapply')
# logger.log.debug('LinFusion: unapply')
attention.set_diffusers_attention(pipeline)
devices.torch_gc()
applied = None

View File

@ -7,13 +7,27 @@ import logging
import warnings
import urllib3
from modules import timer, errors
from modules import logger
try:
import math
cores = os.cpu_count()
affinity = len(os.sched_getaffinity(0)) # pylint: disable=no-member
threads = torch.get_num_threads()
if threads < (affinity / 2):
torch.set_num_threads(math.floor(affinity / 2))
threads = torch.get_num_threads()
logger.log.debug(f'System: cores={cores} affinity={affinity} threads={threads}')
except Exception:
pass
initialized = False
errors.install()
logging.getLogger("DeepSpeed").disabled = True
timer.startup.record("loader")
errors.log.debug('Initializing: libraries')
logger.log.debug('Initializing: libraries')
np = None
try:
@ -32,8 +46,8 @@ try:
return npwarn_decorator
np._no_nep50_warning = getattr(np, '_no_nep50_warning', dummy_npwarn_decorator_factory) # pylint: disable=protected-access
except Exception as e:
errors.log.error(f'Loader: numpy=={np.__version__ if np is not None else None} {e}')
errors.log.error('Please restart the app to fix this issue')
logger.log.error(f'Loader: numpy=={np.__version__ if np is not None else None} {e}')
logger.log.error('Please restart the app to fix this issue')
sys.exit(1)
timer.startup.record("numpy")
@ -41,8 +55,8 @@ scipy = None
try:
import scipy # pylint: disable=W0611,C0411
except Exception as e:
errors.log.error(f'Loader: scipy=={scipy.__version__ if scipy is not None else None} {e}')
errors.log.error('Please restart the app to fix this issue')
logger.log.error(f'Loader: scipy=={scipy.__version__ if scipy is not None else None} {e}')
logger.log.error('Please restart the app to fix this issue')
sys.exit(1)
timer.startup.record("scipy")
@ -55,17 +69,17 @@ except Exception:
import torch # pylint: disable=C0411
if torch.__version__.startswith('2.5.0'):
errors.log.warning(f'Disabling cuDNN for SDP on torch={torch.__version__}')
logger.log.warning(f'Disabling cuDNN for SDP on torch={torch.__version__}')
torch.backends.cuda.enable_cudnn_sdp(False)
try:
import intel_extension_for_pytorch as ipex # pylint: disable=import-error,unused-import
errors.log.debug(f'Load IPEX=={ipex.__version__}')
logger.log.debug(f'Load IPEX=={ipex.__version__}')
except Exception:
pass
try:
pass # pylint: disable=unused-import,ungrouped-imports
except Exception:
errors.log.warning('Loader: torch is not built with distributed support')
logger.log.warning('Loader: torch is not built with distributed support')
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@ -74,10 +88,10 @@ torchvision = None
try:
import torchvision # pylint: disable=W0611,C0411
except Exception as e:
errors.log.error(f'Loader: torchvision=={torchvision.__version__ if "torchvision" in sys.modules else None} {e}')
logger.log.error(f'Loader: torchvision=={torchvision.__version__ if "torchvision" in sys.modules else None} {e}')
if '_no_nep' in str(e):
errors.log.error('Loaded versions of packaged are not compatible')
errors.log.error('Please restart the app to fix this issue')
logger.log.error('Loaded versions of packaged are not compatible')
logger.log.error('Please restart the app to fix this issue')
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
logging.getLogger("pytorch_lightning").disabled = True
warnings.filterwarnings(action="ignore", category=DeprecationWarning)
@ -92,7 +106,7 @@ try:
torch._dynamo.config.verbose = False # pylint: disable=protected-access
torch._dynamo.config.suppress_errors = True # pylint: disable=protected-access
except Exception as e:
errors.log.warning(f'Torch logging: {e}')
logger.log.warning(f'Torch logging: {e}')
if ".dev" in torch.__version__ or "+git" in torch.__version__:
torch.__long_version__ = torch.__version__
torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)
@ -128,7 +142,7 @@ try:
onnxruntime.set_default_logger_verbosity(1)
onnxruntime.disable_telemetry_events()
except Exception as e:
errors.log.warning(f'Torch onnxruntime: {e}')
logger.log.warning(f'Torch onnxruntime: {e}')
timer.startup.record("onnx")
timer.startup.record("fastapi")
@ -154,8 +168,8 @@ try:
diffusers.loaders.single_file.logging.tqdm = partial(tqdm, unit='C')
timer.startup.record("diffusers")
except Exception as e:
errors.log.error(f'Loader: diffusers=={diffusers.__version__ if "diffusers" in sys.modules else None} {e}')
errors.log.error('Please restart re-run the installer')
logger.log.error(f'Loader: diffusers=={diffusers.__version__ if "diffusers" in sys.modules else None} {e}')
logger.log.error('Please re-run the installer')
sys.exit(1)
try:
@ -196,18 +210,6 @@ def get_packages():
"hub": huggingface_hub.__version__,
}
try:
import math
cores = os.cpu_count()
affinity = len(os.sched_getaffinity(0)) # pylint: disable=no-member
threads = torch.get_num_threads()
if threads < (affinity / 2):
torch.set_num_threads(math.floor(affinity / 2))
threads = torch.get_num_threads()
errors.log.debug(f'System: cores={cores} affinity={affinity} threads={threads}')
except Exception:
pass
try:
import torchvision.transforms.functional_tensor # pylint: disable=unused-import, ungrouped-imports
except ImportError:
@ -223,7 +225,7 @@ def deprecate_warn(*args, **kwargs):
try:
deprecate_diffusers(*args, **kwargs)
except Exception as e:
errors.log.warning(f'Deprecation: {e}')
logger.log.warning(f'Deprecation: {e}')
diffusers.utils.deprecation_utils.deprecate = deprecate_warn
diffusers.utils.deprecate = deprecate_warn
@ -238,5 +240,5 @@ class VersionString(str): # support both string and tuple for version check
torch.__version__ = VersionString(torch.__version__)
errors.log.info(f'Torch: torch=={torch.__version__} torchvision=={torchvision.__version__}')
errors.log.info(f'Packages: diffusers=={diffusers.__version__} transformers=={transformers.__version__} accelerate=={accelerate.__version__} gradio=={gradio.__version__} pydantic=={pydantic.__version__} numpy=={np.__version__} cv2=={cv2.__version__}')
logger.log.info(f'Torch: torch=={torch.__version__} torchvision=={torchvision.__version__}')
logger.log.info(f'Packages: diffusers=={diffusers.__version__} transformers=={transformers.__version__} accelerate=={accelerate.__version__} gradio=={gradio.__version__} pydantic=={pydantic.__version__} numpy=={np.__version__} cv2=={cv2.__version__}')

View File

@ -1,5 +1,6 @@
import json
import modules.errors as errors
from modules import logger
localizations = {}
@ -31,7 +32,7 @@ def localization_js(current_localization_name):
with open(fn, encoding="utf8") as file:
data = json.load(file)
except Exception as e:
errors.log.error(f"Error loading localization from {fn}:")
logger.log.error(f"Error loading localization from {fn}:")
errors.display(e, 'localization')
return f"var localization = {json.dumps(data)}\n"

237 modules/logger.py Normal file
View File

@ -0,0 +1,237 @@
import os
import sys
import logging
import socket
import time
from functools import partial, partialmethod
from logging.handlers import RotatingFileHandler
# rich imports
from rich.theme import Theme
from rich.logging import RichHandler
from rich.console import Console
from rich.padding import Padding
from rich.segment import Segment
from rich import box
from rich import print as rprint
from rich.pretty import install as pretty_install
from rich.traceback import install as traceback_install
# Global logger and console instances
log = logging.getLogger("sd")
console = None
log_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'sdnext.log') # adjusted path relative to modules/
hostname = socket.gethostname()
log_rolled = False
def str_to_bool(val: str | bool | None) -> bool | None:
if isinstance(val, str):
if val.strip() and val.strip().lower() in ("1", "true"):
return True
return False
return val
def get_console():
return console
def get_log():
return log
def install_traceback(suppress: list = None):
if suppress is None:
suppress = []
width = os.environ.get("SD_TRACEWIDTH", console.width if console else None)
if width is not None:
width = int(width)
log.excepthook = traceback_install(
console=console,
extra_lines=int(os.environ.get("SD_TRACELINES", 1)),
max_frames=int(os.environ.get("SD_TRACEFRAMES", 16)),
width=width,
word_wrap=str_to_bool(os.environ.get("SD_TRACEWRAP", False)),
indent_guides=str_to_bool(os.environ.get("SD_TRACEINDENT", False)),
show_locals=str_to_bool(os.environ.get("SD_TRACELOCALS", False)),
locals_hide_dunder=str_to_bool(os.environ.get("SD_TRACEDUNDER", True)),
locals_hide_sunder=str_to_bool(os.environ.get("SD_TRACESUNDER", None)),
suppress=suppress,
)
pretty_install(console=console)
_log_config = {'debug': False, 'trace': False, 'log_filename': None}
def setup_logging(debug=None, trace=None, log_filename=None):
global log_file, console, log_rolled, _log_config # pylint: disable=global-statement
if debug is not None: _log_config['debug'] = debug
if trace is not None: _log_config['trace'] = trace
if log_filename is not None: _log_config['log_filename'] = log_filename
debug = _log_config['debug']
trace = _log_config['trace']
log_filename = _log_config['log_filename']
class RingBuffer(logging.StreamHandler):
def __init__(self, capacity):
super().__init__()
self.capacity = capacity
self.buffer = []
self.formatter = logging.Formatter('{ "asctime":"%(asctime)s", "created":%(created)f, "facility":"%(name)s", "pid":%(process)d, "tid":%(thread)d, "level":"%(levelname)s", "module":"%(module)s", "func":"%(funcName)s", "msg":"%(message)s" }')
def emit(self, record):
if record.msg is not None and not isinstance(record.msg, str):
record.msg = str(record.msg)
try:
record.msg = record.msg.replace('"', "'")
except Exception:
pass
msg = self.format(record)
self.buffer.append(msg)
if len(self.buffer) > self.capacity:
self.buffer.pop(0)
def get(self):
return self.buffer
class LogFilter(logging.Filter):
def __init__(self):
super().__init__()
def filter(self, record):
return len(record.getMessage()) > 2
def override_padding(self, console, options): # pylint: disable=redefined-outer-name
style = console.get_style(self.style)
width = options.max_width
self.left = 0
render_options = options.update_width(width - self.left - self.right)
if render_options.height is not None:
render_options = render_options.update_height(height=render_options.height - self.top - self.bottom)
lines = console.render_lines(self.renderable, render_options, style=style, pad=False)
_Segment = Segment
left = _Segment(" " * self.left, style) if self.left else None
right = [_Segment.line()]
blank_line: list[Segment] | None = None
if self.top:
blank_line = [_Segment(f'{" " * width}\n', style)]
yield from blank_line * self.top
if left:
for line in lines:
yield left
yield from line
yield from right
else:
for line in lines:
yield from line
yield from right
if self.bottom:
blank_line = blank_line or [_Segment(f'{" " * width}\n', style)]
yield from blank_line * self.bottom
if log_filename:
log_file = log_filename
logging.TRACE = 25
logging.addLevelName(logging.TRACE, 'TRACE')
logging.Logger.trace = partialmethod(logging.Logger.log, logging.TRACE)
logging.trace = partial(logging.log, logging.TRACE)
def exception_hook(e: Exception, suppress=None):
from rich.traceback import Traceback
if suppress is None:
suppress = []
tb = Traceback.from_exception(type(e), e, e.__traceback__, show_locals=False, max_frames=16, extra_lines=1, suppress=suppress, theme="ansi_dark", word_wrap=False, width=console.width)
# print-to-console, does not get printed-to-file
exc_type, exc_value, exc_traceback = sys.exc_info()
log.excepthook(exc_type, exc_value, exc_traceback)
# print-to-file, temporarily disable-console-handler
for handler in log.handlers.copy():
if isinstance(handler, RichHandler):
log.removeHandler(handler)
with console.capture() as capture:
console.print(tb)
log.critical(capture.get())
log.addHandler(rh)
log.traceback = exception_hook
level = logging.DEBUG if (debug or trace) else logging.INFO
log.setLevel(logging.DEBUG) # log to file is always at level debug for facility `sd`
log.print = rprint
theme = Theme({
"traceback.border": "black",
"inspect.value.border": "black",
"traceback.border.syntax_error": "dark_red",
"logging.level.info": "blue_violet",
"logging.level.debug": "purple4",
"logging.level.trace": "dark_blue",
})
Padding.__rich_console__ = override_padding
box.ROUNDED = box.SIMPLE
console = Console(
log_time=True,
log_time_format='%H:%M:%S-%f',
tab_size=4,
soft_wrap=True,
safe_box=True,
theme=theme,
)
logging.basicConfig(level=logging.ERROR, format='%(asctime)s | %(name)s | %(levelname)s | %(module)s | %(message)s', handlers=[logging.NullHandler()]) # redirect default logger to null
pretty_install(console=console)
install_traceback()
while log.hasHandlers() and len(log.handlers) > 0:
log.removeHandler(log.handlers[0])
log_filter = LogFilter()
# handlers
rh = RichHandler(show_time=True, omit_repeated_times=False, show_level=True, show_path=False, markup=False, rich_tracebacks=True, log_time_format='%H:%M:%S-%f', level=level, console=console)
if trace:
rh.formatter = logging.Formatter('[%(module)s][%(pathname)s:%(lineno)d] %(message)s')
rh.addFilter(log_filter)
rh.setLevel(level)
log.addHandler(rh)
fh = RotatingFileHandler(log_file, maxBytes=32*1024*1024, backupCount=9, encoding='utf-8', delay=True) # 32MB per file for log rotation
if trace:
fh.formatter = logging.Formatter(f'%(asctime)s | {hostname} | %(name)s | %(levelname)s | %(module)s | %(pathname)s:%(lineno)d | %(message)s')
else:
fh.formatter = logging.Formatter(f'%(asctime)s | {hostname} | %(name)s | %(levelname)s | %(module)s | %(message)s')
fh.addFilter(log_filter)
fh.setLevel(logging.DEBUG)
log.addHandler(fh)
if not log_rolled and debug and not log_filename:
try:
fh.doRollover()
except Exception:
pass
log_rolled = True
rb = RingBuffer(100) # 100 entries default in log ring buffer
rb.addFilter(log_filter)
rb.setLevel(level)
log.addHandler(rb)
log.buffer = rb.buffer
def quiet_log(quiet: bool=False, *args, **kwargs): # pylint: disable=redefined-outer-name,keyword-arg-before-vararg
if not quiet:
log.debug(*args, **kwargs)
log.quiet = quiet_log
# overrides
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("httpx").setLevel(logging.ERROR)
logging.getLogger("diffusers").setLevel(logging.ERROR)
logging.getLogger("torch").setLevel(logging.ERROR)
logging.getLogger("ControlNet").handlers = log.handlers
logging.getLogger("lycoris").handlers = log.handlers

View File

@ -4,10 +4,11 @@ import numpy as np
from modules.lora import networks, lora_overrides, lora_load, lora_diffusers
from modules.lora import lora_common as l
from modules import extra_networks, shared, sd_models
from modules import logger
debug = os.environ.get('SD_LORA_DEBUG', None) is not None
debug_log = shared.log.trace if debug else lambda *args, **kwargs: None
debug_log = logger.log.trace if debug else lambda *args, **kwargs: None
def get_stepwise(param, step, steps): # from https://github.com/cheald/sd-webui-loractl/blob/master/loractl/lib/utils.py
@ -53,7 +54,7 @@ def prompt(p):
all_tags = list(set(all_tags))
all_tags = [t for t in all_tags if t not in p.prompt]
if len(all_tags) > 0:
shared.log.debug(f"Network load: type=LoRA tags={all_tags} max={shared.opts.lora_apply_tags} apply")
logger.log.debug(f"Network load: type=LoRA tags={all_tags} max={shared.opts.lora_apply_tags} apply")
all_tags = ', '.join(all_tags)
p.extra_generation_params["LoRA tags"] = all_tags
if '_tags_' in p.prompt:
@ -236,7 +237,7 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
if has_changed:
jobid = shared.state.begin('LoRA')
if len(l.previously_loaded_networks) > 0:
shared.log.info(f'Network unload: type=LoRA networks={[n.name for n in l.previously_loaded_networks]} mode={"fuse" if shared.opts.lora_fuse_native else "backup"}')
logger.log.info(f'Network unload: type=LoRA networks={[n.name for n in l.previously_loaded_networks]} mode={"fuse" if shared.opts.lora_fuse_native else "backup"}')
networks.network_deactivate(include, exclude)
networks.network_activate(include, exclude)
debug_log(f'Network change: type=LoRA previous={[n.name for n in l.previously_loaded_networks]} current={[n.name for n in l.loaded_networks]}')
@ -248,7 +249,7 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
infotext(p)
prompt(p)
if has_changed and len(include) == 0: # print only once
shared.log.info(f'Network load: type=LoRA networks={[n.name for n in l.loaded_networks]} method={load_method} mode={"fuse" if shared.opts.lora_fuse_native else "backup"} te={te_multipliers} unet={unet_multipliers} time={l.timer.summary}')
logger.log.info(f'Network load: type=LoRA networks={[n.name for n in l.loaded_networks]} method={load_method} mode={"fuse" if shared.opts.lora_fuse_native else "backup"} te={te_multipliers} unet={unet_multipliers} time={l.timer.summary}')
def deactivate(self, p, force=False):
if len(lora_diffusers.diffuser_loaded) > 0 and (shared.opts.lora_force_reload or force):
@ -256,8 +257,8 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
if force:
networks.network_deactivate()
if self.active and l.debug:
shared.log.debug(f"Network end: type=LoRA time={l.timer.summary}")
logger.log.debug(f"Network end: type=LoRA time={l.timer.summary}")
if self.errors:
for k, v in self.errors.items():
shared.log.error(f'Network: type=LoRA name="{k}" errors={v}')
logger.log.error(f'Network: type=LoRA name="{k}" errors={v}')
self.errors.clear()

View File

@ -5,6 +5,7 @@ import diffusers.models.lora
from modules.errorlimiter import ErrorLimiter
from modules.lora import lora_common as l
from modules import shared, devices, errors, model_quant
from modules import logger
bnb = None
@ -137,7 +138,7 @@ def network_calc_weights(self: torch.nn.Conv2d | torch.nn.Linear | torch.nn.Grou
except RuntimeError as e:
l.extra_network_lora.errors[net.name] = l.extra_network_lora.errors.get(net.name, 0) + 1
module_name = net.modules.get(network_layer_name, None)
shared.log.error(f'Network: type=LoRA name="{net.name}" module="{module_name}" layer="{network_layer_name}" apply weight: {e}')
logger.log.error(f'Network: type=LoRA name="{net.name}" module="{module_name}" layer="{network_layer_name}" apply weight: {e}')
if l.debug:
errors.display(e, 'LoRA')
raise RuntimeError('LoRA apply weight') from e
@ -163,7 +164,7 @@ def network_add_weights(self: torch.nn.Conv2d | torch.nn.Linear | torch.nn.Group
# TODO lora: maybe force immediate quantization
# weight._quantize(devices.device) / weight.to(device=device)
except Exception as e:
shared.log.error(f'Network load: type=LoRA quant=bnb cls={self.__class__.__name__} type={self.quant_type} blocksize={self.blocksize} state={vars(self.quant_state)} weight={self.weight} bias={lora_weights} {e}')
logger.log.error(f'Network load: type=LoRA quant=bnb cls={self.__class__.__name__} type={self.quant_type} blocksize={self.blocksize} state={vars(self.quant_state)} weight={self.weight} bias={lora_weights} {e}')
elif not bias and hasattr(self, "sdnq_dequantizer"):
try:
from modules.sdnq import sdnq_quantize_layer
@ -218,14 +219,14 @@ def network_add_weights(self: torch.nn.Conv2d | torch.nn.Linear | torch.nn.Group
weight = None
del dequant_weight
except Exception as e:
shared.log.error(f'Network load: type=LoRA quant=sdnq cls={self.__class__.__name__} weight={self.weight} lora_weights={lora_weights} {e}')
logger.log.error(f'Network load: type=LoRA quant=sdnq cls={self.__class__.__name__} weight={self.weight} lora_weights={lora_weights} {e}')
else:
try:
new_weight = model_weights.to(devices.device) + lora_weights.to(devices.device)
except Exception as e:
shared.log.warning(f'Network load: {e}')
logger.log.warning(f'Network load: {e}')
if 'The size of tensor' in str(e):
shared.log.error(f'Network load: type=LoRA model={shared.sd_model.__class__.__name__} incompatible lora shape')
logger.log.error(f'Network load: type=LoRA model={shared.sd_model.__class__.__name__} incompatible lora shape')
new_weight = model_weights
else:
new_weight = model_weights + lora_weights # try without device cast

View File

@ -2,6 +2,7 @@ import os
import time
import diffusers
from modules import shared, errors
from modules import logger
from modules.lora import network
from modules.lora import lora_common as l
@ -11,7 +12,7 @@ diffuser_scales = []
def load_per_module(sd_model: diffusers.DiffusionPipeline, filename: str, adapter_name: str, lora_modules: list[str]):
shared.log.debug(f'LoRA load: modules={lora_modules}')
logger.log.debug(f'LoRA load: modules={lora_modules}')
try:
state_dict = sd_model.lora_state_dict(filename)
if isinstance(state_dict, tuple) and len(state_dict) == 2:
@ -19,7 +20,7 @@ def load_per_module(sd_model: diffusers.DiffusionPipeline, filename: str, adapte
else:
network_alphas = {}
except Exception as e:
shared.log.error(f'LoRA load: {e}')
logger.log.error(f'LoRA load: {e}')
if l.debug:
errors.display(e, "LoRA")
return None
@ -28,24 +29,24 @@ def load_per_module(sd_model: diffusers.DiffusionPipeline, filename: str, adapte
if hasattr(sd_model, 'transformer') and sd_model.transformer is not None:
sd_model.load_lora_into_transformer(state_dict, transformer=sd_model.transformer, adapter_name=adapter_name)
else:
shared.log.warning(f'LoRA load: requested={lora_module} missing')
logger.log.warning(f'LoRA load: requested={lora_module} missing')
elif lora_module == 'transformer_2':
if hasattr(sd_model, 'transformer_2') and sd_model.transformer_2 is not None:
sd_model.load_lora_into_transformer(state_dict, transformer=sd_model.transformer_2, adapter_name=adapter_name)
else:
shared.log.warning(f'LoRA load: requested={lora_module} missing')
logger.log.warning(f'LoRA load: requested={lora_module} missing')
elif lora_module == 'unet':
if hasattr(sd_model, 'unet') and sd_model.unet is not None:
sd_model.load_lora_into_unet(state_dict, network_alphas, unet=sd_model.unet, adapter_name=adapter_name)
else:
shared.log.warning(f'LoRA load: requested={lora_module} missing')
logger.log.warning(f'LoRA load: requested={lora_module} missing')
elif lora_module == 'text_encoder' or lora_module == 'te':
if hasattr(sd_model, 'text_encoder') and sd_model.text_encoder is not None:
sd_model.load_lora_into_text_encoder(state_dict, network_alphas, text_encoder=sd_model.text_encoder, adapter_name=adapter_name)
else:
shared.log.warning(f'LoRA load: requested={lora_module} missing')
logger.log.warning(f'LoRA load: requested={lora_module} missing')
else:
shared.log.warning(f'LoRA load: requested={lora_module} unknown')
logger.log.warning(f'LoRA load: requested={lora_module} unknown')
return adapter_name
@ -53,9 +54,9 @@ def load_diffusers(name: str, network_on_disk: network.NetworkOnDisk, lora_scale
t0 = time.time()
name = name.replace(".", "_")
sd_model: diffusers.DiffusionPipeline = getattr(shared.sd_model, "pipe", shared.sd_model)
shared.log.debug(f'Network load: type=LoRA name="{name}" file="{network_on_disk.filename}" detected={network_on_disk.sd_version} method=diffusers scale={lora_scale} fuse={shared.opts.lora_fuse_native}:{shared.opts.lora_fuse_diffusers}')
logger.log.debug(f'Network load: type=LoRA name="{name}" file="{network_on_disk.filename}" detected={network_on_disk.sd_version} method=diffusers scale={lora_scale} fuse={shared.opts.lora_fuse_native}:{shared.opts.lora_fuse_diffusers}')
if not hasattr(sd_model, 'load_lora_weights'):
shared.log.error(f'Network load: type=LoRA class={sd_model.__class__} does not implement load lora')
logger.log.error(f'Network load: type=LoRA class={sd_model.__class__} does not implement load lora')
return None
try:
if lora_module is not None and isinstance(lora_module, list) and len(lora_module) > 0:
@ -68,11 +69,11 @@ def load_diffusers(name: str, network_on_disk: network.NetworkOnDisk, lora_scale
pass
else:
if 'following keys have not been correctly renamed' in str(e):
shared.log.error(f'Network load: type=LoRA name="{name}" diffusers unsupported format')
logger.log.error(f'Network load: type=LoRA name="{name}" diffusers unsupported format')
elif 'object has no attribute' in str(e):
shared.log.error(f'Network load: type=LoRA name="{name}" diffusers empty module')
logger.log.error(f'Network load: type=LoRA name="{name}" diffusers empty module')
else:
shared.log.error(f'Network load: type=LoRA name="{name}" {e}')
logger.log.error(f'Network load: type=LoRA name="{name}" {e}')
if l.debug:
errors.display(e, "LoRA")
return None
@ -82,7 +83,7 @@ def load_diffusers(name: str, network_on_disk: network.NetworkOnDisk, lora_scale
list_adapters = sd_model.get_list_adapters()
list_adapters = [adapter for adapters in list_adapters.values() for adapter in adapters]
if name not in list_adapters:
shared.log.error(f'Network load: type=LoRA name="{name}" adapters={list_adapters} not loaded')
logger.log.error(f'Network load: type=LoRA name="{name}" adapters={list_adapters} not loaded')
else:
diffuser_loaded.append(name)
diffuser_scales.append(lora_scale)

View File

@ -7,6 +7,7 @@ from safetensors.torch import save_file
import gradio as gr
from rich import progress as rp
from modules import shared, devices
from modules import logger
from modules.ui_common import create_refresh_button
from modules.call_queue import wrap_gradio_gpu_call
@ -118,26 +119,26 @@ def make_meta(fn, maxrank, rank_ratio):
def make_lora(fn, maxrank, auto_rank, rank_ratio, modules, overwrite):
if not shared.sd_loaded:
msg = "LoRA extract: model not loaded"
shared.log.warning(msg)
logger.log.warning(msg)
yield msg
return
if loaded_lora() == "":
msg = "LoRA extract: no LoRA detected"
shared.log.warning(msg)
logger.log.warning(msg)
yield msg
return
if not fn:
msg = "LoRA extract: target filename required"
shared.log.warning(msg)
logger.log.warning(msg)
yield msg
return
t0 = time.time()
maxrank = int(maxrank)
rank_ratio = 1 if not auto_rank else rank_ratio
shared.log.debug(f'LoRA extract: modules={modules} maxrank={maxrank} auto={auto_rank} ratio={rank_ratio} fn="{fn}"')
logger.log.debug(f'LoRA extract: modules={modules} maxrank={maxrank} auto={auto_rank} ratio={rank_ratio} fn="{fn}"')
jobid = shared.state.begin('LoRA extract')
with rp.Progress(rp.TextColumn('[cyan]LoRA extract'), rp.BarColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=shared.console) as progress:
with rp.Progress(rp.TextColumn('[cyan]LoRA extract'), rp.BarColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=logger.console) as progress:
if 'te' in modules and getattr(shared.sd_model, 'text_encoder', None) is not None:
modules = shared.sd_model.text_encoder.named_modules()
@ -222,25 +223,25 @@ def make_lora(fn, maxrank, auto_rank, rank_ratio, modules, overwrite):
os.remove(fn)
else:
msg = f'LoRA extract: fn="{fn}" file exists'
shared.log.warning(msg)
logger.log.warning(msg)
yield msg
return
shared.state.end(jobid)
meta = make_meta(fn, maxrank, rank_ratio)
shared.log.debug(f'LoRA metadata: {meta}')
logger.log.debug(f'LoRA metadata: {meta}')
try:
save_file(tensors=lora_state_dict, metadata=meta, filename=fn)
except Exception as e:
msg = f'LoRA extract error: fn="{fn}" {e}'
shared.log.error(msg)
logger.log.error(msg)
yield msg
return
t5 = time.time()
shared.log.debug(f'LoRA extract: time={t5-t0:.2f} te1={t1-t0:.2f} te2={t2-t1:.2f} unet={t3-t2:.2f} save={t5-t4:.2f}')
logger.log.debug(f'LoRA extract: time={t5-t0:.2f} te1={t1-t0:.2f} te2={t2-t1:.2f} unet={t3-t2:.2f} save={t5-t4:.2f}')
keys = list(lora_state_dict.keys())
msg = f'LoRA extract: fn="{fn}" keys={len(keys)}'
shared.log.info(msg)
logger.log.info(msg)
yield msg

View File

@ -2,6 +2,7 @@ import os
import time
import concurrent
from modules import shared, errors, sd_models, sd_models_compile, files_cache
from modules import logger
from modules.lora import network, lora_overrides, lora_convert, lora_diffusers
from modules.lora import lora_common as l
@ -22,18 +23,18 @@ def lora_dump(lora, dct):
sd_model = getattr(shared.sd_model, "pipe", shared.sd_model)
ty = shared.sd_model_type
cn = sd_model.__class__.__name__
shared.log.trace(f'LoRA dump: type={ty} model={cn} fn="{lora}"')
logger.log.trace(f'LoRA dump: type={ty} model={cn} fn="{lora}"')
bn = os.path.splitext(os.path.basename(lora))[0]
fn = os.path.join(tempfile.gettempdir(), f'LoRA-{ty}-{cn}-{bn}.txt')
with open(fn, 'w', encoding='utf8') as f:
keys = sorted(dct.keys())
shared.log.trace(f'LoRA dump: type=LoRA fn="{fn}" keys={len(keys)}')
logger.log.trace(f'LoRA dump: type=LoRA fn="{fn}" keys={len(keys)}')
for line in keys:
f.write(line + "\n")
fn = os.path.join(tempfile.gettempdir(), f'Model-{ty}-{cn}.txt')
with open(fn, 'w', encoding='utf8') as f:
keys = sd_model.network_layer_mapping.keys()
shared.log.trace(f'LoRA dump: type=Mapping fn="{fn}" keys={len(keys)}')
logger.log.trace(f'LoRA dump: type=Mapping fn="{fn}" keys={len(keys)}')
for line in keys:
f.write(line + "\n")
@ -45,7 +46,7 @@ def load_safetensors(name, network_on_disk: network.NetworkOnDisk) -> network.Ne
sd_model = getattr(shared.sd_model, "pipe", shared.sd_model)
cached = lora_cache.get(name, None)
if l.debug:
shared.log.debug(f'Network load: type=LoRA name="{name}" file="{network_on_disk.filename}" type=lora {"cached" if cached else ""}')
logger.log.debug(f'Network load: type=LoRA name="{name}" file="{network_on_disk.filename}" type=lora {"cached" if cached else ""}')
if cached is not None:
return cached
net = network.Network(name, network_on_disk)
@ -117,17 +118,17 @@ def load_safetensors(name, network_on_disk: network.NetworkOnDisk) -> network.Ne
if net_module is None:
module_errors += 1
if l.debug:
shared.log.error(f'LoRA unhandled: name={name} key={key} weights={weights.w.keys()}')
logger.log.error(f'LoRA unhandled: name={name} key={key} weights={weights.w.keys()}')
else:
net.modules[key] = net_module
if module_errors > 0:
shared.log.error(f'Network load: type=LoRA name="{name}" file="{network_on_disk.filename}" errors={module_errors} empty modules')
logger.log.error(f'Network load: type=LoRA name="{name}" file="{network_on_disk.filename}" errors={module_errors} empty modules')
if len(keys_failed_to_match) > 0:
shared.log.warning(f'Network load: type=LoRA name="{name}" type={set(network_types)} unmatched={len(keys_failed_to_match)} matched={len(matched_networks)}')
logger.log.warning(f'Network load: type=LoRA name="{name}" type={set(network_types)} unmatched={len(keys_failed_to_match)} matched={len(matched_networks)}')
if l.debug:
shared.log.debug(f'Network load: type=LoRA name="{name}" unmatched={keys_failed_to_match}')
logger.log.debug(f'Network load: type=LoRA name="{name}" unmatched={keys_failed_to_match}')
else:
shared.log.debug(f'Network load: type=LoRA name="{name}" type={set(network_types)} keys={len(matched_networks)} dtypes={dtypes} fuse={shared.opts.lora_fuse_native}:{shared.opts.lora_fuse_diffusers}')
logger.log.debug(f'Network load: type=LoRA name="{name}" type={set(network_types)} keys={len(matched_networks)} dtypes={dtypes} fuse={shared.opts.lora_fuse_native}:{shared.opts.lora_fuse_diffusers}')
if len(matched_networks) == 0:
return None
lora_cache[name] = net
@ -150,14 +151,14 @@ def maybe_recompile_model(names, te_multipliers):
if not recompile_model:
skip_lora_load = True
if len(l.loaded_networks) > 0 and l.debug:
shared.log.debug('Model Compile: Skipping LoRa loading')
logger.log.debug('Model Compile: Skipping LoRA loading')
return recompile_model, skip_lora_load
else:
recompile_model = True
shared.compiled_model_state.lora_model = []
if recompile_model:
current_task = sd_models.get_diffusers_task(shared.sd_model)
shared.log.debug(f'Compile: task={current_task} force model reload')
logger.log.debug(f'Compile: task={current_task} force model reload')
backup_cuda_compile = shared.opts.cuda_compile
backup_scheduler = getattr(sd_model, "scheduler", None)
sd_models.unload_model_weights(op='model')
@ -178,7 +179,7 @@ def list_available_networks():
available_network_hash_lookup.clear()
forbidden_network_aliases.update({"none": 1, "Addams": 1})
if not os.path.exists(shared.cmd_opts.lora_dir):
shared.log.warning(f'LoRA directory not found: path="{shared.cmd_opts.lora_dir}"')
logger.log.warning(f'LoRA directory not found: path="{shared.cmd_opts.lora_dir}"')
def add_network(filename):
if not os.path.isfile(filename):
@ -194,7 +195,7 @@ def list_available_networks():
if entry.shorthash:
available_network_hash_lookup[entry.shorthash] = entry
except OSError as e: # should catch FileNotFoundError and PermissionError etc.
shared.log.error(f'LoRA: filename="{filename}" {e}')
logger.log.error(f'LoRA: filename="{filename}" {e}')
candidates = sorted(files_cache.list_files(shared.cmd_opts.lora_dir, ext_filter=[".pt", ".ckpt", ".safetensors"]))
with concurrent.futures.ThreadPoolExecutor(max_workers=shared.max_workers) as executor:
@ -202,7 +203,7 @@ def list_available_networks():
executor.submit(add_network, fn)
t1 = time.time()
l.timer.list = t1 - t0
shared.log.info(f'Available LoRAs: path="{shared.cmd_opts.lora_dir}" items={len(available_networks)} folders={len(forbidden_network_aliases)} time={t1 - t0:.2f}')
logger.log.info(f'Available LoRAs: path="{shared.cmd_opts.lora_dir}" items={len(available_networks)} folders={len(forbidden_network_aliases)} time={t1 - t0:.2f}')
def network_download(name):
@ -245,7 +246,7 @@ def network_load(names, te_multipliers=None, unet_multipliers=None, dyn_dims=Non
if network_on_disk is not None:
shorthash = getattr(network_on_disk, 'shorthash', '').lower()
if l.debug:
shared.log.debug(f'Network load: type=LoRA name="{name}" file="{network_on_disk.filename}" hash="{shorthash}"')
logger.log.debug(f'Network load: type=LoRA name="{name}" file="{network_on_disk.filename}" hash="{shorthash}"')
try:
lora_scale = te_multipliers[i] if te_multipliers else shared.opts.extra_networks_default_multiplier
lora_module = lora_modules[i] if lora_modules and len(lora_modules) > i else None
@ -262,13 +263,13 @@ def network_load(names, te_multipliers=None, unet_multipliers=None, dyn_dims=Non
net.mentioned_name = name
network_on_disk.read_hash()
except Exception as e:
shared.log.error(f'Network load: type=LoRA file="{network_on_disk.filename}" {e}')
logger.log.error(f'Network load: type=LoRA file="{network_on_disk.filename}" {e}')
if l.debug:
errors.display(e, 'LoRA')
continue
if net is None:
failed_to_load_networks.append(name)
shared.log.error(f'Network load: type=LoRA name="{name}" detected={network_on_disk.sd_version if network_on_disk is not None else None} not found')
logger.log.error(f'Network load: type=LoRA name="{name}" detected={network_on_disk.sd_version if network_on_disk is not None else None} not found')
continue
if hasattr(sd_model, 'embedding_db'):
sd_model.embedding_db.load_diffusers_embedding(None, net.bundle_embeddings)
@ -282,16 +283,16 @@ def network_load(names, te_multipliers=None, unet_multipliers=None, dyn_dims=Non
lora_cache.pop(name, None)
if not skip_lora_load and len(lora_diffusers.diffuser_loaded) > 0:
shared.log.debug(f'Network load: type=LoRA loaded={lora_diffusers.diffuser_loaded} available={sd_model.get_list_adapters()} active={sd_model.get_active_adapters()} scales={lora_diffusers.diffuser_scales}')
logger.log.debug(f'Network load: type=LoRA loaded={lora_diffusers.diffuser_loaded} available={sd_model.get_list_adapters()} active={sd_model.get_active_adapters()} scales={lora_diffusers.diffuser_scales}')
try:
t1 = time.time()
if l.debug:
shared.log.trace(f'Network load: type=LoRA list={sd_model.get_list_adapters()}')
shared.log.trace(f'Network load: type=LoRA active={sd_model.get_active_adapters()}')
logger.log.trace(f'Network load: type=LoRA list={sd_model.get_list_adapters()}')
logger.log.trace(f'Network load: type=LoRA active={sd_model.get_active_adapters()}')
sd_model.set_adapters(adapter_names=lora_diffusers.diffuser_loaded, adapter_weights=lora_diffusers.diffuser_scales)
except Exception as e:
if str(e) not in exclude_errors:
shared.log.error(f'Network load: type=LoRA action=strength {str(e)}')
logger.log.error(f'Network load: type=LoRA action=strength {str(e)}')
if l.debug:
errors.display(e, 'LoRA')
try:
@ -300,16 +301,16 @@ def network_load(names, te_multipliers=None, unet_multipliers=None, dyn_dims=Non
sd_model.unload_lora_weights()
l.timer.activate += time.time() - t1
except Exception as e:
shared.log.error(f'Network load: type=LoRA action=fuse {str(e)}')
logger.log.error(f'Network load: type=LoRA action=fuse {str(e)}')
if l.debug:
errors.display(e, 'LoRA')
shared.sd_model = sd_models.apply_balanced_offload(shared.sd_model, force=True, silent=True) # some layers may end up on cpu without hook
if len(l.loaded_networks) > 0 and l.debug:
shared.log.debug(f'Network load: type=LoRA loaded={[n.name for n in l.loaded_networks]} cache={list(lora_cache)} fuse={shared.opts.lora_fuse_native}:{shared.opts.lora_fuse_diffusers}')
logger.log.debug(f'Network load: type=LoRA loaded={[n.name for n in l.loaded_networks]} cache={list(lora_cache)} fuse={shared.opts.lora_fuse_native}:{shared.opts.lora_fuse_diffusers}')
if recompile_model:
shared.log.info("Network load: type=LoRA recompiling model")
logger.log.info("Network load: type=LoRA recompiling model")
if shared.compiled_model_state is not None:
backup_lora_model = shared.compiled_model_state.lora_model
else:

View File

@ -1,5 +1,6 @@
import time
from modules import shared, errors
from modules import logger
from modules.lora import lora_load, lora_common
@ -16,7 +17,7 @@ def load_nunchaku(names, strengths):
if not is_changed:
return False
if not hasattr(shared.sd_model, 'transformer') or not hasattr(shared.sd_model.transformer, 'update_lora_params'):
shared.log.error(f'Network load: type=LoRA method=nunchaku model={shared.sd_model.__class__.__name__} unsupported')
logger.log.error(f'Network load: type=LoRA method=nunchaku model={shared.sd_model.__class__.__name__} unsupported')
return False
previously_loaded = loras
@ -28,9 +29,9 @@ def load_nunchaku(names, strengths):
lora_common.loaded_networks = [n[0] for n in networks] # used by infotext
t1 = time.time()
lora_common.timer.load = t1 - t0
shared.log.debug(f"Network load: type=LoRA method=nunchaku loras={names} strength={strengths} time={t1-t0:.3f}")
logger.log.debug(f"Network load: type=LoRA method=nunchaku loras={names} strength={strengths} time={t1-t0:.3f}")
except Exception as e:
shared.log.error(f'Network load: type=LoRA method=nunchaku {e}')
logger.log.error(f'Network load: type=LoRA method=nunchaku {e}')
if lora_common.debug:
errors.display(e, 'LoRA')
return is_changed

View File

@ -5,6 +5,7 @@ from modules.errorlimiter import limit_errors
from modules.lora import lora_common as l
from modules.lora.lora_apply import network_apply_weights, network_apply_direct, network_backup_weights, network_calc_weights
from modules import shared, devices, sd_models
from modules import logger
applied_layers: list[str] = []
@ -34,7 +35,7 @@ def network_activate(include=None, exclude=None):
modules[name] = list(component.named_modules())
total = sum(len(x) for x in modules.values())
if len(l.loaded_networks) > 0:
pbar = rp.Progress(rp.TextColumn('[cyan]Network: type=LoRA action=activate'), rp.BarColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=shared.console)
pbar = rp.Progress(rp.TextColumn('[cyan]Network: type=LoRA action=activate'), rp.BarColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=logger.console)
task = pbar.add_task(description='' , total=total)
else:
task = None
@ -75,7 +76,7 @@ def network_activate(include=None, exclude=None):
pbar.remove_task(task) # hide progress bar for no action
l.timer.activate += time.time() - t0
if l.debug and len(l.loaded_networks) > 0:
shared.log.debug(f'Network load: type=LoRA networks={[n.name for n in l.loaded_networks]} modules={active_components} layers={total} weights={applied_weight} bias={applied_bias} backup={round(backup_size/1024/1024/1024, 2)} fuse={shared.opts.lora_fuse_native}:{shared.opts.lora_fuse_diffusers} device={device} time={l.timer.summary}')
logger.log.debug(f'Network load: type=LoRA networks={[n.name for n in l.loaded_networks]} modules={active_components} layers={total} weights={applied_weight} bias={applied_bias} backup={round(backup_size/1024/1024/1024, 2)} fuse={shared.opts.lora_fuse_native}:{shared.opts.lora_fuse_diffusers} device={device} time={l.timer.summary}')
modules.clear()
if len(applied_layers) > 0 or shared.opts.diffusers_offload_mode == "sequential":
sd_models.set_diffuser_offload(sd_model, op="model")
@@ -108,7 +109,7 @@ def network_deactivate(include=None, exclude=None):
active_components.append(name)
total = sum(len(x) for x in modules.values())
if len(l.previously_loaded_networks) > 0 and l.debug:
pbar = rp.Progress(rp.TextColumn('[cyan]Network: type=LoRA action=deactivate'), rp.BarColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=shared.console)
pbar = rp.Progress(rp.TextColumn('[cyan]Network: type=LoRA action=deactivate'), rp.BarColumn(), rp.TaskProgressColumn(), rp.TimeRemainingColumn(), rp.TimeElapsedColumn(), rp.TextColumn('[cyan]{task.description}'), console=logger.console)
task = pbar.add_task(description='', total=total)
else:
task = None
@@ -136,7 +137,7 @@ def network_deactivate(include=None, exclude=None):
pbar.update(task, advance=1, description=f'networks={len(l.previously_loaded_networks)} modules={active_components} layers={total} unapply={len(applied_layers)}')
l.timer.deactivate = time.time() - t0
if l.debug and len(l.previously_loaded_networks) > 0:
shared.log.debug(f'Network deactivate: type=LoRA networks={[n.name for n in l.previously_loaded_networks]} modules={active_components} layers={total} apply={len(applied_layers)} fuse={shared.opts.lora_fuse_native}:{shared.opts.lora_fuse_diffusers} time={l.timer.summary}')
logger.log.debug(f'Network deactivate: type=LoRA networks={[n.name for n in l.previously_loaded_networks]} modules={active_components} layers={total} apply={len(applied_layers)} fuse={shared.opts.lora_fuse_native}:{shared.opts.lora_fuse_diffusers} time={l.timer.summary}')
modules.clear()
if len(applied_layers) > 0 or shared.opts.diffusers_offload_mode == "sequential":
sd_models.set_diffuser_offload(sd_model, op="model")
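
Both progress bars now render on `logger.console` instead of `shared.console`, so bar redraws and log lines flow through one rich console and do not interleave. A standalone sketch of the same wiring; the column set is copied from the diff, and a local `Console` plays the role of `logger.console` so the snippet runs on its own:

```python
# standalone sketch: the diff passes console=logger.console; a local Console
# substitutes here so the snippet is runnable in isolation
import time
import rich.progress as rp
from rich.console import Console

console = Console()
pbar = rp.Progress(
    rp.TextColumn('[cyan]Network: type=LoRA action=activate'),
    rp.BarColumn(),
    rp.TaskProgressColumn(),
    rp.TimeRemainingColumn(),
    rp.TimeElapsedColumn(),
    rp.TextColumn('[cyan]{task.description}'),
    console=console,        # one shared console keeps bars and logs in sync
)
with pbar:
    task = pbar.add_task(description='', total=20)
    for i in range(20):
        time.sleep(0.01)    # placeholder for per-module weight application
        pbar.update(task, advance=1, description=f'layers={i + 1}')
```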

View File

@@ -4,6 +4,7 @@ import torch
from PIL import Image
from modules import shared, errors, timer, memstats, progress, processing, sd_models, sd_samplers, extra_networks, call_queue
from modules import logger
from modules.video_models.video_vae import set_vae_params
from modules.video_models.video_save import save_video
from modules.video_models.video_utils import check_av
@@ -11,7 +12,7 @@ from modules.processing_callbacks import diffusers_callback
from modules.ltx.ltx_util import get_bucket, get_frames, load_model, load_upsample, get_conditions, get_generator, get_prompts, vae_decode
debug = shared.log.trace if os.environ.get('SD_VIDEO_DEBUG', None) is not None else lambda *args, **kwargs: None
debug = logger.log.trace if os.environ.get('SD_VIDEO_DEBUG', None) is not None else lambda *args, **kwargs: None
# engine, model = 'LTX Video', 'LTXVideo 0.9.7 13B'
upsample_repo_id = "a-r-r-o-w/LTX-Video-0.9.7-Latent-Spatial-Upsampler-diffusers"
upsample_pipe = None
@@ -56,9 +57,9 @@ def run_ltx(task_id,
def abort(e, ok:bool=False, p=None):
if ok:
shared.log.info(e)
logger.log.info(e)
else:
shared.log.error(f'Video: cls={shared.sd_model.__class__.__name__} op=base {e}')
logger.log.error(f'Video: cls={shared.sd_model.__class__.__name__} op=base {e}')
errors.display(e, 'LTX')
if p is not None:
extra_networks.deactivate(p)
@@ -121,7 +122,7 @@ def run_ltx(task_id,
prompt, negative, networks = get_prompts(prompt, negative, styles)
sampler_name = processing.get_sampler_name(sampler_index)
sd_samplers.create_sampler(sampler_name, shared.sd_model)
shared.log.debug(f'Video: cls={shared.sd_model.__class__.__name__} op=init styles={styles} networks={networks} sampler={shared.sd_model.scheduler.__class__.__name__}')
logger.log.debug(f'Video: cls={shared.sd_model.__class__.__name__} op=init styles={styles} networks={networks} sampler={shared.sd_model.scheduler.__class__.__name__}')
extra_networks.activate(p, networks)
framewise = 'LTX2' not in shared.sd_model.__class__.__name__
@@ -151,10 +152,10 @@ def run_ltx(task_id,
base_args["image_cond_noise_scale"] = image_cond_noise_scale
if len(conditions) > 0:
base_args["conditions"] = conditions
shared.log.debug(f'Video: cls={shared.sd_model.__class__.__name__} op=base {base_args}')
logger.log.debug(f'Video: cls={shared.sd_model.__class__.__name__} op=base {base_args}')
if debug:
shared.log.trace(f'LTX args: {base_args}')
logger.log.trace(f'LTX args: {base_args}')
yield None, 'LTX: Generate in progress...'
samplejob = shared.state.begin('Sample')
try:
@@ -172,7 +173,7 @@ def run_ltx(task_id,
audio = None
try:
if debug:
shared.log.trace(f'LTX result frames={latents.shape if latents is not None else None} audio={audio.shape if audio is not None else None}')
logger.log.trace(f'LTX result frames={latents.shape if latents is not None else None} audio={audio.shape if audio is not None else None}')
except Exception:
pass
@@ -198,7 +199,7 @@ def run_ltx(task_id,
}
if latents.ndim == 4:
latents = latents.unsqueeze(0) # add batch dimension
shared.log.debug(f'Video: cls={shared.sd_model.__class__.__name__} op=upsample latents={latents.shape} {upscale_args}')
logger.log.debug(f'Video: cls={shared.sd_model.__class__.__name__} op=upsample latents={latents.shape} {upscale_args}')
yield None, 'LTX: Upsample in progress...'
try:
upsampled_latents = upsample_pipe(latents=latents, **upscale_args).frames[0]
@@ -236,7 +237,7 @@ def run_ltx(task_id,
if latents.ndim == 4:
latents = latents.unsqueeze(0) # add batch dimension
shared.log.debug(f'Video: cls={shared.sd_model.__class__.__name__} op=refine latents={latents.shape} {refine_args}')
logger.log.debug(f'Video: cls={shared.sd_model.__class__.__name__} op=refine latents={latents.shape} {refine_args}')
if len(conditions) > 0:
refine_args["conditions"] = conditions
yield None, 'LTX: Refine in progress...'
@@ -316,5 +317,5 @@ def run_ltx(task_id,
shared.state.end(videojob)
progress.finish_task(task_id)
shared.log.info(f'Processed: fn="{video_file}" frames={num_frames} fps={fps} its={its} resolution={resolution} time={t_end-t0:.2f} timers={timer.process.dct()} memory={memstats.memory_stats()}')
logger.log.info(f'Processed: fn="{video_file}" frames={num_frames} fps={fps} its={its} resolution={resolution} time={t_end-t0:.2f} timers={timer.process.dct()} memory={memstats.memory_stats()}')
yield video_file, f'LTX: Generation completed | File {video_file} | Frames {len(frames)} | Resolution {resolution} | f/s {fps} | it/s {its} '+ f"<div class='performance'><p>{summary} {memory}</p></div>"
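
Log lines throughout keep the `Topic: key=value ...` shape, built inline with f-strings. Purely illustrative (no such helper exists in the repo), the same shape expressed as a hypothetical function:

```python
# hypothetical helper, not in the repo: shows the 'Topic: key=value' log shape
# that the f-strings above produce inline
import logging

log = logging.getLogger("sd")

def kv(topic: str, **fields) -> str:
    return f'{topic}: ' + ' '.join(f'{k}={v}' for k, v in fields.items())

log.info(kv('Processed', frames=97, fps=24, resolution='704x480'))
# -> Processed: frames=97 fps=24 resolution=704x480
```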

View File

@@ -1,11 +1,12 @@
import os
import gradio as gr
from modules import shared, ui_sections
from modules import logger
from modules.video_models.models_def import models
from modules.ltx import ltx_process
debug = shared.log.trace if os.environ.get('SD_VIDEO_DEBUG', None) is not None else lambda *args, **kwargs: None
debug = logger.log.trace if os.environ.get('SD_VIDEO_DEBUG', None) is not None else lambda *args, **kwargs: None
def create_ui(prompt, negative, styles, overrides, init_image, init_strength, last_image, mp4_fps, mp4_interpolate, mp4_codec, mp4_ext, mp4_opt, mp4_video, mp4_frames, mp4_sf, width, height, frames, seed):
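
The `debug` binding above is the repo's usual environment-gated trace hook: a real logging method when `SD_VIDEO_DEBUG` is set, otherwise a no-op lambda. A portable sketch, using `debug` level since `trace` is a custom level the repo adds on top of stock `logging`:

```python
# sketch of the env-gated trace hook; log.debug substitutes for the repo's
# custom log.trace level so this runs on stock `logging`
import os
import logging

log = logging.getLogger("sd")
debug = log.debug if os.environ.get('SD_VIDEO_DEBUG', None) is not None else lambda *args, **kwargs: None

debug('Trace: VIDEO')  # dropped silently unless SD_VIDEO_DEBUG is set
# note: call-site arguments (including f-strings) are still evaluated; only
# the logging machinery itself is skipped when the no-op lambda is bound
```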

View File

@@ -2,6 +2,7 @@ import time
import torch
from PIL import Image
from modules import devices, shared, sd_models, timer, extra_networks
from modules import logger
loaded_model: str = None
@@ -28,7 +29,7 @@ def load_model(engine: str, model: str):
t0 = time.time()
from modules.video_models import models_def, video_load
selected: models_def.Model = [m for m in models_def.models[engine] if m.name == model][0]
shared.log.info(f'Video load: engine="{engine}" selected="{model}" {selected}')
logger.log.info(f'Video load: engine="{engine}" selected="{model}" {selected}')
video_load.load_model(selected)
loaded_model = model
t1 = time.time()
@@ -42,7 +43,7 @@ def load_upsample(upsample_pipe, upsample_repo_id):
if upsample_pipe is None:
t0 = time.time()
from diffusers.pipelines.ltx.pipeline_ltx_latent_upsample import LTXLatentUpsamplePipeline
shared.log.info(f'Video load: cls={LTXLatentUpsamplePipeline.__class__.__name__} repo="{upsample_repo_id}"')
logger.log.info(f'Video load: cls={LTXLatentUpsamplePipeline.__class__.__name__} repo="{upsample_repo_id}"')
upsample_pipe = LTXLatentUpsamplePipeline.from_pretrained(
upsample_repo_id,
vae=shared.sd_model.vae,
@@ -65,9 +66,9 @@ def get_conditions(width, height, condition_strength, condition_images, conditio
condition_image = decode_base64_to_image(condition_image)
condition_image = condition_image.convert('RGB').resize((width, height), resample=Image.Resampling.LANCZOS)
conditions.append(LTXVideoCondition(image=condition_image, frame_index=0, strength=condition_strength))
shared.log.debug(f'Video condition: image={condition_image.size} strength={condition_strength}')
logger.log.debug(f'Video condition: image={condition_image.size} strength={condition_strength}')
except Exception as e:
shared.log.error(f'LTX condition image: {e}')
logger.log.error(f'LTX condition image: {e}')
if condition_files is not None:
condition_images = []
for fn in condition_files:
@@ -78,10 +79,10 @@ def get_conditions(width, height, condition_strength, condition_images, conditio
condition_image = fn.convert('RGB').resize((width, height), resample=Image.Resampling.LANCZOS)
condition_images.append(condition_image)
except Exception as e:
shared.log.error(f'LTX condition files: {e}')
logger.log.error(f'LTX condition files: {e}')
if len(condition_images) > 0:
conditions.append(LTXVideoCondition(video=condition_images, frame_index=0, strength=condition_strength))
shared.log.debug(f'Video condition: files={len(condition_images)} size={condition_images[0].size} strength={condition_strength}')
logger.log.debug(f'Video condition: files={len(condition_images)} size={condition_images[0].size} strength={condition_strength}')
if condition_video is not None:
from modules.video_models.video_utils import get_video_frames
try:
@@ -89,9 +90,9 @@ def get_conditions(width, height, condition_strength, condition_images, conditio
condition_frames = [f.convert('RGB').resize((width, height), resample=Image.Resampling.LANCZOS) for f in condition_frames]
if len(condition_frames) > 0:
conditions.append(LTXVideoCondition(video=condition_frames, frame_index=0, strength=condition_strength))
shared.log.debug(f'Video condition: frames={len(condition_frames)} size={condition_frames[0].size} strength={condition_strength}')
logger.log.debug(f'Video condition: frames={len(condition_frames)} size={condition_frames[0].size} strength={condition_strength}')
except Exception as e:
shared.log.error(f'LTX condition video: {e}')
logger.log.error(f'LTX condition video: {e}')
return conditions
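
Every condition source above (inline images, uploaded files, a reference video) follows the same guarded shape: normalize to RGB at target size, append an `LTXVideoCondition`, log one debug line, and log-and-continue on failure. A sketch of one branch; the `LTXVideoCondition` import path reflects recent diffusers releases and is worth verifying against the installed version:

```python
# sketch of one condition branch; import path is an assumption to verify
import logging
from PIL import Image
from diffusers.pipelines.ltx.pipeline_ltx_condition import LTXVideoCondition

log = logging.getLogger("sd")
conditions = []

def add_image_condition(img: Image.Image, width: int, height: int, strength: float):
    try:
        img = img.convert('RGB').resize((width, height), resample=Image.Resampling.LANCZOS)
        conditions.append(LTXVideoCondition(image=img, frame_index=0, strength=strength))
        log.debug(f'Video condition: image={img.size} strength={strength}')
    except Exception as e:
        log.error(f'LTX condition image: {e}')  # log and continue; other sources may still apply
```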
@@ -113,7 +114,7 @@ def get_generator(seed):
def vae_decode(latents, decode_timestep, seed):
t0 = time.time()
shared.log.debug(f'Video: cls={shared.sd_model.vae.__class__.__name__} op=vae latents={latents.shape} timestep={decode_timestep}')
logger.log.debug(f'Video: cls={shared.sd_model.vae.__class__.__name__} op=vae latents={latents.shape} timestep={decode_timestep}')
from diffusers.utils.torch_utils import randn_tensor
latents = shared.sd_model._denormalize_latents( # pylint: disable=protected-access
latents,

View File

@@ -8,10 +8,11 @@ import cv2
from PIL import Image, ImageFilter, ImageOps
from transformers import SamModel, SamImageProcessor, MaskGenerationPipeline
from modules import shared, errors, devices, paths, sd_models
from modules import logger
from modules.memstats import memory_stats
debug = shared.log.trace if os.environ.get('SD_MASK_DEBUG', None) is not None else lambda *args, **kwargs: None
debug = logger.log.trace if os.environ.get('SD_MASK_DEBUG', None) is not None else lambda *args, **kwargs: None
debug('Trace: MASK')
@@ -177,7 +178,7 @@ def init_model(selected_model: str):
model_path = MODELS[selected_model]
if model_path is None: # none
if generator is not None:
shared.log.debug('Mask segment unloading model')
logger.log.debug('Mask segment unloading model')
opts.model = None
generator = None
devices.torch_gc()
@@ -190,7 +191,7 @@ def init_model(selected_model: str):
if opts.model != selected_model or generator is None: # sam pipeline
busy = True
t0 = time.time()
shared.log.debug(f'Mask segment loading: model="{selected_model}" path={model_path}')
logger.log.debug(f'Mask segment loading: model="{selected_model}" path={model_path}')
model = SamModel.from_pretrained(model_path, cache_dir=cache_dir).to(device=devices.device)
processor = SamImageProcessor.from_pretrained(model_path, cache_dir=cache_dir)
generator = MaskGenerationPipeline(
@@ -201,7 +202,7 @@ def init_model(selected_model: str):
# output_rle_masks=False,
)
devices.torch_gc()
shared.log.debug(f'Mask segment loaded: model="{selected_model}" path={model_path} time={time.time()-t0:.2f}s')
logger.log.debug(f'Mask segment loaded: model="{selected_model}" path={model_path} time={time.time()-t0:.2f}s')
opts.model = selected_model
busy = False
return selected_model
@@ -222,7 +223,7 @@ def run_segment(input_image: gr.Image, input_mask: np.ndarray):
crop_n_points_downscale_factor=1,
)
except Exception as e:
shared.log.error(f'Mask segment error: {e}')
logger.log.error(f'Mask segment error: {e}')
errors.display(e, 'Mask segment')
return outputs
devices.torch_gc()
@@ -260,7 +261,7 @@ def run_rembg(input_image: Image, input_mask: np.ndarray):
try:
import rembg
except Exception as e:
shared.log.error(f'Mask Rembg load failed: {e}')
logger.log.error(f'Mask Rembg load failed: {e}')
return input_mask
if "U2NET_HOME" not in os.environ:
os.environ["U2NET_HOME"] = os.path.join(paths.models_path, "Rembg")
@@ -432,21 +433,21 @@ def run_mask(input_image: Image.Image, input_mask: Image.Image = None, return_ty
mask = cv2.erode(mask, kernel, iterations=opts.kernel_iterations) # remove noise
debug(f'Mask erode={opts.mask_erode:.3f} kernel={kernel.shape} mask={mask.shape}')
except Exception as e:
shared.log.error(f'Mask erode: {e}')
logger.log.error(f'Mask erode: {e}')
if opts.mask_dilate > 0:
try:
kernel = np.ones((int(opts.mask_dilate * size / 4) + 1, int(opts.mask_dilate * size / 4) + 1), np.uint8)
mask = cv2.dilate(mask, kernel, iterations=opts.kernel_iterations) # expand area
debug(f'Mask dilate={opts.mask_dilate:.3f} kernel={kernel.shape} mask={mask.shape}')
except Exception as e:
shared.log.error(f'Mask dilate: {e}')
logger.log.error(f'Mask dilate: {e}')
if opts.mask_blur > 0:
try:
sigmax, sigmay = 1 + int(opts.mask_blur * size / 4), 1 + int(opts.mask_blur * size / 4)
mask = cv2.GaussianBlur(mask, (0, 0), sigmaX=sigmax, sigmaY=sigmay) # blur mask
debug(f'Mask blur={opts.mask_blur:.3f} x={sigmax} y={sigmay} mask={mask.shape}')
except Exception as e:
shared.log.error(f'Mask blur: {e}')
logger.log.error(f'Mask blur: {e}')
if opts.invert:
mask = np.invert(mask)
@@ -476,7 +477,7 @@ def run_mask(input_image: Image.Image, input_mask: Image.Image = None, return_ty
combined_image = cv2.addWeighted(orig, opts.weight_original, colored_mask, opts.weight_mask, 0)
return Image.fromarray(combined_image)
else:
shared.log.error(f'Mask unknown return type: {return_type}')
logger.log.error(f'Mask unknown return type: {return_type}')
return input_mask
@@ -490,9 +491,9 @@ def run_lama(input_image: gr.Image, input_mask: gr.Image = None):
input_mask = run_mask(input_image, input_mask, return_type='Grayscale')
if lama_model is None:
import modules.lama
shared.log.debug(f'Mask LaMa loading: model={modules.lama.LAMA_MODEL_URL}')
logger.log.debug(f'Mask LaMa loading: model={modules.lama.LAMA_MODEL_URL}')
lama_model = modules.lama.SimpleLama()
shared.log.debug(f'Mask LaMa loaded: {memory_stats()}')
logger.log.debug(f'Mask LaMa loaded: {memory_stats()}')
sd_models.move_model(lama_model.model, devices.device)
result = lama_model(input_image, input_mask)
@@ -574,7 +575,7 @@ def process_kanvas(kanvas_data):
if kanvas_data is None or 'kanvas' not in kanvas_data:
return None
input_image, input_mask = ui_control_helpers.process_kanvas(kanvas_data)
shared.log.debug(f'Kanvas mask: opts={vars(opts)}')
logger.log.debug(f'Kanvas mask: opts={vars(opts)}')
output_mask = run_mask(input_image, input_mask)
return output_mask
@@ -584,7 +585,7 @@ def process_kanvas_lama(kanvas_data):
if kanvas_data is None or 'kanvas' not in kanvas_data:
return None
input_image, input_mask = ui_control_helpers.process_kanvas(kanvas_data)
shared.log.debug(f'Kanvas LaMa: opts={vars(opts)}')
logger.log.debug(f'Kanvas LaMa: opts={vars(opts)}')
output_mask = run_lama(input_image, input_mask)
return output_mask
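
Each optional cv2 post-processing step in `run_mask` logs its own failure and falls through with the mask unchanged, so one bad kernel size cannot abort the whole pipeline. A self-contained sketch of one such step; the parameter names are illustrative, not the repo's `opts` fields:

```python
# sketch of the per-step fallthrough pattern used for erode/dilate/blur;
# parameter names are illustrative stand-ins for the opts fields
import logging
import cv2
import numpy as np

log = logging.getLogger("sd")

def safe_dilate(mask: np.ndarray, amount: float, size: int = 256) -> np.ndarray:
    try:
        k = int(amount * size / 4) + 1
        kernel = np.ones((k, k), np.uint8)
        return cv2.dilate(mask, kernel, iterations=1)
    except Exception as e:
        log.error(f'Mask dilate: {e}')
        return mask  # fall through with the input mask untouched

mask = safe_dilate(np.zeros((64, 64), np.uint8), amount=0.5)
```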

View File

@@ -4,6 +4,7 @@ import os
import psutil
import torch
from modules import shared, errors
from modules import logger
fail_once = False
@@ -60,7 +61,7 @@ def ram_stats():
ram['rss'] = 0
ram['error'] = str(e)
if not fail_once:
shared.log.error(f'RAM stats: {e}')
logger.log.error(f'RAM stats: {e}')
errors.display(e, 'RAM stats')
fail_once = True
try:
@@ -78,7 +79,7 @@ def ram_stats():
ram['cached'] = 0
ram['error'] = str(e)
if not fail_once:
shared.log.error(f'RAM stats: {e}')
logger.log.error(f'RAM stats: {e}')
errors.display(e, 'RAM stats')
fail_once = True
return ram
@@ -102,7 +103,7 @@ def gpu_stats():
gpu['used'] = 0
gpu['error'] = str(e)
if not fail_once:
shared.log.warning(f'GPU stats: {e}')
logger.log.warning(f'GPU stats: {e}')
# errors.display(e, 'GPU stats')
fail_once = True
return gpu
@@ -168,6 +169,6 @@ def get_objects(gcl=None, threshold:int=0):
objects = sorted(objects, key=lambda x: x.size, reverse=True)
for obj in objects:
shared.log.trace(obj)
logger.log.trace(obj)
return objects
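
The `fail_once` flag keeps recurring probe failures from flooding the log: the first exception is reported, later ones only populate the `error` field while the function degrades to zeroed values. A minimal sketch with simplified field names:

```python
# minimal sketch of the fail-once pattern: report a recurring stats failure a
# single time, then degrade quietly to zeroed values; fields are simplified
import logging
import psutil

log = logging.getLogger("sd")
fail_once = False

def ram_stats() -> dict:
    global fail_once
    ram = {'used': 0, 'total': 0}
    try:
        vm = psutil.virtual_memory()
        gb = 1024 ** 3
        ram['used'] = round((vm.total - vm.available) / gb, 2)
        ram['total'] = round(vm.total / gb, 2)
    except Exception as e:
        ram['error'] = str(e)
        if not fail_once:
            log.error(f'RAM stats: {e}')  # logged once; later failures stay quiet
            fail_once = True
    return ram
```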

View File

@@ -5,7 +5,8 @@ import safetensors.torch
import torch
import modules.memstats
import modules.devices as devices
from installer import log, console
from installer import console
from modules.logger import log
from modules.sd_models import read_state_dict
from modules.merging import merge_methods
from modules.merging.merge_utils import WeightClass

View File

@@ -4,7 +4,7 @@ from random import shuffle
from typing import NamedTuple
import torch
from scipy.optimize import linear_sum_assignment
from installer import log
from modules.logger import log
SPECIAL_KEYS = [

View File

@@ -9,6 +9,7 @@ from safetensors.torch import load_file
import diffusers
import transformers
from modules import shared, devices, errors
from modules import logger
class Recipe:
@@ -57,9 +58,9 @@ status = ''
def msg(text, err:bool=False):
global status # pylint: disable=global-statement
if err:
shared.log.error(f'Modules merge: {text}')
logger.log.error(f'Modules merge: {text}')
else:
shared.log.info(f'Modules merge: {text}')
logger.log.info(f'Modules merge: {text}')
status += text + '<br>'
return status
@@ -271,12 +272,12 @@ def save_model(pipe: diffusers.StableDiffusionXLPipeline):
if len(recipe.version) > 0:
folder += f'-{recipe.version}'
if not (recipe.diffusers or recipe.safetensors):
shared.log.debug(f'Modules merge: type=sdxl {recipe} skipping save')
logger.log.debug(f'Modules merge: type=sdxl {recipe} skipping save')
return
try:
yield msg('save')
yield msg(f'pretrained={folder}')
shared.log.info(f'Modules merge save: type=sdxl diffusers="{folder}"')
logger.log.info(f'Modules merge save: type=sdxl diffusers="{folder}"')
pipe.save_pretrained(folder, safe_serialization=True, push_to_hub=False)
with open(os.path.join(folder, 'vae', 'config.json'), encoding='utf8') as f:
vae_config = json.load(f)
@@ -293,7 +294,7 @@ def save_model(pipe: diffusers.StableDiffusionXLPipeline):
fn = os.path.join(shared.opts.ckpt_dir, fn)
if not fn.endswith('.safetensors'):
fn += '.safetensors'
shared.log.info(f'Modules merge save: type=sdxl safetensors="{fn}"')
logger.log.info(f'Modules merge save: type=sdxl safetensors="{fn}"')
yield msg(f'safetensors={fn}')
from modules.merging import convert_sdxl
metadata = convert_sdxl.convert(model_path=folder, checkpoint_path=fn, metadata=get_metadata())
@@ -301,7 +302,7 @@ def save_model(pipe: diffusers.StableDiffusionXLPipeline):
metadata['modelspec.thumbnail'] = f"{metadata['modelspec.thumbnail'].split(',')[0]}:{len(metadata['modelspec.thumbnail'])}" # pylint: disable=use-maxsplit-arg
yield msg(f'metadata={metadata}')
except Exception as e:
shared.log.error(f'Modules merge save: {e}')
logger.log.error(f'Modules merge save: {e}')
errors.display(e, 'merge')
yield msg(f'save: {e}')
@@ -311,7 +312,7 @@ def merge():
yield from load_base()
if pipeline is None:
return
shared.log.info(f'Modules merge: type=sdxl {recipe}')
logger.log.info(f'Modules merge: type=sdxl {recipe}')
pipeline = pipeline.to(device=devices.device, dtype=recipe.dtype)
yield from load_scheduler(pipeline)
yield from load_unet(pipeline)
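
`msg()` does double duty in this module: it routes text to `logger.log` at the right severity and appends it to an HTML `status` string that the merge generator yields back to the UI after every step. A standalone sketch of that shape (the if/else from the diff condensed into one expression):

```python
# standalone sketch of the msg()/yield status pattern from the merge module
import logging

log = logging.getLogger("sd")
status = ''

def msg(text: str, err: bool = False) -> str:
    global status
    (log.error if err else log.info)(f'Modules merge: {text}')
    status += text + '<br>'           # accumulated HTML shown in the UI
    return status

def merge_steps():
    yield msg('load base')            # each step logs once and refreshes the UI
    yield msg('save')

for html in merge_steps():
    pass  # a UI callback would render `html` here
```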

View File

@@ -1,6 +1,6 @@
import os
from modules.paths import data_path
from installer import log
from modules.logger import log
files = [

View File

@@ -1,6 +1,7 @@
# MIT-Han-Lab Nunchaku: <https://github.com/mit-han-lab/nunchaku>
from installer import log, pip
from installer import pip
from modules.logger import log
from modules import devices
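
After the refactor, `installer` keeps only bootstrap helpers while the logger is imported from its new home, so imports that used to read `from installer import log, pip` split in two. A hedged sketch of a consumer; the `pip('install ...')` call shape is an assumption for illustration, not a documented signature:

```python
# sketch of the post-refactor import split; pip('install ...') call shape is
# an assumption, check installer.py for the actual signature
from installer import pip            # bootstrap-time package helper stays put
from modules.logger import log       # unified logger now lives in modules/

log.info('nunchaku: ensuring runtime requirements')
pip('install nunchaku')              # assumed usage, mirrors other installer calls
```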

Some files were not shown because too many files have changed in this diff