add sampler api endpoints

Signed-off-by: vladmandic <mandic00@live.com>
pull/4619/head
vladmandic 2026-02-04 13:08:31 +01:00
parent d7ca4f63a7
commit d9a2a21c8c
14 changed files with 160 additions and 32 deletions

View File

@ -39,6 +39,9 @@ For full list of changes, see full changelog.
- **API**
- add `/sdapi/v1/xyz-grid` to enumerate xyz-grid axis options and their choices
see `/cli/api-xyzenum.py` for example usage
- add `/sdapi/v1/sampler` to get current sampler config
- modify `/sdapi/v1/samplers` to enumerate available samplers and their possible options
see `/cli/api-samplers.py` for example usage
- **Internal**
- tagged release history: <https://github.com/vladmandic/sdnext/tags>
each major for the past year is now tagged for easier reference

35
cli/api-samplers.py Normal file
View File

@ -0,0 +1,35 @@
#!/usr/bin/env python
"""
get list of all samplers and details of current sampler
"""
import sys
import logging
import urllib3
import requests

url = "http://127.0.0.1:7860"
user = ""
password = ""
log_format = '%(asctime)s %(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format)
log = logging.getLogger("sd")
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def get_json(endpoint, auth):
    """GET url+endpoint and return parsed json; log and exit on non-200 response."""
    req = requests.get(f'{url}{endpoint}', verify=False, auth=auth, timeout=60)
    if req.status_code != 200:
        log.error({ 'url': req.url, 'request': req.status_code, 'reason': req.reason })
        sys.exit(1)  # was bare exit(); sys.exit is correct in scripts
    return req.json()


# basic-auth only when both user and password are configured
auth = requests.auth.HTTPBasicAuth(user, password) if len(user) > 0 and len(password) > 0 else None
log.info('available samplers')
for item in get_json('/sdapi/v1/samplers', auth):
    log.info(item)
log.info('current sampler')
# status is now checked for this request too, instead of calling .json() blindly
log.info(get_json('/sdapi/v1/sampler', auth))

View File

@ -1,12 +1,11 @@
import os
import sys
import time
import inspect
import numpy as np
import torch
# Ensure we can import modules
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../")))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from modules.errors import log
from modules.res4lyf import (
@ -20,12 +19,21 @@ from modules.res4lyf import (
BongTangentScheduler, CommonSigmaScheduler, RadauIIAScheduler,
LangevinDynamicsScheduler
)
from modules.schedulers.scheduler_vdm import VDMScheduler
from modules.schedulers.scheduler_unipc_flowmatch import FlowUniPCMultistepScheduler
from modules.schedulers.scheduler_ufogen import UFOGenScheduler
from modules.schedulers.scheduler_tdd import TDDScheduler
from modules.schedulers.scheduler_tcd import TCDScheduler
from modules.schedulers.scheduler_flashflow import FlashFlowMatchEulerDiscreteScheduler
from modules.schedulers.scheduler_dpm_flowmatch import FlowMatchDPMSolverMultistepScheduler
from modules.schedulers.scheduler_dc import DCSolverMultistepScheduler
from modules.schedulers.scheduler_bdia import BDIA_DDIMScheduler
def test_scheduler(name, scheduler_class, config):
try:
scheduler = scheduler_class(**config)
except Exception as e:
log.error(f'scheduler="{name}" cls={scheduler.__class__.__name__} config={config} error="Init failed: {e}"')
log.error(f'scheduler="{name}" cls={scheduler_class} config={config} error="Init failed: {e}"')
return False
num_steps = 20
@ -42,12 +50,19 @@ def test_scheduler(name, scheduler_class, config):
model_output = torch.randn_like(sample)
# Scaling Check
sigma = scheduler.sigmas[scheduler.step_index] if scheduler.step_index is not None else scheduler.sigmas[0] # Handle potential index mismatch if step_index is updated differently, usually step_index matches i for these tests
step_idx = scheduler.step_index if hasattr(scheduler, "step_index") and scheduler.step_index is not None else i
# Clamp index
if hasattr(scheduler, 'sigmas'):
step_idx = min(step_idx, len(scheduler.sigmas) - 1)
sigma = scheduler.sigmas[step_idx]
else:
sigma = torch.tensor(1.0) # Dummy for non-sigma schedulers
# Re-introduce scaling calculation first
scaled_sample = scheduler.scale_model_input(sample, t)
if config.get("prediction_type") == "flow_prediction":
if config.get("prediction_type") == "flow_prediction" or name in ["UFOGenScheduler", "TDDScheduler", "TCDScheduler", "BDIA_DDIMScheduler", "DCSolverMultistepScheduler"]:
# Some new schedulers don't use K-diffusion scaling
expected_scale = 1.0
else:
expected_scale = 1.0 / ((sigma**2 + 1) ** 0.5)
@ -55,8 +70,12 @@ def test_scheduler(name, scheduler_class, config):
# Simple check with loose tolerance due to float precision
expected_scaled_sample = sample * expected_scale
if not torch.allclose(scaled_sample, expected_scaled_sample, atol=1e-4):
log.error(f'scheduler="{name}" cls={scheduler.__class__.__name__} config={config} step={i} expected={expected_scale} error="scaling mismatch"')
return False
# If failed, double check if it's just 'sample' (no scaling)
if torch.allclose(scaled_sample, sample, atol=1e-4):
messages.append('warning="scaling is identity"')
else:
log.error(f'scheduler="{name}" cls={scheduler.__class__.__name__} config={config} step={i} expected={expected_scale} error="scaling mismatch"')
return False
if torch.isnan(scaled_sample).any():
log.error(f'scheduler="{name}" cls={scheduler.__class__.__name__} config={config} step={i} error="NaN in scaled_sample"')
@ -70,15 +89,15 @@ def test_scheduler(name, scheduler_class, config):
# Shape and Dtype check
if output.prev_sample.shape != sample.shape:
log.error(f'scheduler="{name}" cls={scheduler.__class__.__name__} config={config} step={i} error="Shape mismatch: {output.prev_sample.shape} vs {sample.shape}"')
return False
log.error(f'scheduler="{name}" cls={scheduler.__class__.__name__} config={config} step={i} error="Shape mismatch: {output.prev_sample.shape} vs {sample.shape}"')
return False
if output.prev_sample.dtype != sample.dtype:
log.error(f'scheduler="{name}" cls={scheduler.__class__.__name__} config={config} step={i} error="Dtype mismatch: {output.prev_sample.dtype} vs {sample.dtype}"')
return False
log.error(f'scheduler="{name}" cls={scheduler.__class__.__name__} config={config} step={i} error="Dtype mismatch: {output.prev_sample.dtype} vs {sample.dtype}"')
return False
# Update check: Did the sample change?
if not torch.equal(sample, output.prev_sample):
has_changed = True
has_changed = True
# Sample Evolution Check
step_diff = (sample - output.prev_sample).abs().mean().item()
@ -121,9 +140,6 @@ def test_scheduler(name, scheduler_class, config):
return False
final_std = sample.std().item()
with open("std_log.txt", "a") as f:
f.write(f"STD_LOG: {name} config={config} std={final_std}\n")
if final_std > 50.0 or final_std < 0.1:
log.error(f'scheduler="{name}" cls={scheduler.__class__.__name__} config={config} std={final_std} error="variance drift"')
@ -149,7 +165,7 @@ def run_tests():
rk_types = ["res_2m", "res_3m", "res_2s", "res_3s", "res_5s", "res_6s", "deis_1s", "deis_2m", "deis_3m"]
for rk in rk_types:
for pt in prediction_types:
configs.append({"rk_type": rk, "prediction_type": pt})
configs.append({"rk_type": rk, "prediction_type": pt})
elif cls == RESMultistepScheduler:
variants = ["res_2m", "res_3m", "deis_2m", "deis_3m"]
@ -158,9 +174,9 @@ def run_tests():
configs.append({"variant": v, "prediction_type": pt})
elif cls == RESDEISMultistepScheduler:
for order in range(1, 6):
for pt in prediction_types:
configs.append({"solver_order": order, "prediction_type": pt})
for order in range(1, 6):
for pt in prediction_types:
configs.append({"solver_order": order, "prediction_type": pt})
elif cls == ETDRKScheduler:
variants = ["etdrk2_2s", "etdrk3_a_3s", "etdrk3_b_3s", "etdrk4_4s", "etdrk4_4s_alt"]
@ -187,9 +203,9 @@ def run_tests():
configs.append({"variant": v, "prediction_type": pt})
elif cls == RiemannianFlowScheduler:
metrics = ["euclidean", "hyperbolic", "spherical", "lorentzian"]
for m in metrics:
configs.append({"metric_type": m, "prediction_type": "epsilon"}) # Flow usually uses v or raw, but epsilon check matches others
metrics = ["euclidean", "hyperbolic", "spherical", "lorentzian"]
for m in metrics:
configs.append({"metric_type": m, "prediction_type": "epsilon"}) # Flow usually uses v or raw, but epsilon check matches others
if not configs:
for pt in prediction_types:
@ -207,11 +223,12 @@ def run_tests():
for name, cls in VARIANTS:
# these classes preset their variants/rk_types in __init__ so we just test prediction types
for pt in prediction_types:
test_scheduler(name, cls, {"prediction_type": pt})
test_scheduler(name, cls, {"prediction_type": pt})
# Extra robustness check: Flow Prediction Type
log.warning('type="flow"')
flow_schedulers = [
# res4lyf schedulers
RESUnifiedScheduler, RESMultistepScheduler, ABNorsettScheduler,
RESSinglestepScheduler, RESSinglestepSDEScheduler, RESDEISMultistepScheduler,
RESMultistepSDEScheduler, ETDRKScheduler, LawsonScheduler, PECScheduler,
@ -219,10 +236,27 @@ def run_tests():
GaussLegendreScheduler, RungeKutta44Scheduler, RungeKutta57Scheduler,
RungeKutta67Scheduler, SpecializedRKScheduler, BongTangentScheduler,
CommonSigmaScheduler, RadauIIAScheduler, LangevinDynamicsScheduler,
RiemannianFlowScheduler
RiemannianFlowScheduler,
# sdnext schedulers
FlowUniPCMultistepScheduler, FlashFlowMatchEulerDiscreteScheduler, FlowMatchDPMSolverMultistepScheduler,
]
for cls in flow_schedulers:
test_scheduler(cls.__name__, cls, {"prediction_type": "flow_prediction", "use_flow_sigmas": True})
log.warning('type="sdnext"')
extended_schedulers = [
VDMScheduler,
UFOGenScheduler,
TDDScheduler,
TCDScheduler,
DCSolverMultistepScheduler,
BDIA_DDIMScheduler
]
for cls in extended_schedulers:
# Most of these support standard prediction types, try epsilon as default safest bet
# Some might be flow matching specific, we can try robust default list
# For now, just test default init
test_scheduler(cls.__name__, cls, {"prediction_type": "epsilon"})
if __name__ == "__main__":
run_tests()

View File

@ -103,6 +103,7 @@ class Api:
self.add_api_route("/sdapi/v1/latents", endpoints.get_latent_history, methods=["GET"], response_model=List[str])
self.add_api_route("/sdapi/v1/latents", endpoints.post_latent_history, methods=["POST"], response_model=int)
self.add_api_route("/sdapi/v1/modules", endpoints.get_modules, methods=["GET"])
self.add_api_route("/sdapi/v1/sampler", endpoints.get_sampler, methods=["GET"], response_model=dict)
# lora api
from modules.api import loras

View File

@ -6,8 +6,28 @@ from modules.api import models, helpers
def get_samplers():
    """Enumerate available diffusers samplers and their configurable options.

    Returns:
        list[dict]: one entry per sampler with keys ``name`` and ``options``;
        meta/grouping entries ('All', 'Default', 'Res4Lyf') are excluded.
    """
    from modules import sd_samplers_diffusers
    skipped = ('All', 'Default', 'Res4Lyf')  # not real samplers, just grouping entries
    # NOTE(review): removed dead code left over from the previous implementation
    # (an early return of sd_samplers.all_samplers made this body unreachable)
    return [{'name': name, 'options': options} for name, options in sd_samplers_diffusers.config.items() if name not in skipped]
def get_sampler():
    """Return the currently-loaded model's scheduler as ``{'name': ..., 'options': ...}``.

    Private config keys (leading underscore) are stripped from options.
    Returns an empty dict when no model is loaded or it has no scheduler.
    """
    if not shared.sd_loaded or shared.sd_model is None:
        return {}
    if not hasattr(shared.sd_model, 'scheduler'):
        return {}
    scheduler = shared.sd_model.scheduler
    options = {}
    for key, value in scheduler.config.items():
        if not key.startswith('_'):  # hide diffusers-internal config entries
            options[key] = value
    return {'name': scheduler.__class__.__name__, 'options': options}
def get_sd_vaes():
from modules.sd_vae import vae_dict
@ -75,6 +95,13 @@ def get_interrogate():
from modules.interrogate.openclip import refresh_clip_models
return ['deepdanbooru'] + refresh_clip_models()
def get_schedulers():
    """Return the list of all available samplers/schedulers.

    Delegates to ``modules.sd_samplers.list_samplers``.
    """
    from modules.sd_samplers import list_samplers
    # removed leftover debug loop that logged every scheduler at CRITICAL level
    return list_samplers()
def post_interrogate(req: models.ReqInterrogate):
if req.image is None or len(req.image) < 64:
raise HTTPException(status_code=404, detail="Image not found")

View File

@ -86,8 +86,7 @@ class PydanticModelGenerator:
class ItemSampler(BaseModel):
    # response model for /sdapi/v1/samplers entries
    name: str = Field(title="Name")
    aliases: List[str] = Field(title="Aliases")
    # free-form dict: option values vary per sampler class and are not uniformly str,
    # so the earlier duplicate `options: Dict[str, str]` declaration (dead, shadowed) is removed
    options: dict
class ItemVae(BaseModel):
model_name: str = Field(title="Model Name")
@ -199,6 +198,11 @@ class ItemExtension(BaseModel):
commit_date: Union[str, int] = Field(title="Commit Date", description="Extension Repository Commit Date")
enabled: bool = Field(title="Enabled", description="Flag specifying whether this extension is enabled")
class ItemScheduler(BaseModel):
    # response model describing one scheduler entry (name, implementing class, config options)
    name: str = Field(title="Name", description="Scheduler name")
    cls: str = Field(title="Class", description="Scheduler class name")
    options: Dict[str, Any] = Field(title="Options", description="Dictionary of scheduler options")
### request/response classes
ReqTxt2Img = PydanticModelGenerator(

View File

@ -155,6 +155,8 @@ class FlowMatchDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
algorithm_type: str = "dpmsolver++2M",
solver_type: str = "midpoint",
sigma_schedule: Optional[str] = None,
prediction_type: str = "flow_prediction",
use_flow_sigmas: bool = True,
shift: float = 3.0,
midpoint_ratio: Optional[float] = 0.5,
s_noise: Optional[float] = 1.0,

View File

@ -69,6 +69,8 @@ class FlashFlowMatchEulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
num_train_timesteps: int = 1000,
shift: float = 1.0,
use_dynamic_shifting=False,
prediction_type: str = "flow_prediction",
use_flow_sigmas: bool = True,
base_shift: Optional[float] = 0.5,
max_shift: Optional[float] = 1.15,
base_image_seq_len: Optional[int] = 256,
@ -261,6 +263,22 @@ class FlashFlowMatchEulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
else:
self._step_index = self._begin_index
def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
    """
    Compatibility hook matching schedulers that scale the denoising model input
    per timestep. This scheduler applies no input scaling.

    Args:
        sample (`torch.FloatTensor`): the input sample
        timestep (`int`, *optional*): the current timestep in the diffusion chain (unused)

    Returns:
        `torch.FloatTensor`: the sample, returned unmodified
    """
    return sample
def step(
self,
model_output: torch.FloatTensor,

View File

@ -497,7 +497,7 @@ class TCDScheduler(SchedulerMixin, ConfigMixin):
model_output: torch.FloatTensor,
timestep: int,
sample: torch.FloatTensor,
eta: float,
eta: float = 0.0,
generator: Optional[torch.Generator] = None,
return_dict: bool = True,
) -> Union[TCDSchedulerOutput, Tuple]:

View File

@ -224,7 +224,7 @@ class TDDScheduler(DPMSolverSinglestepScheduler):
model_output: torch.FloatTensor,
timestep: int,
sample: torch.FloatTensor,
eta: float,
eta: float = 0.0,
generator: Optional[torch.Generator] = None,
return_dict: bool = True,
) -> Union[SchedulerOutput, Tuple]:

View File

@ -86,6 +86,7 @@ class FlowUniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
lower_order_final: bool = True,
disable_corrector: List[int] = [],
solver_p: SchedulerMixin = None,
use_flow_sigmas: bool = True,
timestep_spacing: str = "linspace",
steps_offset: int = 0,
final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min"

View File

@ -141,7 +141,7 @@ class VDMScheduler(SchedulerMixin, ConfigMixin):
# For linear beta schedule, equivalent to torch.exp(-1e-4 - 10 * t ** 2)
self.alphas_cumprod = lambda t: torch.sigmoid(self.log_snr(t)) # Equivalent to 1 - self.sigmas
self.sigmas = lambda t: torch.sigmoid(-self.log_snr(t)) # Equivalent to 1 - self.alphas_cumprod
self.sigmas = []
self.num_inference_steps = None
self.timesteps = torch.from_numpy(self.get_timesteps(len(self)))
@ -240,6 +240,8 @@ class VDMScheduler(SchedulerMixin, ConfigMixin):
self.num_inference_steps = num_inference_steps
timesteps += self.config.steps_offset
self.timesteps = torch.from_numpy(timesteps).to(device)
self.sigmas = [torch.sigmoid(-self.log_snr(t)) for t in self.timesteps]
self.sigmas = torch.stack(self.sigmas)
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor:

View File

@ -37,6 +37,7 @@ def list_samplers():
samplers = all_samplers
samplers_for_img2img = all_samplers
samplers_map = {}
return all_samplers
# shared.log.debug(f'Available samplers: {[x.name for x in all_samplers]}')

2
wiki

@ -1 +1 @@
Subproject commit da7620df144de8d2af259eff2b7a4522783f38cc
Subproject commit 850c155e238f369dd135d79e138470d7822ad5b6