RUF013 updates and formatting

pull/4706/head
awsr 2026-03-24 06:07:34 -07:00
parent c4ebef29a9
commit 09ab19c438
No known key found for this signature in database
9 changed files with 45 additions and 44 deletions

View File

@@ -148,7 +148,7 @@ class State:
return job
return None
def history(self, op:str, task_id:str=None, results:list=None):
def history(self, op: str, task_id: str | None = None, results: list | None = None):
if results is None:
results = []
job = {
@@ -174,7 +174,7 @@ class State:
if len(self.results) > 0:
self.history('output', self.id, results=self.results)
def get_id(self, task_id:str=None):
def get_id(self, task_id: str | None = None):
if task_id is None or task_id == 0:
task_id = uuid.uuid4().hex[:15]
if not isinstance(task_id, str):

View File

@@ -76,8 +76,8 @@ class AutoencoderSmall(ModelMixin, ConfigMixin, FromOriginalModelMixin):
down_block_types: tuple[str] = ("DownEncoderBlock2D",),
up_block_types: tuple[str] = ("UpDecoderBlock2D",),
block_out_channels: tuple[int] = (64,),
encoder_block_out_channels: tuple[int] = None,
decoder_block_out_channels: tuple[int] = None,
encoder_block_out_channels: tuple[int] | None = None,
decoder_block_out_channels: tuple[int] | None = None,
layers_per_block: int = 1,
act_fn: str = "silu",
latent_channels: int = 4,

View File

@@ -254,7 +254,7 @@ class EmbeddingDatabase:
self.ids_lookup[first_id] = sorted(self.ids_lookup[first_id] + [(ids, embedding)], key=lambda x: len(x[0]), reverse=True)
return embedding
def load_diffusers_embedding(self, filename: str | list[str] = None, data: dict = None):
def load_diffusers_embedding(self, filename: str | list[str] | None = None, data: dict | None = None):
"""
File names take precedence over bundled embeddings passed as a dict.
Bundled embeddings are automatically set to overwrite previous embeddings.

View File

@@ -38,7 +38,7 @@ def init_generator(device: torch.device, fallback: torch.Generator = None):
return fallback
def do_nothing(x: torch.Tensor, mode: str = None): # pylint: disable=unused-argument
def do_nothing(x: torch.Tensor, mode: str | None = None): # pylint: disable=unused-argument
return x

View File

@@ -19,7 +19,7 @@ debug('Trace: CONTROL')
use_generator = os.environ.get('SD_USE_GENERATOR', None) is not None
def return_stats(t: float = None):
def return_stats(t: float | None = None):
if t is None:
elapsed_text = ''
else:
@@ -48,7 +48,7 @@ def return_stats(t: float = None):
return f"<div class='performance'><p>{elapsed_text} {summary} {gpu} {cpu}</p></div>"
def return_controls(res, t: float = None):
def return_controls(res, t: float | None = None):
# return preview, image, video, gallery, text
debug(f'Control received: type={type(res)} {res}')
if t is None:

View File

@@ -5,7 +5,7 @@ from modules.ui_components import ToolButton
from modules.caption import caption
def create_toprow(is_img2img: bool = False, id_part: str = None, generate_visible: bool = True, negative_visible: bool = True, reprocess_visible: bool = True):
def create_toprow(is_img2img: bool = False, id_part: str | None = None, generate_visible: bool = True, negative_visible: bool = True, reprocess_visible: bool = True):
def apply_styles(prompt, prompt_neg, styles):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles, wildcards=not shared.opts.extra_networks_apply_unparsed)
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles, wildcards=not shared.opts.extra_networks_apply_unparsed)
@@ -92,7 +92,7 @@ def create_resolution_inputs(tab, default_width=1024, default_height=1024):
return width, height
def create_caption_button(tab: str, inputs: list = None, outputs: str = None, what: str = ''):
def create_caption_button(tab: str, inputs: list | None = None, outputs: str | None = None, what: str = ''):
button_caption = gr.Button(ui_symbols.caption, elem_id=f"{tab}_caption_{what}", elem_classes=['caption'])
if inputs is not None and outputs is not None:
button_caption.click(fn=caption.caption, inputs=inputs, outputs=[outputs])

View File

@@ -21,7 +21,7 @@ system_prompts = {
}
def enhance_prompt(enable:bool, model:str=None, image=None, prompt:str='', system_prompt:str='', nsfw:bool=True):
def enhance_prompt(enable: bool, model: str | None = None, image=None, prompt: str = "", system_prompt: str = "", nsfw: bool = True):
from modules.caption import vqa
if not enable:
return prompt

View File

@@ -50,12 +50,12 @@ class Flux2TinyAutoEncoder(ModelMixin, ConfigMixin):
in_channels: int = 3,
out_channels: int = 3,
latent_channels: int = 128,
encoder_block_out_channels: list[int] = None,
decoder_block_out_channels: list[int] = None,
encoder_block_out_channels: list[int] | None = None,
decoder_block_out_channels: list[int] | None = None,
act_fn: str = "silu",
upsampling_scaling_factor: int = 2,
num_encoder_blocks: list[int] = None,
num_decoder_blocks: list[int] = None,
num_encoder_blocks: list[int] | None = None,
num_decoder_blocks: list[int] | None = None,
latent_magnitude: float = 3.0,
latent_shift: float = 0.5,
force_upcast: bool = False,

View File

@@ -29,7 +29,7 @@ def get_video_filename(p:processing.StableDiffusionProcessingVideo):
return filename
def save_params(p, filename: str = None):
def save_params(p, filename: str | None = None):
from modules.paths import params_path
if p is None:
dct = {}
@@ -129,17 +129,18 @@ def write_audio(
container.mux(packet)
def atomic_save_video(filename: str,
tensor:torch.Tensor,
audio:torch.Tensor=None,
fps:float=24,
codec:str='libx264',
pix_fmt:str='yuv420p',
options:str='',
aac:int=24000,
metadata:dict=None,
pbar=None,
):
def atomic_save_video(
filename: str,
tensor: torch.Tensor,
audio: torch.Tensor | None = None,
fps: float = 24,
codec: str = "libx264",
pix_fmt: str = "yuv420p",
options: str = "",
aac: int = 24000,
metadata: dict | None = None,
pbar=None,
):
if metadata is None:
metadata = {}
av = check_av()
@@ -212,23 +213,23 @@ def save_thumbnail(video_path, tensor=None):
def save_video(
p:processing.StableDiffusionProcessingVideo,
pixels:torch.Tensor=None,
audio:torch.Tensor=None,
binary:bytes=None,
mp4_fps:int=24,
mp4_codec:str='libx264',
mp4_opt:str='',
mp4_ext:str='mp4',
mp4_sf:bool=False, # save safetensors
mp4_video:bool=True, # save video
mp4_frames:bool=False, # save frames
mp4_interpolate:int=0, # rife interpolation
aac_sample_rate:int=24000, # audio sample rate
stream=None, # async progress reporting stream
metadata:dict=None, # metadata for video
pbar=None, # progress bar for video
):
p: processing.StableDiffusionProcessingVideo,
pixels: torch.Tensor | None = None,
audio: torch.Tensor | None = None,
binary: bytes | None = None,
mp4_fps: int = 24,
mp4_codec: str = "libx264",
mp4_opt: str = "",
mp4_ext: str = "mp4",
mp4_sf: bool = False, # save safetensors
mp4_video: bool = True, # save video
mp4_frames: bool = False, # save frames
mp4_interpolate: int = 0, # rife interpolation
aac_sample_rate: int = 24000, # audio sample rate
stream=None, # async progress reporting stream
metadata: dict | None = None, # metadata for video
pbar=None, # progress bar for video
):
if metadata is None:
metadata = {}
output_video = None