Merge pull request #4711 from awsr/pep484-plus

Final PEP 484 updates
pull/4713/head
Vladimir Mandic 2026-03-26 07:10:22 +01:00 committed by GitHub
commit 48e8b3a513
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
18 changed files with 49 additions and 49 deletions

View File

@@ -160,7 +160,7 @@ def list_models():
checkpoints_list = dict(sorted(checkpoints_list.items(), key=lambda cp: cp[1].filename))
def update_model_hashes(model_list: dict = None, model_type: str = 'checkpoint'):
def update_model_hashes(model_list: dict | None = None, model_type: str = 'checkpoint'):
def update_model_hashes_table(rows):
html = """
<table class="simple-table">

View File

@@ -237,11 +237,11 @@ class BriaPipeline(FluxPipeline):
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 30,
timesteps: List[int] = None,
timesteps: List[int] | None = None,
guidance_scale: float = 5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,

View File

@@ -99,7 +99,7 @@ def get_by_t5_prompt_embeds(
def get_t5_prompt_embeds(
tokenizer: T5TokenizerFast ,
text_encoder: T5EncoderModel,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
num_images_per_prompt: int = 1,
max_sequence_length: int = 128,
device: Optional[torch.device] = None,
@@ -184,7 +184,7 @@ def get_env_prefix():
def compute_density_for_timestep_sampling(
weighting_scheme: str, batch_size: int, logit_mean: float = None, logit_std: float = None, mode_scale: float = None
weighting_scheme: str, batch_size: int, logit_mean: float | None = None, logit_std: float | None = None, mode_scale: float | None = None
):
"""Compute the density for sampling the timesteps when doing SD3 training.
@@ -236,7 +236,7 @@ def get_clip_prompt_embeds(
text_encoder_2: CLIPTextModelWithProjection,
tokenizer: CLIPTokenizer,
tokenizer_2: CLIPTokenizer,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
num_images_per_prompt: int = 1,
max_sequence_length: int = 77,
device: Optional[torch.device] = None,

View File

@@ -82,7 +82,7 @@ class BriaTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOrig
attention_head_dim: int = 128,
num_attention_heads: int = 24,
joint_attention_dim: int = 4096,
pooled_projection_dim: int = None,
pooled_projection_dim: int | None = None,
guidance_embeds: bool = False,
axes_dims_rope: List[int] = [16, 56, 56],
rope_theta = 10000,

View File

@@ -60,7 +60,7 @@ class Flex2Pipeline(FluxControlPipeline):
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
prompt_2: Optional[Union[str, List[str]]] = None,
inpaint_image: Optional[PipelineImageInput] = None,
inpaint_mask: Optional[PipelineImageInput] = None,

View File

@@ -210,7 +210,7 @@ class HiDreamImageEditingPipeline(DiffusionPipeline, HiDreamImageLoraLoaderMixin
def _get_t5_prompt_embeds(
self,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
max_sequence_length: int = 128,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
@@ -284,7 +284,7 @@ class HiDreamImageEditingPipeline(DiffusionPipeline, HiDreamImageLoraLoaderMixin
def _get_llama3_prompt_embeds(
self,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
max_sequence_length: int = 128,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
@@ -760,7 +760,7 @@ class HiDreamImageEditingPipeline(DiffusionPipeline, HiDreamImageLoraLoaderMixin
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
prompt_2: Optional[Union[str, List[str]]] = None,
prompt_3: Optional[Union[str, List[str]]] = None,
prompt_4: Optional[Union[str, List[str]]] = None,

View File

@@ -81,8 +81,8 @@ class HunyuanImage3Wrapper(torch.nn.Module):
def __call__(
self,
prompt: str,
height: int = None,
width: int = None,
height: int | None = None,
width: int | None = None,
num_inference_steps: int = 50,
num_images_per_prompt: int = 1,
guidance_scale: float = 7.5,

View File

@@ -10,8 +10,8 @@ from modules import devices
class WanImagePipeline(diffusers.WanPipeline):
def __call__(
self,
prompt: Union[str, List[str]] = None,
negative_prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
negative_prompt: Union[str, List[str]] | None = None,
height: int = 480,
width: int = 832,
num_frames: int = 81,

View File

@@ -136,13 +136,13 @@ class CtrlXStableDiffusionXLPipeline(StableDiffusionXLPipeline): # diffusers==0
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
structure_prompt: Optional[Union[str, List[str]]] = None,
appearance_prompt: Optional[Union[str, List[str]]] = None,
structure_image: Optional[PipelineImageInput] = None,
appearance_image: Optional[PipelineImageInput] = None,
num_inference_steps: int = 50,
timesteps: List[int] = None,
timesteps: List[int] | None = None,
negative_prompt: Optional[Union[str, List[str]]] = None,
positive_prompt: Optional[Union[str, List[str]]] = None,
height: Optional[int] = None,
@@ -172,9 +172,9 @@ class CtrlXStableDiffusionXLPipeline(StableDiffusionXLPipeline): # diffusers==0
return_dict: bool = True,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Tuple[int, int] = None,
original_size: Tuple[int, int] | None = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Tuple[int, int] = None,
target_size: Tuple[int, int] | None = None,
clip_skip: Optional[int] = None,
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],

View File

@@ -498,7 +498,7 @@ class DemoFusionSDXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderM
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
prompt_2: Optional[Union[str, List[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,

View File

@@ -631,7 +631,7 @@ class StableDiffusionXLDiffImg2ImgPipeline(DiffusionPipeline, FromSingleFileMixi
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
prompt_2: Optional[Union[str, List[str]]] = None,
image: Union[
torch.FloatTensor,
@@ -662,9 +662,9 @@ class StableDiffusionXLDiffImg2ImgPipeline(DiffusionPipeline, FromSingleFileMixi
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Tuple[int, int] = None,
original_size: Tuple[int, int] | None = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Tuple[int, int] = None,
target_size: Tuple[int, int] | None = None,
aesthetic_score: float = 6.0,
negative_aesthetic_score: float = 2.5,
map: torch.FloatTensor = None, # pylint: disable=redefined-builtin
@@ -1643,7 +1643,7 @@ class StableDiffusionDiffImg2ImgPipeline(DiffusionPipeline):
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
image: Union[torch.FloatTensor, PIL.Image.Image] = None,
strength: float = 1,
num_inference_steps: Optional[int] = 50,

View File

@@ -113,12 +113,12 @@ class FluxInfuseNetPipeline(FluxControlNetPipeline):
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
prompt_2: Optional[Union[str, List[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 28,
timesteps: List[int] = None,
timesteps: List[int] | None = None,
guidance_scale: float = 3.5,
id_image: PipelineImageInput = None,
controlnet_guidance_scale: float = 1.0,

View File

@@ -60,7 +60,7 @@ class Script(scripts_manager.Script):
return [model, id_image, control_image, scale, start, end, id_guidance, control_guidance, restore]
def run(self, p: processing.StableDiffusionProcessing,
model: str = None,
model: str | None = None,
id_image: Image.Image = None,
control_image: Image.Image = None,
scale: float = 1.0,

View File

@@ -779,7 +779,7 @@ class StableDiffusionXLTilingPipeline(
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,

View File

@@ -93,7 +93,7 @@ model = None
processor = None
def image_guard(image, policy:str=None) -> str:
def image_guard(image, policy:str | None=None) -> str:
global model, processor # pylint: disable=global-statement
import json
from installer import install

View File

@@ -1088,12 +1088,12 @@ class PixelSmithXLPipeline(
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
prompt_2: Optional[Union[str, List[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
timesteps: List[int] = None,
timesteps: List[int] | None = None,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
#+#
@@ -1101,7 +1101,7 @@ class PixelSmithXLPipeline(
pag_adaptive_scaling: float = 0.0,
pag_drop_rate: float = 0.5,
pag_applied_layers: List[str] = ['mid'], #['down', 'mid', 'up']
pag_applied_layers_index: List[str] = None, #['d4', 'd5', 'm0']
pag_applied_layers_index: List[str] | None = None, #['d4', 'd5', 'm0']
#+#
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,

View File

@@ -321,7 +321,7 @@ class Script(scripts_manager.Script):
from modules.sd_models_compile import compile_torch
self.llm = compile_torch(self.llm, apply_to_components=False, op="LLM")
def load(self, name:str=None, model_repo:str=None, model_gguf:str=None, model_type:str=None, model_file:str=None):
def load(self, name:str | None=None, model_repo:str | None=None, model_gguf:str | None=None, model_type:str | None=None, model_file:str | None=None):
# Strip symbols from display name if present
name = get_model_repo_from_display(name) if name else self.options.default
if self.busy:
@@ -535,21 +535,21 @@ class Script(scripts_manager.Script):
return current_image
def enhance(self,
model: str=None,
prompt:str=None,
system:str=None,
prefix:str=None,
suffix:str=None,
sample:bool=None,
tokens:int=None,
temperature:float=None,
penalty:float=None,
top_k:int=None,
top_p:float=None,
model: str | None=None,
prompt:str | None=None,
system:str | None=None,
prefix:str | None=None,
suffix:str | None=None,
sample:bool | None=None,
tokens:int | None=None,
temperature:float | None=None,
penalty:float | None=None,
top_k:int | None=None,
top_p:float | None=None,
thinking:bool=False,
seed:int=-1,
image=None,
nsfw:bool=None,
nsfw:bool | None=None,
use_vision:bool=True,
prefill:str='',
keep_prefill:bool=False,

View File

@@ -928,14 +928,14 @@ class StableDiffusionXLSoftFillPipeline(
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt: Union[str, List[str]] | None = None,
prompt_2: Optional[Union[str, List[str]]] = None,
image: Image.Image = None,
mask: Image.Image = None,
noise_fill_image: bool = True, # Adds noise to the image at the masks >0.8 area.
strength: float = 0.3,
num_inference_steps: int = 50,
timesteps: List[int] = None,
timesteps: List[int] | None = None,
denoising_start: Optional[float] = None,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
@@ -955,9 +955,9 @@ class StableDiffusionXLSoftFillPipeline(
return_dict: bool = True,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Tuple[int, int] = None,
original_size: Tuple[int, int] | None = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Tuple[int, int] = None,
target_size: Tuple[int, int] | None = None,
negative_original_size: Optional[Tuple[int, int]] = None,
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
negative_target_size: Optional[Tuple[int, int]] = None,