video tab add params.txt

Signed-off-by: Vladimir Mandic <mandic00@live.com>
pull/4292/head
Vladimir Mandic 2025-10-22 08:39:45 -04:00
parent d431a30204
commit 0faf61f48a
11 changed files with 95 additions and 20 deletions

1
.gitignore vendored
View File

@ -41,6 +41,7 @@ tunableop_results*.csv
/*.txt
/*.mp3
/*.lnk
/*.swp
!webui.bat
!webui.sh
!package.json

View File

@ -21,6 +21,7 @@
- fix `wan-2.2-a14b` stage selection
- fix `wan-2.2-5b` vae decode
- disabling live preview should not disable progress updates
- video tab: create `params.txt` with metadata
## Update for 2025-10-18

View File

@ -308,12 +308,26 @@ def worker(
if is_last_section:
break
total_generated_frames, _video_filename = save_video(history_pixels, mp4_fps, mp4_codec, mp4_opt, mp4_ext, mp4_sf, mp4_video, mp4_frames, mp4_interpolate, pbar=pbar, stream=stream, metadata=metadata)
total_generated_frames, _video_filename = save_video(
None,
history_pixels,
mp4_fps,
mp4_codec,
mp4_opt,
mp4_ext,
mp4_sf,
mp4_video,
mp4_frames,
mp4_interpolate,
pbar=pbar,
stream=stream,
metadata=metadata,
)
except AssertionError:
shared.log.info('FramePack: interrupted')
if shared.opts.keep_incomplete:
save_video(history_pixels, mp4_fps, mp4_codec, mp4_opt, mp4_ext, mp4_sf, mp4_video, mp4_frames, mp4_interpolate=0, stream=stream, metadata=metadata)
save_video(None, history_pixels, mp4_fps, mp4_codec, mp4_opt, mp4_ext, mp4_sf, mp4_video, mp4_frames, mp4_interpolate=0, stream=stream, metadata=metadata)
except Exception as e:
shared.log.error(f'FramePack: {e}')
errors.display(e, 'FramePack')

View File

@ -3,7 +3,6 @@ import io
import os
from PIL import Image
import gradio as gr
from modules.paths import params_path
from modules import shared, gr_tempdir, script_callbacks, images
from modules.infotext import parse, mapping, quote, unquote # pylint: disable=unused-import
@ -204,6 +203,7 @@ def create_override_settings_dict(text_pairs):
def connect_paste(button, local_paste_fields, input_comp, override_settings_component, tabname):
def paste_func(prompt):
from modules.paths import params_path
if prompt is None or len(prompt.strip()) == 0:
if os.path.exists(params_path):
with open(params_path, "r", encoding="utf8") as file:

View File

@ -23,8 +23,8 @@ NOTHING = object()
class FilenameGenerator:
replacements = {
'width': lambda self: self.image.width,
'height': lambda self: self.image.height,
'width': lambda self: self.width,
'height': lambda self: self.height,
'batch_number': lambda self: self.batch_number,
'iter_number': lambda self: self.iter_number,
'num': lambda self: NOTHING if self.p.n_iter == 1 and self.p.batch_size == 1 else self.p.iteration * self.p.batch_size + self.p.batch_index + 1,
@ -32,8 +32,8 @@ class FilenameGenerator:
'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
'hasprompt': lambda self, *args: self.hasprompt(*args), # accepts formats:[hasprompt<prompt1|default><prompt2>..]
'hash': lambda self: self.image_hash(),
'image_hash': lambda self: self.image_hash(),
'hash': lambda self: self.image_hash() if self.image is not None else '',
'image_hash': lambda self: self.image_hash() if self.image is not None else '',
'timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
'epoch': lambda self: int(time.time()),
'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
@ -61,7 +61,7 @@ class FilenameGenerator:
}
default_time_format = '%Y%m%d%H%M%S'
def __init__(self, p, seed, prompt, image, grid=False):
def __init__(self, p, seed, prompt, image=None, grid=False, width=None, height=None):
if p is None:
debug('Filename generator init skip')
else:
@ -82,6 +82,8 @@ class FilenameGenerator:
if isinstance(self.prompt, list):
self.prompt = ' '.join(self.prompt)
self.image = image
self.width = width if width is not None else (image.width if image is not None else (p.width if p is not None else 0))
self.height = height if height is not None else (image.height if image is not None else (p.height if p is not None else 0))
if not grid:
self.batch_number = NOTHING if self.p is None or getattr(self.p, 'batch_size', 1) == 1 else (self.p.batch_index + 1 if hasattr(self.p, 'batch_index') else NOTHING)
self.iter_number = NOTHING if self.p is None or getattr(self.p, 'n_iter', 1) == 1 else (self.p.iteration + 1 if hasattr(self.p, 'iteration') else NOTHING)

View File

@ -89,6 +89,8 @@ def run_ltx(task_id,
shared.state.job_count = 1
p = processing.StableDiffusionProcessingVideo(
video_engine=engine,
video_model=model,
prompt=prompt,
negative_prompt=negative,
styles=styles,
@ -247,6 +249,7 @@ def run_ltx(task_id,
timer.process.add('offload', t11 - t10)
num_frames, video_file = save_video(
p=p,
pixels=frames,
mp4_fps=mp4_fps,
mp4_codec=mp4_codec,

View File

@ -428,9 +428,11 @@ class StableDiffusionProcessing:
class StableDiffusionProcessingVideo(StableDiffusionProcessing):
def __init__(self, **kwargs):
self.prompt_template: str = None
self.frames: int = 1
self.frames: int = kwargs.pop('frames', 1)
self.vae_tile_frames: int = kwargs.pop('vae_tile_frames', 0)
self.video_engine: str = kwargs.pop('video_engine', None)
self.video_model: str = kwargs.pop('video_model', None)
self.scheduler_shift: float = 0.0
self.vae_tile_frames: int = 0
debug(f'Process init: mode={self.__class__.__name__} kwargs={kwargs}') # pylint: disable=protected-access
super().__init__(**kwargs)

View File

@ -38,7 +38,7 @@ def create_ui():
mp4_fps, mp4_interpolate, mp4_codec, mp4_ext, mp4_opt, mp4_video, mp4_frames, mp4_sf = video_ui.create_ui_outputs()
with gr.Tab('Models', id='video-core-tab') as video_core_tab:
from modules.video_models import video_ui
video_ui.create_ui(prompt, negative, styles, overrides, init_image, init_strength, last_image, mp4_fps, mp4_interpolate, mp4_codec, mp4_ext, mp4_opt, mp4_video, mp4_frames, mp4_sf, width, height, frames, seed, reuse_seed)
engine, model, steps, sampler_index = video_ui.create_ui(prompt, negative, styles, overrides, init_image, init_strength, last_image, mp4_fps, mp4_interpolate, mp4_codec, mp4_ext, mp4_opt, mp4_video, mp4_frames, mp4_sf, width, height, frames, seed, reuse_seed)
with gr.Tab('FramePack', id='framepack-tab') as framepack_tab:
from modules.framepack import framepack_ui
framepack_ui.create_ui(prompt, negative, styles, overrides, init_image, last_image, mp4_fps, mp4_interpolate, mp4_codec, mp4_ext, mp4_opt, mp4_video, mp4_frames, mp4_sf)
@ -48,8 +48,18 @@ def create_ui():
paste_fields = [
(prompt, "Prompt"), # cannot add more fields as they are not defined yet
(negative, "Negative prompt"),
(width, "Width"),
(height, "Height"),
(frames, "Frames"),
(seed, "Seed"),
(styles, "Styles"),
(steps, "Steps"),
(sampler_index, "Sampler"),
(engine, "Engine"),
(model, "Model"),
]
generation_parameters_copypaste.add_paste_fields("video", None, paste_fields, overrides)
generation_parameters_copypaste.add_paste_fields("video", None, paste_fields)
bindings = generation_parameters_copypaste.ParamBinding(paste_button=paste, tabname="video", source_text_component=prompt, source_image_component=None)
generation_parameters_copypaste.register_paste_params_button(bindings)

View File

@ -28,6 +28,8 @@ def generate(*args, **kwargs):
p = processing.StableDiffusionProcessingVideo(
sd_model=shared.sd_model,
video_engine=engine,
video_model=model,
prompt=prompt,
negative_prompt=negative,
styles=styles,
@ -138,6 +140,7 @@ def generate(*args, **kwargs):
# video_file = images.save_video(p, filename=None, images=processed.images, video_type=video_type, duration=video_duration, loop=video_loop, pad=video_pad, interpolate=video_interpolate) # legacy video save from list of images
pixels = video_save.images_to_tensor(processed.images)
_num_frames, video_file = video_save.save_video(
p=p,
pixels=pixels,
mp4_fps=mp4_fps,
mp4_codec=mp4_codec,

View File

@ -1,18 +1,52 @@
import os
import time
import datetime
import cv2
import numpy as np
import torch
import einops
from modules import shared, errors ,timer, rife
from modules import shared, errors ,timer, rife, processing
from modules.video_models.video_utils import check_av
def get_video_filename(frames:int, codec:str):
timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
output_filename = os.path.join(shared.opts.outdir_video, f'{timestamp}-{codec}-f{frames}')
return output_filename
def get_video_filename(p:processing.StableDiffusionProcessingVideo):
    """Build the output path (without extension) for a saved video.

    Applies the user-configured sample filename pattern (falling back to
    "[seq]-[prompt_words]"), optionally nests the file inside a directory
    derived from the directories pattern, creates that directory, and
    resolves the [seq] counter and illegal characters.
    """
    from modules.images_namegen import FilenameGenerator
    namegen = FilenameGenerator(p, seed=p.seed if p is not None else 0, prompt=p.prompt if p is not None else '')
    filename = namegen.apply(shared.opts.samples_filename_pattern if shared.opts.samples_filename_pattern and len(shared.opts.samples_filename_pattern) > 0 else "[seq]-[prompt_words]")
    if shared.opts.save_to_dirs:
        dirname = namegen.apply(shared.opts.directories_filename_pattern or "[prompt_words]")
        # join only the directory part here; previously `filename` was also
        # joined in, so it ended up duplicated in the final path and
        # os.makedirs created a directory named after the file itself
        dirname = os.path.join(shared.opts.outdir_video, dirname)
    else:
        dirname = shared.opts.outdir_video
    if not os.path.exists(dirname):
        os.makedirs(dirname, exist_ok=True)
    filename = os.path.join(dirname, filename)
    filename = namegen.sequence(filename)
    filename = namegen.sanitize(filename)
    return filename
def save_params(p, filename: str = None):
    """Write video generation parameters as a single infotext line.

    Args:
        p: a StableDiffusionProcessingVideo-like object, or None for an empty file.
        filename: target text file; defaults to the global `params_path`.

    Empty and None values are omitted from the output.
    """
    dct = {}
    if p is not None:
        dct = {
            "Prompt": p.prompt,
            "Negative prompt": p.negative_prompt,
            "Steps": p.steps,
            "Sampler": p.sampler_name,
            "Seed": p.seed,
            "Engine": p.video_engine,
            "Model": p.video_model,
            "Frames": p.frames,
            "Size": f"{p.width}x{p.height}",
            "Styles": ','.join(p.styles) if isinstance(p.styles, list) else p.styles,
        }
    params = ', '.join(f'{k}: {v}' for k, v in dct.items() if v is not None and v != '')
    if filename is None:
        # imported lazily: only the default target depends on modules.paths
        from modules.paths import params_path
        filename = params_path
    with open(filename, "w", encoding="utf8") as file:
        file.write(params)
def images_to_tensor(images):
@ -71,6 +105,7 @@ def atomic_save_video(filename, tensor:torch.Tensor, fps:float=24, codec:str='li
def save_video(
p:processing.StableDiffusionProcessingVideo,
pixels:torch.Tensor,
mp4_fps:int=24,
mp4_codec:str='libx264',
@ -111,7 +146,10 @@ def save_video(
x = einops.rearrange(x, '(m n) c t h w -> t (m h) (n w) c', n=n)
x = x.contiguous()
output_filename = get_video_filename(t, mp4_codec)
output_filename = get_video_filename(p)
if shared.opts.save_txt:
save_params(p, f'{output_filename}.txt')
save_params(p)
if mp4_sf:
fn = f'{output_filename}.safetensors'

View File

@ -127,7 +127,7 @@ def create_ui(prompt, negative, styles, overrides, init_image, init_strength, la
generate = gr.Button('Generate', elem_id="video_generate_btn", variant='primary', visible=False)
with gr.Row():
engine = gr.Dropdown(label='Engine', choices=list(models_def.models), value='None', elem_id="video_engine")
model = gr.Dropdown(label='Model', choices=[''], value=None, elem_id="video_model")
model = gr.Dropdown(label='Model', choices=[''], value='None', elem_id="video_model")
btn_load = ToolButton(ui_symbols.loading, elem_id="video_model_load")
with gr.Row():
url = gr.HTML(label='Model URL', elem_id='video_model_url', value='<br><br>')
@ -197,3 +197,4 @@ def create_ui(prompt, negative, styles, overrides, init_image, init_strength, la
show_progress=False,
)
generate.click(**video_dict)
return [engine, model, steps, sampler_index]