diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5612d23ad..7deaed7a1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@
- Fix tiny VAE with batched results
- Fix CFG scale not added to metadata and set valid range to >=1.0
- **Other**
+ - Optimized Video tab layout
- Video enable VAE slicing and framewise decoding when possible
- Detect and log `flash-attn` and `sageattention` if installed
- Remove unused UI settings
diff --git a/extensions-builtin/sdnext-modernui b/extensions-builtin/sdnext-modernui
index 840f211be..8f6427aa0 160000
--- a/extensions-builtin/sdnext-modernui
+++ b/extensions-builtin/sdnext-modernui
@@ -1 +1 @@
-Subproject commit 840f211beefeb902ea7ee7ecd5bdd07a2748feb7
+Subproject commit 8f6427aa037b654ae664a0197c794e48fdbbc648
diff --git a/modules/control/run.py b/modules/control/run.py
index b62cad89f..0bdfdc994 100644
--- a/modules/control/run.py
+++ b/modules/control/run.py
@@ -589,7 +589,7 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
if p.scripts is not None:
processed = p.scripts.after(p, processed, *p.script_args)
output = None
- if processed is not None:
+ if processed is not None and processed.images is not None:
output = processed.images
info_txt = [processed.infotext(p, i) for i in range(len(output))]
diff --git a/modules/devices.py b/modules/devices.py
index 723566656..961ffb384 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -545,11 +545,11 @@ def set_sdpa_params():
from importlib.metadata import version
try:
flash = version('flash-attn')
- except:
+ except Exception:
flash = False
try:
sage = version('sageattention')
- except:
+ except Exception:
sage = False
log.info(f'Torch attention: flashattn={flash} sageattention={sage}')
except Exception as e:
diff --git a/modules/face/instantid.py b/modules/face/instantid.py
index 3cc613b3d..158c2f577 100644
--- a/modules/face/instantid.py
+++ b/modules/face/instantid.py
@@ -22,7 +22,7 @@ def instant_id(p: processing.StableDiffusionProcessing, app, source_images, stre
return None
c = shared.sd_model.__class__.__name__ if shared.sd_loaded else ''
- if c != 'StableDiffusionXLPipeline' and c != 'StableDiffusionXLInstantIDPipeline':
+ if c not in ['StableDiffusionXLPipeline', 'StableDiffusionXLInstantIDPipeline']:
shared.log.warning(f'InstantID invalid base model: current={c} required=StableDiffusionXLPipeline')
return None
diff --git a/modules/framepack/framepack_ui.py b/modules/framepack/framepack_ui.py
index 305fddc38..de1ab5347 100644
--- a/modules/framepack/framepack_ui.py
+++ b/modules/framepack/framepack_ui.py
@@ -1,6 +1,5 @@
import gradio as gr
-from modules import ui_sections, ui_common, ui_video_vlm
-from modules.video_models.video_utils import get_codecs
+from modules import ui_sections, ui_video_vlm
from modules.framepack import framepack_load
from modules.framepack.framepack_worker import get_latent_paddings
from modules.framepack.framepack_wrappers import load_model, unload_model
@@ -13,7 +12,7 @@ def change_sections(duration, mp4_fps, mp4_interpolate, latent_ws, variant):
return gr.update(value=f'Target video: {num_frames} frames in {num_sections} sections'), gr.update(lines=max(2, 2*num_sections//3))
-def create_ui(prompt, negative, styles, _overrides):
+def create_ui(prompt, negative, styles, _overrides, init_image, last_image, mp4_fps, mp4_interpolate, mp4_codec, mp4_ext, mp4_opt, mp4_video, mp4_frames, mp4_sf):
with gr.Row():
with gr.Column(variant='compact', elem_id="framepack_settings", elem_classes=['settings-column'], scale=1):
with gr.Row():
@@ -28,25 +27,12 @@ def create_ui(prompt, negative, styles, _overrides):
with gr.Row():
section_html = gr.HTML(show_label=False, elem_id="framepack_section_html")
with gr.Accordion(label="Inputs", open=False):
- with gr.Row():
- input_image = gr.Image(sources='upload', type="numpy", label="FP init image", width=256, height=256, interactive=True, tool="editor", image_mode='RGB', elem_id="framepack_input_image")
- end_image = gr.Image(sources='upload', type="numpy", label="FP end image", width=256, height=256, interactive=True, tool="editor", image_mode='RGB', elem_id="framepack_end_image")
with gr.Row():
start_weight = gr.Slider(label="FP init strength", value=1.0, minimum=0.0, maximum=2.0, step=0.05, elem_id="framepack_start_weight")
end_weight = gr.Slider(label="FP end strength", value=1.0, minimum=0.0, maximum=2.0, step=0.05, elem_id="framepack_end_weight")
vision_weight = gr.Slider(label="FP vision strength", value=1.0, minimum=0.0, maximum=2.0, step=0.05, elem_id="framepack_vision_weight")
with gr.Accordion(label="Sections", open=False):
section_prompt = gr.Textbox(label="FP section prompts", elem_id="framepack_section_prompt", lines=2, placeholder="Optional one-line prompt suffix per each video section", interactive=True)
- with gr.Accordion(label="Video", open=False):
- with gr.Row():
- mp4_codec = gr.Dropdown(label="FP codec", choices=['none', 'libx264'], value='libx264', type='value')
- ui_common.create_refresh_button(mp4_codec, get_codecs, elem_id="framepack_mp4_codec_refresh")
- mp4_ext = gr.Textbox(label="FP format", value='mp4', elem_id="framepack_mp4_ext")
- mp4_opt = gr.Textbox(label="FP options", value='crf:16', elem_id="framepack_mp4_ext")
- with gr.Row():
- mp4_video = gr.Checkbox(label='FP save video', value=True, elem_id="framepack_mp4_video")
- mp4_frames = gr.Checkbox(label='FP save frames', value=False, elem_id="framepack_mp4_frames")
- mp4_sf = gr.Checkbox(label='FP save safetensors', value=False, elem_id="framepack_mp4_sf")
with gr.Accordion(label="Advanced", open=False):
seed = ui_sections.create_seed_inputs('control', reuse_visible=False, subseed_visible=False, accordion=False)[0]
latent_ws = gr.Slider(label="FP latent window size", minimum=1, maximum=33, value=9, step=1)
@@ -58,7 +44,7 @@ def create_ui(prompt, negative, styles, _overrides):
cfg_distilled = gr.Slider(label="FP distilled CFG scale", minimum=1.0, maximum=32.0, value=10.0, step=0.01)
cfg_rescale = gr.Slider(label="FP CFG re-scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01)
- vlm_enhance, vlm_model, vlm_system_prompt = ui_video_vlm.create_ui(prompt_element=prompt, image_element=input_image)
+ vlm_enhance, vlm_model, vlm_system_prompt = ui_video_vlm.create_ui(prompt_element=prompt, image_element=init_image)
with gr.Accordion(label="Model", open=False):
with gr.Row():
@@ -108,7 +94,7 @@ def create_ui(prompt, negative, styles, _overrides):
receipe_reset.click(fn=framepack_load.reset_model, inputs=[], outputs=[receipe])
framepack_inputs=[
- input_image, end_image,
+ init_image, last_image,
start_weight, end_weight, vision_weight,
prompt, system_prompt, optimized_prompt, section_prompt, negative, styles,
seed,
diff --git a/modules/ltx/ltx_process.py b/modules/ltx/ltx_process.py
index 7d9e6ae84..b45298854 100644
--- a/modules/ltx/ltx_process.py
+++ b/modules/ltx/ltx_process.py
@@ -37,6 +37,7 @@ def run_ltx(task_id,
refine_strength:float,
condition_strength: float,
condition_image,
+ condition_last,
condition_files,
condition_video,
condition_video_frames:int,
@@ -100,11 +101,16 @@ def run_ltx(task_id,
)
p.ops.append('video')
+ condition_images = []
+ if condition_image is not None:
+ condition_images.append(condition_image)
+ if condition_last is not None:
+ condition_images.append(condition_last)
conditions = get_conditions(
width,
height,
condition_strength,
- condition_image,
+ condition_images,
condition_files,
condition_video,
condition_video_frames,
diff --git a/modules/ltx/ltx_ui.py b/modules/ltx/ltx_ui.py
index 04cea1d1e..6ac23bf86 100644
--- a/modules/ltx/ltx_ui.py
+++ b/modules/ltx/ltx_ui.py
@@ -1,8 +1,6 @@
import os
import gradio as gr
-from modules import shared, ui_sections, ui_symbols, ui_common
-from modules.ui_components import ToolButton
-from modules.video_models.video_utils import get_codecs
+from modules import shared, ui_sections
from modules.video_models.models_def import models
from modules.ltx import ltx_process
@@ -10,7 +8,7 @@ from modules.ltx import ltx_process
debug = shared.log.trace if os.environ.get('SD_VIDEO_DEBUG', None) is not None else lambda *args, **kwargs: None
-def create_ui(prompt, negative, styles, overrides):
+def create_ui(prompt, negative, styles, overrides, init_image, init_strength, last_image, mp4_fps, mp4_interpolate, mp4_codec, mp4_ext, mp4_opt, mp4_video, mp4_frames, mp4_sf, width, height, frames, seed):
with gr.Row():
with gr.Column(variant='compact', elem_id="ltx_settings", elem_classes=['settings-column'], scale=1):
with gr.Row():
@@ -18,18 +16,8 @@ def create_ui(prompt, negative, styles, overrides):
with gr.Row():
ltx_models = [m.name for m in models['LTX Video']]
model = gr.Dropdown(label='LTX model', choices=ltx_models, value=ltx_models[0])
- with gr.Accordion(open=True, label="LTX size", elem_id='ltx_generate_accordion'):
- with gr.Row():
- width, height = ui_sections.create_resolution_inputs('ltx', default_width=832, default_height=480)
- with gr.Row():
- frames = gr.Slider(label='LTX frames', minimum=1, maximum=513, step=1, value=17, elem_id="ltx_frames")
- seed = gr.Number(label='LTX seed', value=-1, elem_id="ltx_seed", container=True)
- random_seed = ToolButton(ui_symbols.random, elem_id="ltx_seed_random")
with gr.Accordion(open=False, label="Condition", elem_id='ltx_condition_accordion'):
- condition_strength = gr.Slider(label='LTX condition strength', minimum=0.1, maximum=1.0, step=0.05, value=0.8, elem_id="ltx_condition_image_strength")
with gr.Tabs():
- with gr.Tab('Image', id='ltx_condition_image_tab'):
- condition_image = gr.Image(sources='upload', type="pil", label="Image", width=256, height=256, interactive=True, tool="editor", image_mode='RGB', elem_id="ltx_condition_image")
with gr.Tab('Video', id='ltx_condition_video_tab'):
condition_video = gr.Video(label='Video', type='filepath', elem_id="ltx_condition_video", width=256, height=256, source='upload')
with gr.Row():
@@ -45,19 +33,6 @@ def create_ui(prompt, negative, styles, overrides):
with gr.Row():
refine_enable = gr.Checkbox(label='LTX enable refine', value=False, elem_id="ltx_refine_enable")
refine_strength = gr.Slider(label='LTX refine strength', minimum=0.1, maximum=1.0, step=0.05, value=0.4, elem_id="ltx_refine_strength")
- with gr.Accordion(label="Video", open=False):
- with gr.Row():
- mp4_fps = gr.Slider(label="FPS", minimum=1, maximum=60, value=24, step=1)
- mp4_interpolate = gr.Slider(label="LTX interpolation", minimum=0, maximum=10, value=0, step=1)
- with gr.Row():
- mp4_codec = gr.Dropdown(label="LTX codec", choices=['none', 'libx264'], value='libx264', type='value')
- ui_common.create_refresh_button(mp4_codec, get_codecs, elem_id="framepack_mp4_codec_refresh")
- mp4_ext = gr.Textbox(label="LTX format", value='mp4', elem_id="framepack_mp4_ext")
- mp4_opt = gr.Textbox(label="LTX options", value='crf:16', elem_id="framepack_mp4_ext")
- with gr.Row():
- mp4_video = gr.Checkbox(label='LTX save video', value=True, elem_id="framepack_mp4_video")
- mp4_frames = gr.Checkbox(label='LTX save frames', value=False, elem_id="framepack_mp4_frames")
- mp4_sf = gr.Checkbox(label='LTX save safetensors', value=False, elem_id="framepack_mp4_sf")
with gr.Accordion(open=False, label="Advanced", elem_id='ltx_parameters_accordion'):
steps, sampler_index = ui_sections.create_sampler_and_steps_selection(None, "ltx", default_steps=50)
with gr.Row():
@@ -71,7 +46,6 @@ def create_ui(prompt, negative, styles, overrides):
with gr.Row():
text = gr.HTML('', elem_id='ltx_generation_info', show_label=False)
- random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
task_id = gr.Textbox(visible=False, value='')
ui_state = gr.Textbox(visible=False, value='')
state_inputs = [task_id, ui_state]
@@ -83,7 +57,7 @@ def create_ui(prompt, negative, styles, overrides):
steps, sampler_index, seed,
upsample_enable, upsample_ratio,
refine_enable, refine_strength,
- condition_strength, condition_image, condition_files, condition_video, condition_video_frames, condition_video_skip,
+ init_strength, init_image, last_image, condition_files, condition_video, condition_video_frames, condition_video_skip,
decode_timestep, image_cond_noise_scale,
mp4_fps, mp4_interpolate, mp4_codec, mp4_ext, mp4_opt, mp4_video, mp4_frames, mp4_sf,
overrides,
diff --git a/modules/ltx/ltx_util.py b/modules/ltx/ltx_util.py
index ddc53a323..a329373fd 100644
--- a/modules/ltx/ltx_util.py
+++ b/modules/ltx/ltx_util.py
@@ -54,19 +54,20 @@ def load_upsample(upsample_pipe, upsample_repo_id):
return upsample_pipe
-def get_conditions(width, height, condition_strength, condition_image, condition_files, condition_video, condition_video_frames, condition_video_skip):
+def get_conditions(width, height, condition_strength, condition_images, condition_files, condition_video, condition_video_frames, condition_video_skip):
from diffusers.pipelines.ltx.pipeline_ltx_condition import LTXVideoCondition
conditions = []
- if condition_image is not None:
- try:
- if isinstance(condition_image, str):
- from modules.api.api import decode_base64_to_image
- condition_image = decode_base64_to_image(condition_image)
- condition_image = condition_image.convert('RGB').resize((width, height), resample=Image.Resampling.LANCZOS)
- conditions.append(LTXVideoCondition(image=condition_image, frame_index=0, strength=condition_strength))
- shared.log.debug(f'Video condition: image={condition_image.size} strength={condition_strength}')
- except Exception as e:
- shared.log.error(f'LTX condition image: {e}')
+ if condition_images is not None:
+ for condition_image in condition_images:
+ try:
+ if isinstance(condition_image, str):
+ from modules.api.api import decode_base64_to_image
+ condition_image = decode_base64_to_image(condition_image)
+ condition_image = condition_image.convert('RGB').resize((width, height), resample=Image.Resampling.LANCZOS)
+ conditions.append(LTXVideoCondition(image=condition_image, frame_index=0, strength=condition_strength))
+ shared.log.debug(f'Video condition: image={condition_image.size} strength={condition_strength}')
+ except Exception as e:
+ shared.log.error(f'LTX condition image: {e}')
if condition_files is not None:
condition_images = []
for fn in condition_files:
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 9fec9d9e4..63b5dba0d 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -51,7 +51,7 @@ def find_sampler_config(name):
def restore_default(model):
if model is None:
return None
- if getattr(model, "default_scheduler", None) is not None and getattr(model, "scheduler") is not None:
+ if getattr(model, "default_scheduler", None) is not None and getattr(model, "scheduler", None) is not None:
model.scheduler = copy.deepcopy(model.default_scheduler)
if hasattr(model, "prior_pipe") and hasattr(model.prior_pipe, "scheduler"):
model.prior_pipe.scheduler = copy.deepcopy(model.default_scheduler)
diff --git a/modules/ui_video.py b/modules/ui_video.py
index 02c787c6e..c908ac218 100644
--- a/modules/ui_video.py
+++ b/modules/ui_video.py
@@ -27,15 +27,24 @@ def create_ui():
with gr.Row(elem_id="video_interface", equal_height=False):
with gr.Tabs(elem_classes=['video-tabs'], elem_id='video-tabs'):
overrides = ui_common.create_override_inputs('video')
- with gr.Tab('Core', id='video-tab') as video_tab:
+ with gr.Tab('Size', id='video-size-tab') as _video_size_tab:
from modules.video_models import video_ui
- video_ui.create_ui(prompt, negative, styles, overrides)
+ width, height, frames, seed, reuse_seed, random_seed = video_ui.create_ui_size()
+ with gr.Tab('Inputs', id='video-inputs-tab') as _video_inputs_tab:
+ from modules.video_models import video_ui
+ init_image, init_strength, last_image = video_ui.create_ui_inputs()
+ with gr.Tab('Video Output', id='video-outputs-tab') as _video_outputs_tab:
+ from modules.video_models import video_ui
+ mp4_fps, mp4_interpolate, mp4_codec, mp4_ext, mp4_opt, mp4_video, mp4_frames, mp4_sf = video_ui.create_ui_outputs()
+ with gr.Tab('Models', id='video-core-tab') as video_core_tab:
+ from modules.video_models import video_ui
+ video_ui.create_ui(prompt, negative, styles, overrides, init_image, init_strength, last_image, mp4_fps, mp4_interpolate, mp4_codec, mp4_ext, mp4_opt, mp4_video, mp4_frames, mp4_sf, width, height, frames, seed, reuse_seed)
with gr.Tab('FramePack', id='framepack-tab') as framepack_tab:
from modules.framepack import framepack_ui
- framepack_ui.create_ui(prompt, negative, styles, overrides)
+ framepack_ui.create_ui(prompt, negative, styles, overrides, init_image, last_image, mp4_fps, mp4_interpolate, mp4_codec, mp4_ext, mp4_opt, mp4_video, mp4_frames, mp4_sf)
with gr.Tab('LTX', id='ltx-tab') as ltx_tab:
from modules.ltx import ltx_ui
- ltx_ui.create_ui(prompt, negative, styles, overrides)
+ ltx_ui.create_ui(prompt, negative, styles, overrides, init_image, init_strength, last_image, mp4_fps, mp4_interpolate, mp4_codec, mp4_ext, mp4_opt, mp4_video, mp4_frames, mp4_sf, width, height, frames, seed)
paste_fields = [
(prompt, "Prompt"), # cannot add more fields as they are not defined yet
@@ -45,7 +54,7 @@ def create_ui():
generation_parameters_copypaste.register_paste_params_button(bindings)
current_tab = gr.Textbox(visible=False, value='video')
- video_tab.select(fn=lambda: 'video', inputs=[], outputs=[current_tab])
+ video_core_tab.select(fn=lambda: 'video', inputs=[], outputs=[current_tab])
framepack_tab.select(fn=lambda: 'framepack', inputs=[], outputs=[current_tab])
ltx_tab.select(fn=lambda: 'ltx', inputs=[], outputs=[current_tab])
diff --git a/modules/video_models/video_ui.py b/modules/video_models/video_ui.py
index ec82cd4c7..dda49c6c6 100644
--- a/modules/video_models/video_ui.py
+++ b/modules/video_models/video_ui.py
@@ -77,7 +77,50 @@ def run_video(*args):
return video_utils.queue_err(f'model not found: engine="{engine}" model="{model}"')
-def create_ui(prompt, negative, styles, overrides):
+def create_ui_inputs():
+ with gr.Row():
+ with gr.Column(variant='compact', elem_id="video_inputs", elem_classes=['settings-column'], scale=1):
+ init_strength = gr.Slider(label='Init strength', minimum=0.0, maximum=1.0, step=0.01, value=0.8, elem_id="video_denoising_strength")
+ gr.HTML("<br>&nbsp;Init image")
+ init_image = gr.Image(elem_id="video_image", show_label=False, type="pil", image_mode="RGB", width=256, height=256)
+ gr.HTML("<br>&nbsp;Last image")
+ last_image = gr.Image(elem_id="video_last", show_label=False, type="pil", image_mode="RGB", width=256, height=256)
+ return init_image, init_strength, last_image
+
+
+def create_ui_outputs():
+ with gr.Row():
+ with gr.Column(variant='compact', elem_id="video_outputs", elem_classes=['settings-column'], scale=1):
+ with gr.Row():
+ mp4_fps = gr.Slider(label="FPS", minimum=1, maximum=60, value=24, step=1)
+ mp4_interpolate = gr.Slider(label="Video interpolation", minimum=0, maximum=10, value=0, step=1)
+ with gr.Row():
+ mp4_codec = gr.Dropdown(label="Video codec", choices=['none', 'libx264'], value='libx264', type='value')
+ ui_common.create_refresh_button(mp4_codec, video_utils.get_codecs, elem_id="framepack_mp4_codec_refresh")
+ mp4_ext = gr.Textbox(label="Video format", value='mp4', elem_id="framepack_mp4_ext")
+ mp4_opt = gr.Textbox(label="Video options", value='crf:16', elem_id="framepack_mp4_opt")
+ with gr.Row():
+ mp4_video = gr.Checkbox(label='Video save video', value=True, elem_id="framepack_mp4_video")
+ mp4_frames = gr.Checkbox(label='Video save frames', value=False, elem_id="framepack_mp4_frames")
+ mp4_sf = gr.Checkbox(label='Video save safetensors', value=False, elem_id="framepack_mp4_sf")
+ return mp4_fps, mp4_interpolate, mp4_codec, mp4_ext, mp4_opt, mp4_video, mp4_frames, mp4_sf
+
+
+def create_ui_size():
+ with gr.Row():
+ with gr.Column(variant='compact', elem_id="video_size", elem_classes=['settings-column'], scale=1):
+ with gr.Row():
+ width, height = ui_sections.create_resolution_inputs('video', default_width=832, default_height=480)
+ with gr.Row():
+ frames = gr.Slider(label='Frames', minimum=1, maximum=1024, step=1, value=17, elem_id="video_frames")
+ seed = gr.Number(label='Initial seed', value=-1, elem_id="video_seed", container=True)
+ random_seed = ToolButton(ui_symbols.random, elem_id="video_seed_random")
+ reuse_seed = ToolButton(ui_symbols.reuse, elem_id="video_seed_reuse")
+ random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
+ return width, height, frames, seed, reuse_seed, random_seed
+
+
+def create_ui(prompt, negative, styles, overrides, init_image, init_strength, last_image, mp4_fps, mp4_interpolate, mp4_codec, mp4_ext, mp4_opt, mp4_video, mp4_frames, mp4_sf, width, height, frames, seed, reuse_seed):
with gr.Row():
with gr.Column(variant='compact', elem_id="video_settings", elem_classes=['settings-column'], scale=1):
with gr.Row():
@@ -88,14 +131,6 @@ def create_ui(prompt, negative, styles, overrides):
btn_load = ToolButton(ui_symbols.loading, elem_id="video_model_load")
with gr.Row():
url = gr.HTML(label='Model URL', elem_id='video_model_url', value='<br>')
- with gr.Accordion(open=True, label="Size", elem_id='video_size_accordion'):
- with gr.Row():
- width, height = ui_sections.create_resolution_inputs('video', default_width=832, default_height=480)
- with gr.Row():
- frames = gr.Slider(label='Frames', minimum=1, maximum=1024, step=1, value=17, elem_id="video_frames")
- seed = gr.Number(label='Initial seed', value=-1, elem_id="video_seed", container=True)
- random_seed = ToolButton(ui_symbols.random, elem_id="video_seed_random")
- reuse_seed = ToolButton(ui_symbols.reuse, elem_id="video_seed_reuse")
with gr.Accordion(open=False, label="Parameters", elem_id='video_parameters_accordion'):
steps, sampler_index = ui_sections.create_sampler_and_steps_selection(None, "video", default_steps=50)
with gr.Row():
@@ -108,30 +143,9 @@ def create_ui(prompt, negative, styles, overrides):
with gr.Row():
vae_type = gr.Dropdown(label='VAE decode', choices=['Default', 'Tiny', 'Remote'], value='Default', elem_id="video_vae_type")
vae_tile_frames = gr.Slider(label='Tile frames', minimum=1, maximum=64, step=1, value=16, elem_id="video_vae_tile_frames")
- with gr.Accordion(open=False, label="Init image", elem_id='video_init_accordion'):
- init_strength = gr.Slider(label='Init strength', minimum=0.0, maximum=1.0, step=0.01, value=0.5, elem_id="video_denoising_strength")
- gr.HTML("<br>&nbsp;Init image")
- init_image = gr.Image(elem_id="video_image", show_label=False, type="pil", image_mode="RGB", width=256, height=256)
- gr.HTML("<br>&nbsp;Last image")
- last_image = gr.Image(elem_id="video_last", show_label=False, type="pil", image_mode="RGB", width=256, height=256)
vlm_enhance, vlm_model, vlm_system_prompt = ui_video_vlm.create_ui(prompt_element=prompt, image_element=init_image)
- with gr.Accordion(label="Video", open=False, elem_id='video_output_accordion'):
- with gr.Row():
- mp4_fps = gr.Slider(label="FPS", minimum=1, maximum=60, value=24, step=1)
- mp4_interpolate = gr.Slider(label="Video interpolation", minimum=0, maximum=10, value=0, step=1)
- with gr.Row():
- mp4_codec = gr.Dropdown(label="Video codec", choices=['none', 'libx264'], value='libx264', type='value')
- ui_common.create_refresh_button(mp4_codec, video_utils.get_codecs, elem_id="framepack_mp4_codec_refresh")
- mp4_ext = gr.Textbox(label="Video format", value='mp4', elem_id="framepack_mp4_ext")
- mp4_opt = gr.Textbox(label="Video options", value='crf:16', elem_id="framepack_mp4_ext")
- with gr.Row():
- mp4_video = gr.Checkbox(label='Video save video', value=True, elem_id="framepack_mp4_video")
- mp4_frames = gr.Checkbox(label='Video save frames', value=False, elem_id="framepack_mp4_frames")
- mp4_sf = gr.Checkbox(label='Video save safetensors', value=False, elem_id="framepack_mp4_sf")
-
-
# output panel with gallery and video tabs
with gr.Column(elem_id='video-output-column', scale=2) as _column_output:
with gr.Tabs(elem_classes=['video-output-tabs'], elem_id='video-output-tabs'):
@@ -142,7 +156,6 @@ def create_ui(prompt, negative, styles, overrides):
# connect reuse seed button
ui_common.connect_reuse_seed(seed, reuse_seed, gen_info, is_subseed=False)
- random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
# handle engine and model change
engine.change(fn=engine_change, inputs=[engine], outputs=[model])
model.change(fn=model_change, inputs=[engine, model], outputs=[url])
diff --git a/scripts/daam/experiment.py b/scripts/daam/experiment.py
index 4465a3054..301efa429 100644
--- a/scripts/daam/experiment.py
+++ b/scripts/daam/experiment.py
@@ -251,7 +251,7 @@ class GenerationExperiment:
try:
path = self.save_heat_map(word, tokenizer, crop=crop)
path_map[word] = path
- except:
+ except Exception:
pass
return path_map
@@ -328,7 +328,7 @@ class GenerationExperiment:
vocab=vocab,
subtype=directory.name
))
- except:
+ except Exception:
pass
return experiments