save txt2vid options as txt (#198)

* save arguments to pass to process_modelscope.py

* save arguments to txt file for reuse

* cleanup

* cleanup

* use webui-formatted infotext

* fixup

* fixup

* fix again cause bad github sync

---------

Co-authored-by: kabachuha <artemkhrapov2001@yandex.ru>
pull/227/head
Derrick Schultz (he/him) 2023-08-30 15:56:33 -04:00 committed by GitHub
parent 20ead1033e
commit 996d9960da
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 22 additions and 2 deletions

View File

@ -217,9 +217,10 @@ def process_modelscope(args_dict, extra_args=None):
args.strength = 1
samples, _ = pipe.infer(args.prompt, args.n_prompt, args.steps, args.frames, args.seed + batch if args.seed != -1 else -1, args.cfg_scale,
samples, _, infotext = pipe.infer(args.prompt, args.n_prompt, args.steps, args.frames, args.seed + batch if args.seed != -1 else -1, args.cfg_scale,
args.width, args.height, args.eta, cpu_vae, device, latents, strength=args.strength, skip_steps=skip_steps, mask=mask, is_vid2vid=args.do_vid2vid, sampler=args.sampler)
if batch > 0:
outdir_current = os.path.join(get_outdir(), f"{init_timestring}_{batch}")
print(f'text2video finished, saving frames to {outdir_current}')
@ -230,6 +231,14 @@ def process_modelscope(args_dict, extra_args=None):
cv2.imwrite(outdir_current + os.path.sep +
f"{i:06}.png", samples[i])
# save settings to a file
if opts.data.get("modelscope_save_info_to_file") if opts.data is not None and opts.data.get("modelscope_save_info_to_file") is not None else False:
args_file = os.path.join(outdir_current,'args.txt')
with open(args_file, 'w', encoding='utf-8') as f:
print(f'saving args to {args_file}')
f.write(infotext)
# TODO: add params to the GUI
if not video_args.skip_video_creation:
ffmpeg_stitch_video(ffmpeg_location=video_args.ffmpeg_location, fps=video_args.fps, outmp4_path=outdir_current + os.path.sep + f"vid.mp4", imgs_path=os.path.join(outdir_current,

View File

@ -21,6 +21,7 @@ import cv2
from modelscope.t2v_model import UNetSD, AutoencoderKL, GaussianDiffusion, beta_schedule
from modules import devices, shared
from modules import prompt_parser
from modules import generation_parameters_copypaste
from samplers.uni_pc.sampler import UniPCSampler
from samplers.samplers_common import Txt2VideoSampler
from samplers.samplers_common import available_samplers
@ -381,7 +382,7 @@ class TextToVideoSynthesis():
del video_data
torch_gc()
last_tensor = self.last_tensor
return video_path, last_tensor
return video_path, last_tensor, create_infotext(vars)
def cleanup(self):
pass
@ -458,3 +459,11 @@ def tensor2vid(video, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]):
for image in images] # f h w c
return images
def create_infotext(vars: dict) -> str:
    """Build a webui-formatted infotext string from a dict of generation args.

    Layout matches AUTOMATIC1111 webui infotext: the positive prompt on the
    first line, an optional "Negative prompt: ..." line, then a comma-joined
    "key: value" list of the remaining parameters (None values omitted).

    Args:
        vars: mapping of generation parameters; 'prompt' and 'n_prompt' are
              treated specially and excluded from the key/value list.

    Returns:
        The assembled infotext string, stripped of surrounding whitespace.
    """
    # Work on a shallow copy so the caller's dict is not mutated by the pops.
    params = dict(vars)
    # Missing keys degrade to empty strings instead of raising KeyError.
    prompt = params.pop('prompt', "")
    n_prompt = params.pop('n_prompt', "")
    # webui convention: emit a bare key when key == value, otherwise a quoted
    # "key: value" pair; entries whose value is None are dropped entirely.
    generation_params_text = ", ".join(
        [k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}'
         for k, v in params.items() if v is not None])
    negative_prompt_text = "\nNegative prompt: " + n_prompt if len(n_prompt) > 0 else ""
    return f"{prompt}{negative_prompt_text}\n{generation_params_text}".strip()

View File

@ -110,6 +110,8 @@ def on_ui_settings():
"GPU (half precision)", "VAE Mode:", gr.Radio, {"interactive": True, "choices": ['GPU (half precision)', 'GPU', 'CPU (Low VRAM)']}, section=section))
shared.opts.add_option("modelscope_deforum_show_n_videos", shared.OptionInfo(
-1, "How many videos to show on the right panel on completion (-1 = show all)", gr.Number, {"interactive": True, "visible": True}, section=section))
shared.opts.add_option("modelscope_save_info_to_file", shared.OptionInfo(
False, "Save generation params to a text file near the video", gr.Checkbox, {'interactive':True, 'visible':True}, section=section))
script_callbacks.on_ui_tabs(on_ui_tabs)
script_callbacks.on_ui_settings(on_ui_settings)