Add music to video

exit_image
Charles Fettinger 2023-05-17 16:38:32 -07:00
parent 938a8aaefc
commit a45e6efa67
4 changed files with 65 additions and 22 deletions

View File

@ -4,6 +4,7 @@ import modules.shared as shared
import modules.sd_models
import gradio as gr
from scripts import postprocessing_upscale
from pkg_resources import resource_filename
from .prompt_util import readJsonPrompt, process_keys
from .static_variables import jsonprompt_schemafile
import asyncio
@ -141,3 +142,18 @@ def value_to_bool(value):
if value in (0, 1):
return bool(value)
return False
def find_ffmpeg_binary():
    """Locate an ffmpeg executable to invoke.

    Resolution order:
      1. On Google Colab, the system-wide ``ffmpeg`` is preinstalled — use it.
      2. Otherwise look inside the ``binaries`` folder shipped by the
         imageio-ffmpeg package (tried under both of its historical
         distribution names) and pick the newest bundled ``ffmpeg-*`` binary.
      3. Fall back to plain ``'ffmpeg'`` and hope it is on PATH.

    Returns:
        str: Absolute path to a bundled ffmpeg binary, or the bare command
        name ``'ffmpeg'`` when no bundled binary can be found.
    """
    try:
        import google.colab  # noqa: F401 -- presence check only
        return 'ffmpeg'
    except ImportError:
        pass
    for package in ('imageio_ffmpeg', 'imageio-ffmpeg'):
        try:
            binaries_dir = resource_filename(package, 'binaries')
            candidates = [
                os.path.join(binaries_dir, name)
                for name in os.listdir(binaries_dir)
                if name.startswith('ffmpeg-')
            ]
            if candidates:
                # Newest binary first, in case several versions are bundled.
                candidates.sort(key=os.path.getmtime, reverse=True)
                return candidates[0]
        except Exception:
            # This package name is absent/broken -- try the next spelling
            # instead of giving up immediately (the original returned the
            # fallback on the first failure and never tried the second name).
            continue
    return 'ffmpeg'

View File

@ -9,11 +9,11 @@ from .helpers import (
fix_env_Path_ffprobe,
closest_upper_divisible_by_eight,
load_model_from_setting,
do_upscaleImg,value_to_bool
do_upscaleImg,value_to_bool, find_ffmpeg_binary
)
from .sd_helpers import renderImg2Img, renderTxt2Img
from .image import shrink_and_paste_on_blank, open_image, apply_alpha_mask, draw_gradient_ellipse, resize_and_crop_image, crop_fethear_ellipse, crop_inner_image
from .video import write_video
from .video import write_video, add_audio_to_video
def outpaint_steps(
width,
@ -41,7 +41,7 @@ def outpaint_steps(
mask_height,
custom_exit_image,
frame_correction=True, # TODO: add frame_Correction in UI
blend_gradient_size = 61,
blend_gradient_size = 61
):
main_frames = [init_img.convert("RGBA")]
prev_image = init_img.convert("RGBA")
@ -87,7 +87,7 @@ def outpaint_steps(
f"{common_prompt_pre}\n{pr}\n{common_prompt_suf}".strip(),
negative_prompt,
sampler,
num_inference_steps,
int(num_inference_steps),
guidance_scale,
seed,
width,
@ -209,10 +209,11 @@ def create_zoom(
blend_gradient_size,
blend_invert_do,
blend_color,
audio_filename=None,
inpainting_denoising_strength=1,
inpainting_full_res=0,
inpainting_padding=0,
progress=None,
progress=None,
):
for i in range(batchcount):
print(f"Batch {i+1}/{batchcount}")
@ -223,7 +224,7 @@ def create_zoom(
negative_prompt,
num_outpainting_steps,
guidance_scale,
num_inference_steps,
int(num_inference_steps),
custom_init_image,
custom_exit_image,
video_frame_rate,
@ -249,6 +250,7 @@ def create_zoom(
inpainting_full_res,
inpainting_padding,
progress,
audio_filename
)
return result
@ -325,6 +327,7 @@ def create_zoom_single(
inpainting_full_res,
inpainting_padding,
progress,
audio_filename = None
):
# try:
# if gr.Progress() is not None:
@ -429,7 +432,7 @@ def create_zoom_single(
negative_prompt,
current_seed,
sampler,
num_inference_steps,
int(num_inference_steps),
guidance_scale,
inpainting_denoising_strength,
inpainting_mask_blur,
@ -443,7 +446,7 @@ def create_zoom_single(
mask_height,
custom_exit_image,
False,
blend_gradient_size,
blend_gradient_size
)
#for k in range(len(main_frames)):
@ -551,6 +554,9 @@ def create_zoom_single(
blend_gradient_size,
ImageColor.getcolor(blend_color, "RGBA"),
)
if audio_filename is not None:
out_config["video_filename"] = add_audio_to_video(out_config["video_filename"], audio_filename, str.replace(out_config["video_filename"], ".mp4", "_audio.mp4"), find_ffmpeg_binary())
print("Video saved in: " + os.path.join(script_path, out_config["video_filename"]))
return (
out_config["video_filename"],

View File

@ -1,4 +1,5 @@
import json
from msilib.schema import File
import gradio as gr
from .run import create_zoom
import modules.shared as shared
@ -201,7 +202,7 @@ def on_ui_tabs():
info="Frames to freeze at the start of the video",
value=0,
minimum=1,
maximum=60,
maximum=120,
step=1
)
video_last_frame_dupe_amount = gr.Slider(
@ -209,7 +210,7 @@ def on_ui_tabs():
info="Frames to freeze at the end of the video",
value=0,
minimum=1,
maximum=60,
maximum=120,
step=1
)
video_zoom_speed = gr.Slider(
@ -242,16 +243,27 @@ def on_ui_tabs():
label='Blend Edge Color',
default='#ffff00'
)
with gr.Accordion("Blend Info"):
gr.Markdown(
"""# Important Blend Info:
Number of Start and Stop Frame Duplication number of frames used for the blend/wipe effect. At 30 Frames per second, 30 frames is 1 second.
Blend Gradient size determines if blends extend to the border of the images. 61 is typical, higher values may result in frames around steps of your video
with gr.Accordion("Blend Info", open=False):
gr.Markdown(
"""# Important Blend Info:
Number of Start and Stop Frame Duplication number of frames used for the blend/wipe effect. At 30 Frames per second, 30 frames is 1 second.
Blend Gradient size determines if blends extend to the border of the images. 61 is typical, higher values may result in frames around steps of your video
Free to use grayscale blend images can be found here: https://github.com/Oncorporation/obs-studio/tree/master/plugins/obs-transitions/data/luma_wipes
Ideas for custom blend images: https://www.pexels.com/search/gradient/
"""
)
Free to use grayscale blend images can be found here: https://github.com/Oncorporation/obs-studio/tree/master/plugins/obs-transitions/data/luma_wipes
Ideas for custom blend images: https://www.pexels.com/search/gradient/
"""
)
with gr.Tab("Audio"):
with gr.Row():
audio_filename = gr.Textbox(value=None, label="Audio File Name")
audio_file = gr.File(
value=None,
file_count="single",
file_types=["audio"],
type="file",
label="Audio File")
audio_file.change(get_filename, inputs=[audio_file], outputs=[audio_filename])
with gr.Tab("Outpaint"):
inpainting_mask_blur = gr.Slider(
@ -268,7 +280,6 @@ def on_ui_tabs():
type="index",
)
with gr.Tab("Post proccess"):
upscale_do = gr.Checkbox(False, label="Enable Upscale")
upscaler_name = gr.Dropdown(
@ -335,6 +346,7 @@ Our best experience and trade-off is the R-ERSGAn4x upscaler.
blend_gradient_size,
blend_invert_do,
blend_color,
audio_filename,
],
outputs=[output_video, out_image, generation_info, html_info, html_log],
)
@ -352,4 +364,7 @@ def checkPrompts(p):
return gr.Button.update(
interactive=any(0 in sublist for sublist in p)
or any("0" in sublist for sublist in p)
)
)
def get_filename(file):
    """Return the on-disk path of an uploaded gradio File component value.

    gradio fires the ``change`` event with ``None`` when the file picker is
    cleared; guard against that instead of raising AttributeError.

    Args:
        file: A gradio file wrapper exposing a ``.name`` path attribute,
            or None when no file is selected.

    Returns:
        str | None: The temp-file path of the upload, or None when cleared.
    """
    return file.name if file is not None else None

View File

@ -1,5 +1,6 @@
import numpy as np
import imageio
import subprocess
from .image import draw_gradient_ellipse, alpha_composite_images, blend_images, PSLumaWipe_images2
import math
@ -103,4 +104,9 @@ def write_video(file_path, frames, fps, reversed=True, start_frame_dupe_amount=1
for i in range(last_frame_dupe_amount):
self._writer.append_data(np.array(frame))
self._writer.close()
self._writer.close()
def add_audio_to_video(video_path, audio_path, output_path, ffmpeg_location='ffmpeg'):
    """Mux an audio track into a video file via ffmpeg.

    The video stream is copied without re-encoding (``-c:v copy``); the audio
    is transcoded to AAC. ``-shortest`` trims the output to the shorter of
    the two inputs so a long song does not pad the video with frozen frames.

    Args:
        video_path: Path to the source video (expected .mp4).
        audio_path: Path to the audio file to add.
        output_path: Path the muxed result is written to.
        ffmpeg_location: ffmpeg executable to invoke (path or command name).

    Returns:
        str: ``output_path`` on success.

    Raises:
        subprocess.CalledProcessError: If ffmpeg exits non-zero. The original
            ignored the exit status and returned a path to a file that may
            never have been written.
    """
    command = [
        ffmpeg_location,
        '-i', video_path,
        '-i', audio_path,
        '-c:v', 'copy',
        '-c:a', 'aac',
        '-map', '0:v:0',
        '-map', '1:a:0',
        '-shortest',
        output_path,
    ]
    subprocess.run(command, check=True)
    return output_path