diff --git a/readme.md b/readme.md
index f18b02f..7e158ed 100644
--- a/readme.md
+++ b/readme.md
@@ -56,17 +56,10 @@ All examples you can see here are originally generated at 512x512 resolution usi
## Installing the extension
To install the extension, go to the 'Extensions' tab in the [Automatic1111 web-ui](https://github.com/AUTOMATIC1111/stable-diffusion-webui), then open the 'Install from URL' tab. In the 'URL for extension's git repository' field, enter the path to this repository, i.e. 'https://github.com/volotat/SD-CN-Animation.git'. Leave the 'Local directory name' field empty, then press the 'Install' button. Restart the web-ui; a new 'SD-CN-Animation' tab should appear. All generated videos will be saved into the 'stable-diffusion-webui/outputs/sd-cn-animation' folder.
-## Last version changes: v0.8
-* Better error handling. Fixes an issue when errors may not appear in the console.
-* Fixed an issue with deprecated variables. Should be a resolution of running the extension on other webui forks.
-* Slight improvements in vid2vid processing pipeline.
-* Video preview added to the UI. It will become available at the end of the processing.
-* Time elapsed/left indication added.
-* Fixed an issue with color drifting on some models.
-* Sampler type and sampling steps settings added to text2video mode.
-* Added automatic resizing before processing with RAFT and FloweR models.
-
-
\ No newline at end of file
+* Fixed an issue in vid2vid mode where the occlusion mask computed from the optical flow could include unnecessary parts (regions where the flow is non-zero).
+* Added an 'Extra settings' section in vid2vid mode for finer-grained control of the processing pipeline.
+* Better default parameters for the vid2vid pipeline.
+* In txt2vid mode, the seed is now automatically set to -1 after the first frame is generated to prevent blurring issues.
+* Added an option to save the resulting frames into a folder alongside the video.
diff --git a/scripts/base_ui.py b/scripts/base_ui.py
index 9d2ecad..f4cf4f0 100644
--- a/scripts/base_ui.py
+++ b/scripts/base_ui.py
@@ -67,8 +67,8 @@ def setup_common_values(mode, d):
with gr.Row():
seed = gr.Number(label='Seed (this parameter controls how the first frame looks and the color distribution of the consecutive frames, as they are dependent on the first one)', value = d.seed, interactive = True, precision=0)
with gr.Row():
- processing_strength = gr.Slider(label="Processing strength", value=d.processing_strength, minimum=0, maximum=1, step=0.05, interactive=True)
- fix_frame_strength = gr.Slider(label="Fix frame strength", value=d.fix_frame_strength, minimum=0, maximum=1, step=0.05, interactive=True)
+ processing_strength = gr.Slider(label="Processing strength (Step 1)", value=d.processing_strength, minimum=0, maximum=1, step=0.05, interactive=True)
+ fix_frame_strength = gr.Slider(label="Fix frame strength (Step 2)", value=d.fix_frame_strength, minimum=0, maximum=1, step=0.05, interactive=True)
with gr.Row():
sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{mode}_sampling", choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index", interactive=True)
steps = gr.Slider(label="Sampling steps", minimum=1, maximum=150, step=1, elem_id=f"{mode}_steps", value=d.steps, interactive=True)
@@ -83,11 +83,37 @@ def inputs_ui():
with gr.Tab('vid2vid') as tab_vid2vid:
with gr.Row():
- gr.HTML('Put your video here')
+ gr.HTML('Put your video here:')
with gr.Row():
v2v_file = gr.File(label="Input video", interactive=True, file_count="single", file_types=["video"], elem_id="vid_to_vid_chosen_file")
v2v_width, v2v_height, v2v_prompt, v2v_n_prompt, v2v_cfg_scale, v2v_seed, v2v_processing_strength, v2v_fix_frame_strength, v2v_sampler_index, v2v_steps = setup_common_values('vid2vid', v2v_args)
+
+ with gr.Accordion("Extra settings", open=False):
+ gr.HTML('# Occlusion mask params:')
+ with gr.Row():
+ with gr.Column(scale=1, variant='compact'):
+ v2v_occlusion_mask_blur = gr.Slider(label='Occlusion blur strength', minimum=0, maximum=10, step=0.1, value=3, interactive=True)
+ gr.HTML('')
+ v2v_occlusion_mask_trailing = gr.Checkbox(label="Occlusion trailing", info="Reduces ghosting but adds more flickering to the video", value=True, interactive=True)
+ with gr.Column(scale=1, variant='compact'):
+ v2v_occlusion_mask_flow_multiplier = gr.Slider(label='Occlusion flow multiplier', minimum=0, maximum=10, step=0.1, value=5, interactive=True)
+ v2v_occlusion_mask_difo_multiplier = gr.Slider(label='Occlusion diff origin multiplier', minimum=0, maximum=10, step=0.1, value=2, interactive=True)
+ v2v_occlusion_mask_difs_multiplier = gr.Slider(label='Occlusion diff styled multiplier', minimum=0, maximum=10, step=0.1, value=0, interactive=True)
+
+ with gr.Row():
+ with gr.Column(scale=1, variant='compact'):
+ gr.HTML('# Step 1 params:')
+ v2v_step_1_seed = gr.Number(label='Seed', value=-1, interactive=True, precision=0)
+ gr.HTML('<br>')
+ v2v_step_1_blend_alpha = gr.Slider(label='Warped prev frame vs Current frame blend alpha', minimum=0, maximum=1, step=0.1, value=1, interactive=True)
+ v2v_step_1_processing_mode = gr.Radio(["Process full image then blend in occlusions", "Inpaint occlusions"], type="index", \
+ label="Processing mode", value="Process full image then blend in occlusions", interactive=True)
+
+
+ with gr.Column(scale=1, variant='compact'):
+ gr.HTML('# Step 2 params:')
+ v2v_step_2_seed = gr.Number(label='Seed', value=8888, interactive=True, precision=0)
with FormRow(elem_id="vid2vid_override_settings_row") as row:
v2v_override_settings = create_override_settings_dropdown("vid2vid", row)
@@ -150,6 +176,9 @@ def on_ui_tabs():
with gr.Row(variant='compact'):
run_button = gr.Button('Generate', elem_id=f"sdcn_anim_generate", variant='primary')
stop_button = gr.Button('Interrupt', elem_id=f"sdcn_anim_interrupt", variant='primary', interactive=False)
+
+ save_frames_check = gr.Checkbox(label="Save frames into a folder next to the video (check this before running the generation if you also want the frames saved separately)", value=False, interactive=True)
+ gr.HTML('<br>')
with gr.Column(variant="panel"):
sp_progress = gr.HTML(elem_id="sp_progress", value="")
@@ -171,6 +200,8 @@ def on_ui_tabs():
with gr.Row(variant='compact'):
dummy_component = gr.Label(visible=False)
+ components['glo_save_frames_check'] = save_frames_check
+
# Define parameters for the action methods.
method_inputs = [components[name] for name in utils.get_component_names()] + components['v2v_custom_inputs']
diff --git a/scripts/core/flow_utils.py b/scripts/core/flow_utils.py
index 64b8bf5..f921af6 100644
--- a/scripts/core/flow_utils.py
+++ b/scripts/core/flow_utils.py
@@ -40,7 +40,7 @@ def RAFT_clear_memory():
torch.cuda.empty_cache()
RAFT_model = None
-def RAFT_estimate_flow(frame1, frame2, device='cuda', subtract_background=True):
+def RAFT_estimate_flow(frame1, frame2, device='cuda'):
global RAFT_model
org_size = frame1.shape[1], frame1.shape[0]
@@ -72,10 +72,6 @@ def RAFT_estimate_flow(frame1, frame2, device='cuda', subtract_background=True):
RAFT_model.to(device)
RAFT_model.eval()
- #if subtract_background:
- # frame1 = background_subtractor(frame1, fgbg)
- # frame2 = background_subtractor(frame2, fgbg)
-
with torch.no_grad():
frame1_torch = torch.from_numpy(frame1).permute(2, 0, 1).float()[None].to(device)
frame2_torch = torch.from_numpy(frame2).permute(2, 0, 1).float()[None].to(device)
@@ -98,9 +94,9 @@ def RAFT_estimate_flow(frame1, frame2, device='cuda', subtract_background=True):
next_flow = cv2.resize(next_flow, org_size)
prev_flow = cv2.resize(prev_flow, org_size)
- return next_flow, prev_flow, occlusion_mask #, frame1, frame2
+ return next_flow, prev_flow, occlusion_mask
-def compute_diff_map(next_flow, prev_flow, prev_frame, cur_frame, prev_frame_styled):
+def compute_diff_map(next_flow, prev_flow, prev_frame, cur_frame, prev_frame_styled, args_dict):
h, w = cur_frame.shape[:2]
fl_w, fl_h = next_flow.shape[:2]
@@ -108,9 +104,12 @@ def compute_diff_map(next_flow, prev_flow, prev_frame, cur_frame, prev_frame_sty
next_flow = next_flow / np.array([fl_h,fl_w])
prev_flow = prev_flow / np.array([fl_h,fl_w])
- # remove low value noise (@alexfredo suggestion)
- next_flow[np.abs(next_flow) < 0.05] = 0
- prev_flow[np.abs(prev_flow) < 0.05] = 0
+ # compute occlusion mask
+ fb_flow = next_flow + prev_flow
+ fb_norm = np.linalg.norm(fb_flow, axis=2)
+
+ zero_flow_mask = np.clip(1 - np.linalg.norm(prev_flow, axis=-1)[...,None] * 20, 0, 1) # ~1 only where the previous flow is close to zero, so moving regions do not inflate the occlusion mask
+ diff_mask_flow = fb_norm[..., None] * zero_flow_mask
# resize flow
next_flow = cv2.resize(next_flow, (w, h))
@@ -137,23 +136,22 @@ def compute_diff_map(next_flow, prev_flow, prev_frame, cur_frame, prev_frame_sty
#warped_frame = cv2.remap(prev_frame, flow_map, None, cv2.INTER_NEAREST, borderMode = cv2.BORDER_REFLECT)
#warped_frame_styled = cv2.remap(prev_frame_styled, flow_map, None, cv2.INTER_NEAREST, borderMode = cv2.BORDER_REFLECT)
- # compute occlusion mask
- fb_flow = next_flow + prev_flow
- fb_norm = np.linalg.norm(fb_flow, axis=2)
-
- occlusion_mask = fb_norm[..., None]
-
+
diff_mask_org = np.abs(warped_frame.astype(np.float32) - cur_frame.astype(np.float32)) / 255
diff_mask_org = diff_mask_org.max(axis = -1, keepdims=True)
- #diff_mask_stl = np.abs(warped_frame_styled.astype(np.float32) - cur_frame.astype(np.float32)) / 255
- #diff_mask_stl = diff_mask_stl.max(axis = -1, keepdims=True)
+ diff_mask_stl = np.abs(warped_frame_styled.astype(np.float32) - cur_frame.astype(np.float32)) / 255
+ diff_mask_stl = diff_mask_stl.max(axis = -1, keepdims=True)
- alpha_mask = np.maximum(occlusion_mask * 0.3, diff_mask_org * 4) #, diff_mask_stl * 2
+ alpha_mask = np.maximum.reduce([diff_mask_flow * args_dict['occlusion_mask_flow_multiplier'] * 10, \
+ diff_mask_org * args_dict['occlusion_mask_difo_multiplier'], \
+ diff_mask_stl * args_dict['occlusion_mask_difs_multiplier']]) #
alpha_mask = alpha_mask.repeat(3, axis = -1)
#alpha_mask_blured = cv2.dilate(alpha_mask, np.ones((5, 5), np.float32))
- alpha_mask = cv2.GaussianBlur(alpha_mask, (51,51), 5, cv2.BORDER_REFLECT)
+ if args_dict['occlusion_mask_blur'] > 0:
+ blur_filter_size = min(w, h) // 15 | 1 # scale the kernel with resolution; '| 1' keeps it odd, as GaussianBlur requires
+ alpha_mask = cv2.GaussianBlur(alpha_mask, (blur_filter_size, blur_filter_size), args_dict['occlusion_mask_blur'], cv2.BORDER_REFLECT)
alpha_mask = np.clip(alpha_mask, 0, 1)
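
For reference, a minimal standalone sketch of how the new occlusion-mask components are combined and blurred (the three multipliers correspond to the new 'Extra settings' sliders; the helper name, argument names, and HxWx1 shapes are illustrative, not part of the patch):

import cv2
import numpy as np

def combine_occlusion_masks(diff_mask_flow, diff_mask_org, diff_mask_stl,
                            flow_mult, difo_mult, difs_mult, blur_sigma, w, h):
    # take the strongest signal among the three HxWx1 mask sources
    alpha_mask = np.maximum.reduce([diff_mask_flow * flow_mult * 10,
                                    diff_mask_org * difo_mult,
                                    diff_mask_stl * difs_mult])
    alpha_mask = alpha_mask.repeat(3, axis=-1)
    if blur_sigma > 0:
        k = min(w, h) // 15 | 1  # kernel scales with resolution and must be odd
        alpha_mask = cv2.GaussianBlur(alpha_mask, (k, k), blur_sigma, borderType=cv2.BORDER_REFLECT)
    return np.clip(alpha_mask, 0, 1)
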
diff --git a/scripts/core/txt2vid.py b/scripts/core/txt2vid.py
index cb6c925..deb32c6 100644
--- a/scripts/core/txt2vid.py
+++ b/scripts/core/txt2vid.py
@@ -71,12 +71,23 @@ def start_process(*args):
# Create an output video file with the same fps, width, and height as the input video
output_video_name = f'outputs/sd-cn-animation/txt2vid/{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}.mp4'
+ output_video_folder = os.path.splitext(output_video_name)[0]
os.makedirs(os.path.dirname(output_video_name), exist_ok=True)
+
+ if args_dict['save_frames_check']:
+ os.makedirs(output_video_folder, exist_ok=True)
+
+ def save_result_to_image(image, ind):
+ if args_dict['save_frames_check']:
+ cv2.imwrite(os.path.join(output_video_folder, f'{ind:05d}.png'), cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
+
output_video = cv2.VideoWriter(output_video_name, cv2.VideoWriter_fourcc(*'mp4v'), args_dict['fps'], (args_dict['width'], args_dict['height']))
output_video.write(cv2.cvtColor(processed_frame, cv2.COLOR_RGB2BGR))
stat = f"Frame: 1 / {args_dict['length']}; " + utils.get_time_left(1, args_dict['length'], processing_start_time)
utils.shared.is_interrupted = False
+
+ save_result_to_image(processed_frame, 1)
yield stat, init_frame, None, None, processed_frame, None, gr.Button.update(interactive=False), gr.Button.update(interactive=True)
org_size = args_dict['width'], args_dict['height']
@@ -124,6 +135,7 @@ def start_process(*args):
args_dict['mode'] = 4
args_dict['init_img'] = Image.fromarray(curr_frame)
args_dict['mask_img'] = Image.fromarray(pred_occl)
+ args_dict['seed'] = -1
#utils.set_CNs_input_image(args_dict, Image.fromarray(curr_frame))
processed_frames, _, _, _ = utils.img2img(args_dict)
@@ -134,6 +146,7 @@ def start_process(*args):
args_dict['mode'] = 0
args_dict['init_img'] = Image.fromarray(processed_frame)
args_dict['mask_img'] = None
+ args_dict['seed'] = -1
args_dict['denoising_strength'] = args_dict['fix_frame_strength']
#utils.set_CNs_input_image(args_dict, Image.fromarray(curr_frame))
@@ -145,7 +158,7 @@ def start_process(*args):
output_video.write(cv2.cvtColor(processed_frame, cv2.COLOR_RGB2BGR))
prev_frame = processed_frame.copy()
-
+ save_result_to_image(processed_frame, ind + 2)
stat = f"Frame: {ind + 2} / {args_dict['length']}; " + utils.get_time_left(ind+2, args_dict['length'], processing_start_time)
yield stat, curr_frame, pred_occl, warped_frame, processed_frame, None, gr.Button.update(interactive=False), gr.Button.update(interactive=True)
diff --git a/scripts/core/utils.py b/scripts/core/utils.py
index 48f6678..90c8dcb 100644
--- a/scripts/core/utils.py
+++ b/scripts/core/utils.py
@@ -6,8 +6,11 @@ def get_component_names():
'sdcn_process_mode',
'v2v_file', 'v2v_width', 'v2v_height', 'v2v_prompt', 'v2v_n_prompt', 'v2v_cfg_scale', 'v2v_seed', 'v2v_processing_strength', 'v2v_fix_frame_strength',
'v2v_sampler_index', 'v2v_steps', 'v2v_override_settings',
+ 'v2v_occlusion_mask_blur', 'v2v_occlusion_mask_trailing', 'v2v_occlusion_mask_flow_multiplier', 'v2v_occlusion_mask_difo_multiplier', 'v2v_occlusion_mask_difs_multiplier',
+ 'v2v_step_1_processing_mode', 'v2v_step_1_blend_alpha', 'v2v_step_1_seed', 'v2v_step_2_seed',
't2v_width', 't2v_height', 't2v_prompt', 't2v_n_prompt', 't2v_cfg_scale', 't2v_seed', 't2v_processing_strength', 't2v_fix_frame_strength',
- 't2v_sampler_index', 't2v_steps', 't2v_length', 't2v_fps'
+ 't2v_sampler_index', 't2v_steps', 't2v_length', 't2v_fps',
+ 'glo_save_frames_check'
]
return components_list
@@ -105,7 +108,7 @@ def args_to_dict(*args): # converts list of argumets into dictionary for better
def get_mode_args(mode, args_dict):
mode_args_dict = {}
for key, value in args_dict.items():
- if key[:3] == mode:
+ if key[:3] in [mode, 'glo']:
mode_args_dict[key[4:]] = value
return mode_args_dict
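
To illustrate the prefix filtering above: keys prefixed with the active mode ('v2v' or 't2v') or with 'glo' are kept and the prefix is stripped, which is how the new 'glo_save_frames_check' value reaches both pipelines as 'save_frames_check'. A toy example (the dictionary values below are made up):

def get_mode_args(mode, args_dict):
    mode_args_dict = {}
    for key, value in args_dict.items():
        if key[:3] in [mode, 'glo']:
            mode_args_dict[key[4:]] = value
    return mode_args_dict

example = {'v2v_seed': 42, 't2v_seed': 7, 'glo_save_frames_check': True}
print(get_mode_args('v2v', example))
# -> {'seed': 42, 'save_frames_check': True}
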
diff --git a/scripts/core/vid2vid.py b/scripts/core/vid2vid.py
index ffee1ec..038489f 100644
--- a/scripts/core/vid2vid.py
+++ b/scripts/core/vid2vid.py
@@ -100,7 +100,16 @@ def start_process(*args):
# Create an output video file with the same fps, width, and height as the input video
output_video_name = f'outputs/sd-cn-animation/vid2vid/{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}.mp4'
+ output_video_folder = os.path.splitext(output_video_name)[0]
os.makedirs(os.path.dirname(output_video_name), exist_ok=True)
+
+ if args_dict['save_frames_check']:
+ os.makedirs(output_video_folder, exist_ok=True)
+
+ def save_result_to_image(image, ind):
+ if args_dict['save_frames_check']:
+ cv2.imwrite(os.path.join(output_video_folder, f'{ind:05d}.png'), cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
+
sdcn_anim_tmp.output_video = cv2.VideoWriter(output_video_name, cv2.VideoWriter_fourcc(*'mp4v'), sdcn_anim_tmp.fps, (args_dict['width'], args_dict['height']))
curr_frame = read_frame_from_video()
@@ -122,6 +131,8 @@ def start_process(*args):
sdcn_anim_tmp.prev_frame = curr_frame.copy()
sdcn_anim_tmp.prev_frame_styled = processed_frame.copy()
utils.shared.is_interrupted = False
+
+ save_result_to_image(processed_frame, 1)
stat = get_cur_stat() + utils.get_time_left(1, loop_iterations, processing_start_time)
yield stat, sdcn_anim_tmp.curr_frame, None, None, processed_frame, None, gr.Button.update(interactive=False), gr.Button.update(interactive=True)
@@ -147,7 +158,7 @@ def start_process(*args):
curr_frame = cv2.resize(curr_frame, (args_dict['width'], args_dict['height']))
prev_frame = sdcn_anim_tmp.prev_frame.copy()
- next_flow, prev_flow, occlusion_mask = RAFT_estimate_flow(prev_frame, curr_frame, subtract_background=False, device=device)
+ next_flow, prev_flow, occlusion_mask = RAFT_estimate_flow(prev_frame, curr_frame, device=device)
occlusion_mask = np.clip(occlusion_mask * 0.1 * 255, 0, 255).astype(np.uint8)
cn = sdcn_anim_tmp.prepear_counter % 10
@@ -178,70 +189,81 @@ def start_process(*args):
prev_flow = sdcn_anim_tmp.prepared_prev_flows[cn]
### STEP 1
- alpha_mask, warped_styled_frame = compute_diff_map(next_flow, prev_flow, prev_frame, curr_frame, sdcn_anim_tmp.prev_frame_styled)
+ alpha_mask, warped_styled_frame = compute_diff_map(next_flow, prev_flow, prev_frame, curr_frame, sdcn_anim_tmp.prev_frame_styled, args_dict)
warped_styled_frame_ = warped_styled_frame.copy()
- if sdcn_anim_tmp.process_counter > 0:
+ #fl_w, fl_h = prev_flow.shape[:2]
+ #prev_flow_n = prev_flow / np.array([fl_h,fl_w])
+ #flow_mask = np.clip(1 - np.linalg.norm(prev_flow_n, axis=-1)[...,None] * 20, 0, 1)
+ #alpha_mask = alpha_mask * flow_mask
+
+ if sdcn_anim_tmp.process_counter > 0 and args_dict['occlusion_mask_trailing']:
alpha_mask = alpha_mask + sdcn_anim_tmp.prev_frame_alpha_mask * 0.5
sdcn_anim_tmp.prev_frame_alpha_mask = alpha_mask
- # alpha_mask = np.clip(alpha_mask + 0.05, 0.05, 0.95)
+
alpha_mask = np.clip(alpha_mask, 0, 1)
-
- fl_w, fl_h = prev_flow.shape[:2]
- prev_flow_n = prev_flow / np.array([fl_h,fl_w])
- flow_mask = np.clip(1 - np.linalg.norm(prev_flow_n, axis=-1)[...,None], 0, 1)
-
- # fix warped styled frame from duplicated that occures on the places where flow is zero, but only because there is no place to get the color from
- warped_styled_frame = curr_frame[...,:3].astype(float) * alpha_mask * flow_mask + warped_styled_frame[...,:3].astype(float) * (1 - alpha_mask * flow_mask)
-
- # This clipping at lower side required to fix small trailing issues that for some reason left outside of the bright part of the mask,
- # and at the higher part it making parts changed strongly to do it with less flickering.
-
occlusion_mask = np.clip(alpha_mask * 255, 0, 255).astype(np.uint8)
+ # fix duplication artifacts in the warped styled frame that occur where the flow is zero, but only because there is no place to get the color from
+ warped_styled_frame = curr_frame[...,:3].astype(float) * alpha_mask + warped_styled_frame[...,:3].astype(float) * (1 - alpha_mask)
+
# process current frame
- args_dict['mode'] = 4
- init_img = warped_styled_frame * 0.95 + curr_frame * 0.05
- args_dict['init_img'] = Image.fromarray(np.clip(init_img, 0, 255).astype(np.uint8))
- args_dict['mask_img'] = Image.fromarray(occlusion_mask)
- args_dict['seed'] = -1
- utils.set_CNs_input_image(args_dict, Image.fromarray(curr_frame))
- processed_frames, _, _, _ = utils.img2img(args_dict)
+ # TODO: convert args_dict into a separate dict that stores only the params necessary for img2img processing
+ img2img_args_dict = args_dict #copy.deepcopy(args_dict)
+ print('PROCESSING MODE:', args_dict['step_1_processing_mode'])
+ if args_dict['step_1_processing_mode'] == 0: # Process full image then blend in occlusions
+ img2img_args_dict['mode'] = 0
+ img2img_args_dict['mask_img'] = None #Image.fromarray(occlusion_mask)
+ elif args_dict['step_1_processing_mode'] == 1: # Inpaint occlusions
+ img2img_args_dict['mode'] = 4
+ img2img_args_dict['mask_img'] = Image.fromarray(occlusion_mask)
+ else:
+ raise Exception('Incorrect step 1 processing mode!')
+
+ blend_alpha = args_dict['step_1_blend_alpha']
+ init_img = warped_styled_frame * (1 - blend_alpha) + curr_frame * blend_alpha
+ img2img_args_dict['init_img'] = Image.fromarray(np.clip(init_img, 0, 255).astype(np.uint8))
+ img2img_args_dict['seed'] = args_dict['step_1_seed']
+ utils.set_CNs_input_image(img2img_args_dict, Image.fromarray(curr_frame))
+ processed_frames, _, _, _ = utils.img2img(img2img_args_dict)
processed_frame = np.array(processed_frames[0])
# normalizing the colors
processed_frame = skimage.exposure.match_histograms(processed_frame, curr_frame, channel_axis=None)
- #processed_frame = processed_frame.astype(float) * alpha_mask + warped_styled_frame.astype(float) * (1 - alpha_mask)
+ processed_frame = processed_frame.astype(float) * alpha_mask + warped_styled_frame.astype(float) * (1 - alpha_mask)
#processed_frame = processed_frame * 0.94 + curr_frame * 0.06
processed_frame = np.clip(processed_frame, 0, 255).astype(np.uint8)
sdcn_anim_tmp.prev_frame_styled = processed_frame.copy()
### STEP 2
- args_dict['mode'] = 0
- args_dict['init_img'] = Image.fromarray(processed_frame)
- args_dict['mask_img'] = None
- args_dict['denoising_strength'] = args_dict['fix_frame_strength']
- args_dict['seed'] = 8888
- utils.set_CNs_input_image(args_dict, Image.fromarray(curr_frame))
- processed_frames, _, _, _ = utils.img2img(args_dict)
- processed_frame = np.array(processed_frames[0])
- processed_frame = skimage.exposure.match_histograms(processed_frame, curr_frame, channel_axis=None)
+ if args_dict['fix_frame_strength'] > 0:
+ img2img_args_dict = args_dict #copy.deepcopy(args_dict)
+ img2img_args_dict['mode'] = 0
+ img2img_args_dict['init_img'] = Image.fromarray(processed_frame)
+ img2img_args_dict['mask_img'] = None
+ img2img_args_dict['denoising_strength'] = args_dict['fix_frame_strength']
+ img2img_args_dict['seed'] = args_dict['step_2_seed']
+ utils.set_CNs_input_image(img2img_args_dict, Image.fromarray(curr_frame))
+ processed_frames, _, _, _ = utils.img2img(img2img_args_dict)
+ processed_frame = np.array(processed_frames[0])
+ processed_frame = skimage.exposure.match_histograms(processed_frame, curr_frame, channel_axis=None)
processed_frame = np.clip(processed_frame, 0, 255).astype(np.uint8)
warped_styled_frame_ = np.clip(warped_styled_frame_, 0, 255).astype(np.uint8)
-
# Write the frame to the output video
frame_out = np.clip(processed_frame, 0, 255).astype(np.uint8)
frame_out = cv2.cvtColor(frame_out, cv2.COLOR_RGB2BGR)
sdcn_anim_tmp.output_video.write(frame_out)
sdcn_anim_tmp.process_counter += 1
- if sdcn_anim_tmp.process_counter >= sdcn_anim_tmp.total_frames - 1:
- sdcn_anim_tmp.input_video.release()
- sdcn_anim_tmp.output_video.release()
- sdcn_anim_tmp.prev_frame = None
+ #if sdcn_anim_tmp.process_counter >= sdcn_anim_tmp.total_frames - 1:
+ # sdcn_anim_tmp.input_video.release()
+ # sdcn_anim_tmp.output_video.release()
+ # sdcn_anim_tmp.prev_frame = None
+
+ save_result_to_image(processed_frame, sdcn_anim_tmp.process_counter + 1)
stat = get_cur_stat() + utils.get_time_left(step+2, loop_iterations+1, processing_start_time)
yield stat, curr_frame, occlusion_mask, warped_styled_frame_, processed_frame, None, gr.Button.update(interactive=False), gr.Button.update(interactive=True)
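
With 'Save frames into a folder' enabled, the frames are written next to the output video under a folder sharing its timestamped name, roughly like this (the timestamp below is only an example):

outputs/sd-cn-animation/vid2vid/2023-05-14_10-30-00.mp4
outputs/sd-cn-animation/vid2vid/2023-05-14_10-30-00/00001.png
outputs/sd-cn-animation/vid2vid/2023-05-14_10-30-00/00002.png
...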