pull/87/head^2
Alexey Borsky 2023-05-14 02:59:45 +03:00
parent 63efd3e0e3
commit 027faf1612
5 changed files with 9 additions and 8 deletions

View File

@ -61,11 +61,12 @@ To install the extension go to 'Extensions' tab in [Automatic1111 web-ui](https:
* The extension might work incorrectly if 'Apply color correction to img2img results to match original colors.' option is enabled. Make sure to disable it in 'Settings' tab -> 'Stable Diffusion' section.
## Last version changes: v0.9
* Fixed issues #69, #76, #91.
* Fixed issues #69, #76, #91, #92.
* Fixed an issue in vid2vid mode when an occlusion mask computed from the optical flow may include unnecessary parts (where flow is non-zero).
* Added 'Extra params' in vid2vid mode for more fine-grained control of the processing pipeline.
* Better default parameters set for vid2vid pipeline.
* In txt2vid mode after the first frame is generated the seed is now automatically set to -1 to prevent blurring issues.
* Added an option to save resulting frames into a folder alongside the video.
* Added ability to export current parameters in a human readable form as a json.
* Interpolation mode in the flow-applying stage is set to nearest to reduce image blurring over time.

View File

@ -196,7 +196,7 @@ def on_ui_tabs():
with gr.Accordion("Export settings", open=False):
export_settings_button = gr.Button('Export', elem_id=f"sdcn_export_settings_button")
export_setting_json = gr.JSON(value='{}')
export_setting_json = gr.Code(value='')
with gr.Column(scale=1, variant='compact'):

View File

@ -116,8 +116,8 @@ def compute_diff_map(next_flow, prev_flow, prev_frame, cur_frame, prev_frame_sty
prev_frame_torch = torch.from_numpy(prev_frame).float().unsqueeze(0).permute(0, 3, 1, 2) #N, C, H, W
prev_frame_styled_torch = torch.from_numpy(prev_frame_styled).float().unsqueeze(0).permute(0, 3, 1, 2) #N, C, H, W
warped_frame = torch.nn.functional.grid_sample(prev_frame_torch, flow_grid, padding_mode="reflection").permute(0, 2, 3, 1)[0].numpy()
warped_frame_styled = torch.nn.functional.grid_sample(prev_frame_styled_torch, flow_grid, padding_mode="reflection").permute(0, 2, 3, 1)[0].numpy()
warped_frame = torch.nn.functional.grid_sample(prev_frame_torch, flow_grid, mode="nearest", padding_mode="reflection").permute(0, 2, 3, 1)[0].numpy()
warped_frame_styled = torch.nn.functional.grid_sample(prev_frame_styled_torch, flow_grid, mode="nearest", padding_mode="reflection").permute(0, 2, 3, 1)[0].numpy()
#warped_frame = cv2.remap(prev_frame, flow_map, None, cv2.INTER_NEAREST, borderMode = cv2.BORDER_REFLECT)
#warped_frame_styled = cv2.remap(prev_frame_styled, flow_map, None, cv2.INTER_NEAREST, borderMode = cv2.BORDER_REFLECT)

View File

@ -116,7 +116,7 @@ def start_process(*args):
flow_map[:,:,0] += np.arange(args_dict['width'])
flow_map[:,:,1] += np.arange(args_dict['height'])[:,np.newaxis]
warped_frame = cv2.remap(prev_frame, flow_map, None, cv2.INTER_CUBIC, borderMode = cv2.BORDER_REFLECT_101)
warped_frame = cv2.remap(prev_frame, flow_map, None, cv2.INTER_NEAREST, borderMode = cv2.BORDER_REFLECT_101)
curr_frame = warped_frame.copy()

View File

@ -171,8 +171,8 @@ def start_process(*args):
sdcn_anim_tmp.frames_prepared = False
cn = sdcn_anim_tmp.process_counter % 10
curr_frame = sdcn_anim_tmp.prepared_frames[cn+1]
prev_frame = sdcn_anim_tmp.prepared_frames[cn]
curr_frame = sdcn_anim_tmp.prepared_frames[cn+1][...,:3]
prev_frame = sdcn_anim_tmp.prepared_frames[cn][...,:3]
next_flow = sdcn_anim_tmp.prepared_next_flows[cn]
prev_flow = sdcn_anim_tmp.prepared_prev_flows[cn]
@ -193,7 +193,7 @@ def start_process(*args):
occlusion_mask = np.clip(alpha_mask * 255, 0, 255).astype(np.uint8)
# fix warped styled frame from duplicates that occur in places where flow is zero, but only because there is no place to get the color from
warped_styled_frame = curr_frame[...,:3].astype(float) * alpha_mask + warped_styled_frame[...,:3].astype(float) * (1 - alpha_mask)
warped_styled_frame = curr_frame.astype(float) * alpha_mask + warped_styled_frame.astype(float) * (1 - alpha_mask)
# process current frame
# TODO: convert args_dict into separate dict that stores only params necessary for img2img processing