fixed 'processing_strength' related issue
parent
c3e4b42d98
commit
3c36b8e7c5
|
|
@ -81,4 +81,5 @@ pip install sckit-image==0.19.2 --no-cache-dir
|
|||
|
||||
<!--
|
||||
* ControlNet with preprocessors like "reference_only", "reference_adain", "reference_adain+attn" are not reset between video frames, preserving the ability to control the style of the video.
|
||||
* Fixed an issue because of which the 'processing_strength' UI parameter did not actually affect the denoising strength at the first processing step.
|
||||
-->
|
||||
|
|
|
|||
|
|
@ -88,6 +88,8 @@ def start_process(*args):
|
|||
|
||||
processed_frames, _, _, _ = utils.txt2img(args_dict)
|
||||
processed_frame = np.array(processed_frames[0])[...,:3]
|
||||
#if input_video is not None:
|
||||
# processed_frame = skimage.exposure.match_histograms(processed_frame, curr_video_frame, channel_axis=-1)
|
||||
processed_frame = np.clip(processed_frame, 0, 255).astype(np.uint8)
|
||||
init_frame = processed_frame.copy()
|
||||
|
||||
|
|
@ -146,6 +148,7 @@ def start_process(*args):
|
|||
args_dict['init_img'] = Image.fromarray(curr_frame)
|
||||
args_dict['mask_img'] = Image.fromarray(pred_occl)
|
||||
args_dict['seed'] = -1
|
||||
args_dict['denoising_strength'] = args_dict['processing_strength']
|
||||
|
||||
if input_video is not None:
|
||||
curr_video_frame = read_frame_from_video(input_video)
|
||||
|
|
@ -154,6 +157,9 @@ def start_process(*args):
|
|||
|
||||
processed_frames, _, _, _ = utils.img2img(args_dict)
|
||||
processed_frame = np.array(processed_frames[0])[...,:3]
|
||||
#if input_video is not None:
|
||||
# processed_frame = skimage.exposure.match_histograms(processed_frame, curr_video_frame, channel_axis=-1)
|
||||
#else:
|
||||
processed_frame = skimage.exposure.match_histograms(processed_frame, init_frame, channel_axis=-1)
|
||||
processed_frame = np.clip(processed_frame, 0, 255).astype(np.uint8)
|
||||
|
||||
|
|
@ -166,6 +172,9 @@ def start_process(*args):
|
|||
#utils.set_CNs_input_image(args_dict, Image.fromarray(curr_frame))
|
||||
processed_frames, _, _, _ = utils.img2img(args_dict)
|
||||
processed_frame = np.array(processed_frames[0])[...,:3]
|
||||
#if input_video is not None:
|
||||
# processed_frame = skimage.exposure.match_histograms(processed_frame, curr_video_frame, channel_axis=-1)
|
||||
#else:
|
||||
processed_frame = skimage.exposure.match_histograms(processed_frame, init_frame, channel_axis=-1)
|
||||
processed_frame = np.clip(processed_frame, 0, 255).astype(np.uint8)
|
||||
|
||||
|
|
|
|||
|
|
@ -41,6 +41,7 @@ def args_to_dict(*args): # converts list of argumets into dictionary for better
|
|||
'v2v_cfg_scale': 5.5,
|
||||
'v2v_image_cfg_scale': 1.5,
|
||||
'v2v_denoising_strength': 0.75,
|
||||
'v2v_processing_strength': 0.85,
|
||||
'v2v_fix_frame_strength': 0.15,
|
||||
'v2v_seed': -1,
|
||||
'v2v_subseed': -1,
|
||||
|
|
@ -75,6 +76,7 @@ def args_to_dict(*args): # converts list of argumets into dictionary for better
|
|||
't2v_cfg_scale': 5.5,
|
||||
't2v_image_cfg_scale': 1.5,
|
||||
't2v_denoising_strength': 0.75,
|
||||
't2v_processing_strength': 0.85,
|
||||
't2v_fix_frame_strength': 0.15,
|
||||
't2v_seed': -1,
|
||||
't2v_subseed': -1,
|
||||
|
|
|
|||
|
|
@ -198,7 +198,7 @@ def start_process(*args):
|
|||
# process current frame
|
||||
# TODO: convert args_dict into separate dict that stores only params necessary for img2img processing
|
||||
img2img_args_dict = args_dict #copy.deepcopy(args_dict)
|
||||
print('PROCESSING MODE:', args_dict['step_1_processing_mode'])
|
||||
img2img_args_dict['denoising_strength'] = args_dict['processing_strength']
|
||||
if args_dict['step_1_processing_mode'] == 0: # Process full image then blend in occlusions
|
||||
img2img_args_dict['mode'] = 0
|
||||
img2img_args_dict['mask_img'] = None #Image.fromarray(occlusion_mask)
|
||||
|
|
|
|||
Loading…
Reference in New Issue