fix
parent
f36c4ad6b1
commit
3beff81526
|
|
@ -189,8 +189,6 @@ The extension enables **large image drawing & upscaling with limited VRAM** via
|
|||
|
||||
### Demofusion available
|
||||
|
||||
ℹ Please set a seed for a better result.
|
||||
|
||||
ℹ Ticking the random jitter usually yields better results.
|
||||
|
||||
ℹ Using a higher step count, such as 30 or more, is recommended for better results.
|
||||
|
|
@ -201,10 +199,10 @@ The extension enables **large image drawing & upscaling with limited VRAM** via
|
|||
|
||||
ℹ Do not enable it together with tilediffusion. It supports operations such as tilevae, noise inversion, etc.
|
||||
|
||||
ℹ More suitable for realistic styles
|
||||
|
||||
ℹ For parameters such as c1, c2, and c3, please refer to DemoFusion. You don't have to adjust them.
|
||||
|
||||
ℹ The Euler sampler performs better.
|
||||
|
||||
****
|
||||
|
||||
###
|
||||
|
|
|
|||
|
|
@ -184,8 +184,6 @@
|
|||
|
||||
### Demofusion现已可用
|
||||
|
||||
ℹ 建议设定随机种子以获取更好的图像
|
||||
|
||||
ℹ 勾选随机抖动通常会得到更好的结果
|
||||
|
||||
ℹ推荐使用较高的步数,例如30步以上,往往会有更好的效果
|
||||
|
|
@ -196,10 +194,10 @@
|
|||
|
||||
ℹ不要同时开启tilediffusion. 但该组件支持tilevae、noise inversion等常用功能
|
||||
|
||||
ℹ更适合写实风格的图像
|
||||
|
||||
ℹc1,c2,c3等参数可以参考demofusion. 你不必调整这些参数
|
||||
|
||||
ℹ Euler采样器表现得更好
|
||||
|
||||
****
|
||||
|
||||
###
|
||||
|
|
|
|||
|
|
@ -13,13 +13,14 @@ from modules.ui import gr_show
|
|||
from tile_methods.abstractdiffusion import AbstractDiffusion
|
||||
from tile_methods.demofusion import DemoFusion
|
||||
from tile_utils.utils import *
|
||||
from modules.sd_samplers_common import InterruptedException
|
||||
# import k_diffusion.sampling
|
||||
|
||||
|
||||
CFG_PATH = os.path.join(scripts.basedir(), 'region_configs')
|
||||
BBOX_MAX_NUM = min(getattr(shared.cmd_opts, 'md_max_regions', 8), 16)
|
||||
|
||||
|
||||
|
||||
class Script(scripts.Script):
|
||||
def __init__(self):
|
||||
self.controlnet_script: ModuleType = None
|
||||
|
|
@ -42,10 +43,8 @@ class Script(scripts.Script):
|
|||
with gr.Accordion('DemoFusion', open=False, elem_id=f'MD-{tab}'):
|
||||
with gr.Row(variant='compact') as tab_enable:
|
||||
enabled = gr.Checkbox(label='Enable DemoFusion(Do not open it with tilediffusion)', value=False, elem_id=uid('enabled'))
|
||||
|
||||
with gr.Row(variant='compact') as tab_default:
|
||||
random_jitter = gr.Checkbox(label='Random jitter windows', value=True, elem_id=uid('random-jitter'))
|
||||
keep_input_size = gr.Checkbox(label='Keep input image size', value=True, visible=is_img2img, elem_id=uid('keep-input-size'))
|
||||
random_jitter = gr.Checkbox(label='Random jitter', value = True, elem_id=uid('random-jitter'))
|
||||
keep_input_size = gr.Checkbox(label='Keep input-image size', value=False,visible=is_img2img, elem_id=uid('keep-input-size'))
|
||||
gaussian_filter = gr.Checkbox(label='Gaussian filter', value=False, elem_id=uid('gaussian'))
|
||||
|
||||
|
||||
|
|
@ -64,9 +63,9 @@ class Script(scripts.Script):
|
|||
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Latent window batch size', value=4, elem_id=uid('latent-tile-batch-size'))
|
||||
|
||||
with gr.Row(variant='compact', visible=True) as tab_size:
|
||||
c1 = gr.Slider(minimum=0.5, maximum=3, step=0.1, label='c1', value=3, elem_id=f'c1-{tab}')
|
||||
c2 = gr.Slider(minimum=0.5, maximum=3, step=0.1, label='c2', value=1, elem_id=f'c2-{tab}')
|
||||
c3 = gr.Slider(minimum=0.5, maximum=3, step=0.1, label='c3', value=1, elem_id=f'c3-{tab}')
|
||||
c1 = gr.Slider(minimum=0, maximum=5, step=0.1, label='c1', value=3, elem_id=f'c1-{tab}')
|
||||
c2 = gr.Slider(minimum=0, maximum=5, step=0.1, label='c2', value=1, elem_id=f'c2-{tab}')
|
||||
c3 = gr.Slider(minimum=0, maximum=5, step=0.1, label='c3', value=1, elem_id=f'c3-{tab}')
|
||||
with gr.Row(variant='compact') as tab_upscale:
|
||||
|
||||
scale_factor = gr.Slider(minimum=1.0, maximum=8.0, step=1, label='Scale Factor', value=2.0, elem_id=uid('upscaler-factor'))
|
||||
|
|
@ -223,11 +222,12 @@ class Script(scripts.Script):
|
|||
################################################## Phase Initialization ######################################################
|
||||
|
||||
if not image_ori:
|
||||
|
||||
p.current_step = 0
|
||||
p.denoising_strength = 1
|
||||
# p.sampler = sd_samplers.create_sampler(p.sampler_name, p.sd_model) #NOTE:Wrong but very useful. If corrected, please replace with the content with the following lines
|
||||
# latents = p.rng.next()
|
||||
p.denoising_strength = 1
|
||||
p.sampler = Script.create_sampler_original_md(p.sampler_name, p.sd_model)
|
||||
|
||||
p.sampler = Script.create_sampler_original_md(p.sampler_name, p.sd_model) #scale
|
||||
x = p.rng.next()
|
||||
latents = p.sampler.sample(p, x, conditioning, unconditional_conditioning, image_conditioning=p.txt2img_image_conditioning(x))
|
||||
del x
|
||||
|
|
@ -285,7 +285,7 @@ class Script(scripts.Script):
|
|||
|
||||
p.noise = noise
|
||||
p.x = p.latents.clone()
|
||||
p.current_step=-1
|
||||
p.current_step=0
|
||||
|
||||
p.latents = p.sampler.sample_img2img(p,p.latents, noise , conditioning, unconditional_conditioning, image_conditioning=p.image_conditioning)
|
||||
if self.flag_noise_inverse:
|
||||
|
|
@ -297,6 +297,17 @@ class Script(scripts.Script):
|
|||
p.width = p.width*p.scale_factor
|
||||
p.height = p.height*p.scale_factor
|
||||
return p.latents
|
||||
|
||||
@staticmethod
|
||||
def callback_hijack(self_sampler,d,p):
|
||||
p.current_step = d['i']
|
||||
|
||||
if self_sampler.stop_at is not None and p.current_step > self_sampler.stop_at:
|
||||
raise InterruptedException
|
||||
|
||||
state.sampling_step = p.current_step
|
||||
shared.total_tqdm.update()
|
||||
p.current_step += 1
|
||||
|
||||
|
||||
def create_sampler_hijack(
|
||||
|
|
@ -313,6 +324,8 @@ class Script(scripts.Script):
|
|||
return self.delegate.sampler_raw
|
||||
else:
|
||||
self.reset()
|
||||
sd_samplers_common.Sampler.callback_ori = sd_samplers_common.Sampler.callback_state
|
||||
sd_samplers_common.Sampler.callback_state = lambda self_sampler,d:Script.callback_hijack(self_sampler,d,p)
|
||||
|
||||
self.flag_noise_inverse = hasattr(p, "init_images") and len(p.init_images) > 0 and noise_inverse
|
||||
flag_noise_inverse = self.flag_noise_inverse
|
||||
|
|
@ -467,6 +480,9 @@ class Script(scripts.Script):
|
|||
if hasattr(Script, "create_random_tensors_original_md"):
|
||||
processing.create_random_tensors = Script.create_random_tensors_original_md
|
||||
del Script.create_random_tensors_original_md
|
||||
if hasattr(sd_samplers_common.Sampler, "callback_ori"):
|
||||
sd_samplers_common.Sampler.callback_state = sd_samplers_common.Sampler.callback_ori
|
||||
del sd_samplers_common.Sampler.callback_ori
|
||||
DemoFusion.unhook()
|
||||
self.delegate = None
|
||||
|
||||
|
|
|
|||
|
|
@ -17,10 +17,6 @@ class DemoFusion(AbstractDiffusion):
|
|||
super().__init__(p, *args, **kwargs)
|
||||
assert p.sampler_name != 'UniPC', 'Demofusion is not compatible with UniPC!'
|
||||
|
||||
def add_one(self):
|
||||
self.p.current_step += 1
|
||||
return
|
||||
|
||||
|
||||
def hook(self):
|
||||
steps, self.t_enc = sd_samplers_common.setup_img2img_steps(self.p, None)
|
||||
|
|
@ -177,13 +173,13 @@ class DemoFusion(AbstractDiffusion):
|
|||
blurred_latents = F.conv2d(latents, kernel, padding=kernel_size//2, groups=channels)
|
||||
|
||||
return blurred_latents
|
||||
|
||||
|
||||
|
||||
''' ↓↓↓ kernel hijacks ↓↓↓ '''
|
||||
@torch.no_grad()
|
||||
@keep_signature
|
||||
def forward_one_step(self, x_in, sigma, **kwarg):
|
||||
self.add_one()
|
||||
if self.is_kdiff:
|
||||
self.xi = self.p.x + self.p.noise * self.p.sigmas[self.p.current_step]
|
||||
else:
|
||||
|
|
@ -197,23 +193,24 @@ class DemoFusion(AbstractDiffusion):
|
|||
|
||||
self.c1 = self.cosine_factor ** self.p.cosine_scale_1
|
||||
|
||||
self.x_in_tmp = x_in*(1 - self.c1) + self.xi * self.c1
|
||||
x_in_tmp = x_in*(1 - self.c1) + self.xi * self.c1
|
||||
|
||||
if self.p.random_jitter:
|
||||
jitter_range = self.jitter_range
|
||||
else:
|
||||
jitter_range = 0
|
||||
self.x_in_tmp_ = F.pad(self.x_in_tmp,(jitter_range, jitter_range, jitter_range, jitter_range),'constant',value=0)
|
||||
_,_,H,W = self.x_in_tmp.shape
|
||||
x_in_tmp_ = F.pad(x_in_tmp,(jitter_range, jitter_range, jitter_range, jitter_range),'constant',value=0)
|
||||
_,_,H,W = x_in_tmp.shape
|
||||
|
||||
std_, mean_ = self.x_in_tmp.std(), self.x_in_tmp.mean()
|
||||
std_, mean_ = x_in_tmp.std(), x_in_tmp.mean()
|
||||
c3 = 0.99 * self.cosine_factor ** self.p.cosine_scale_3 + 1e-2
|
||||
latents_gaussian = self.gaussian_filter(self.x_in_tmp, kernel_size=(2*self.p.current_scale_num-1), sigma=0.8*c3)
|
||||
self.latents_gaussian = (latents_gaussian - latents_gaussian.mean()) / latents_gaussian.std() * std_ + mean_
|
||||
latents_gaussian = self.gaussian_filter(x_in_tmp, kernel_size=(2*self.p.current_scale_num-1), sigma=0.8*c3)
|
||||
latents_gaussian = (latents_gaussian - latents_gaussian.mean()) / latents_gaussian.std() * std_ + mean_
|
||||
self.jitter_range = jitter_range
|
||||
self.sampler.model_wrap_cfg.inner_model.forward = self.sample_one_step_local
|
||||
self.repeat_3 = False
|
||||
x_local = self.sampler.model_wrap_cfg.forward_ori(self.x_in_tmp_,sigma, **kwarg)
|
||||
|
||||
x_local = self.sampler.model_wrap_cfg.forward_ori(x_in_tmp_,sigma, **kwarg)
|
||||
self.sampler.model_wrap_cfg.inner_model.forward = self.sampler_forward
|
||||
x_local = x_local[:,:,jitter_range:jitter_range+H,jitter_range:jitter_range+W]
|
||||
|
||||
|
|
@ -229,9 +226,9 @@ class DemoFusion(AbstractDiffusion):
|
|||
|
||||
######
|
||||
if self.gaussian_filter:
|
||||
x_global_i = self.sampler.model_wrap_cfg.forward_ori(self.latents_gaussian[:,:,h::self.p.current_scale_num,w::self.p.current_scale_num],sigma, **kwarg)
|
||||
x_global_i = self.sampler.model_wrap_cfg.forward_ori(latents_gaussian[:,:,h::self.p.current_scale_num,w::self.p.current_scale_num],sigma, **kwarg)
|
||||
else:
|
||||
x_global_i = self.sampler.model_wrap_cfg.forward_ori(self.x_in_tmp[:,:,h::self.p.current_scale_num,w::self.p.current_scale_num],sigma, **kwarg) # x_in_tmp could be changed to latents_gaussian
|
||||
x_global_i = self.sampler.model_wrap_cfg.forward_ori(x_in_tmp[:,:,h::self.p.current_scale_num,w::self.p.current_scale_num],sigma, **kwarg) # x_in_tmp could be changed to latents_gaussian
|
||||
x_global[:,:,h::self.p.current_scale_num,w::self.p.current_scale_num] += x_global_i
|
||||
|
||||
######
|
||||
|
|
@ -241,9 +238,9 @@ class DemoFusion(AbstractDiffusion):
|
|||
# self.x_out_list = []
|
||||
# self.x_out_idx = -1
|
||||
# self.flag = 1
|
||||
# self.sampler.model_wrap_cfg.forward_ori(self.latents_gaussian[:,:,h::self.p.current_scale_num,w::self.p.current_scale_num],sigma,**kwarg)
|
||||
# self.sampler.model_wrap_cfg.forward_ori(x_in_tmp[:,:,h::self.p.current_scale_num,w::self.p.current_scale_num],sigma,**kwarg)
|
||||
# self.flag = 0
|
||||
# x_global_i = self.sampler.model_wrap_cfg.forward_ori(self.x_in_tmp[:,:,h::self.p.current_scale_num,w::self.p.current_scale_num],sigma,**kwarg)
|
||||
# x_global_i = self.sampler.model_wrap_cfg.forward_ori(x_in_tmp[:,:,h::self.p.current_scale_num,w::self.p.current_scale_num],sigma,**kwarg)
|
||||
# x_global[:,:,h::self.p.current_scale_num,w::self.p.current_scale_num] += x_global_i
|
||||
|
||||
self.p.sd_model.apply_model = self.p.sd_model.apply_model_ori
|
||||
|
|
|
|||
Loading…
Reference in New Issue