mirror of https://github.com/vladmandic/automatic
parent 2635906742
commit 66820edb63
@@ -68,7 +68,7 @@ def instant_id(p: processing.StableDiffusionProcessing, app, source_images, stre
     processing.process_init(p)
     p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
     orig_prompt_attention = shared.opts.prompt_attention
-    shared.opts.data['prompt_attention'] = 'Fixed attention' # otherwise need to deal with class_tokens_mask
+    shared.opts.data['prompt_attention'] = 'fixed' # otherwise need to deal with class_tokens_mask
     p.task_args['image_embeds'] = face_embeds[0].shape # placeholder
     p.task_args['image'] = face_images[0]
     p.task_args['controlnet_conditioning_scale'] = float(conditioning)
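Context: every script touched in this commit follows the same save/set/restore pattern around shared.opts shown in this hunk. A minimal sketch of the pattern (the try/finally wrapper is an illustration-only addition; the scripts themselves restore the value directly after processing):

    # 'fixed' bypasses prompt-weight parsing so pipelines that build their own
    # embeddings (InstantID, PhotoMaker, AnimateDiff, ...) receive the raw prompt
    orig_prompt_attention = shared.opts.prompt_attention
    shared.opts.data['prompt_attention'] = 'fixed'
    try:
        processed = processing.process_images(p)
    finally:
        shared.opts.data['prompt_attention'] = orig_prompt_attention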
@@ -49,7 +49,7 @@ def photo_maker(p: processing.StableDiffusionProcessing, input_images, trigger,
     shared.sd_model.to(dtype=devices.dtype)

     orig_prompt_attention = shared.opts.prompt_attention
-    shared.opts.data['prompt_attention'] = 'Fixed attention' # otherwise need to deal with class_tokens_mask
+    shared.opts.data['prompt_attention'] = 'fixed' # otherwise need to deal with class_tokens_mask
     p.task_args['input_id_images'] = input_images
     p.task_args['start_merge_step'] = int(start * p.steps)
     p.task_args['prompt'] = p.all_prompts[0] if p.all_prompts is not None else p.prompt
@@ -107,12 +107,11 @@ def set_pipeline_args(p, model, prompts: list, negative_prompts: list, prompts_2

     debug(f'Diffusers pipeline possible: {possible}')
     prompts, negative_prompts, prompts_2, negative_prompts_2 = fix_prompts(prompts, negative_prompts, prompts_2, negative_prompts_2)
-    parser = 'Fixed attention'
     steps = kwargs.get("num_inference_steps", None) or len(getattr(p, 'timesteps', ['1']))
     clip_skip = kwargs.pop("clip_skip", 1)

-    # prompt_parser_diffusers.fix_position_ids(model)
-    if shared.opts.prompt_attention != 'Fixed attention' and 'Onnx' not in model.__class__.__name__ and (
+    parser = 'fixed'
+    if shared.opts.prompt_attention != 'fixed' and 'Onnx' not in model.__class__.__name__ and (
         'StableDiffusion' in model.__class__.__name__ or
         'StableCascade' in model.__class__.__name__ or
         'Flux' in model.__class__.__name__
@@ -125,6 +124,8 @@ def set_pipeline_args(p, model, prompts: list, negative_prompts: list, prompts_2
             if os.environ.get('SD_PROMPT_DEBUG', None) is not None:
                 errors.display(e, 'Prompt parser encode')
+        timer.process.record('encode', reset=False)
     else:
         prompt_parser_diffusers.embedder = None
+
     if 'prompt' in possible:
         if 'OmniGen' in model.__class__.__name__:
@@ -156,7 +157,7 @@ def set_pipeline_args(p, model, prompts: list, negative_prompts: list, prompts_2
         else:
             args['negative_prompt'] = negative_prompts

-    if 'clip_skip' in possible and parser == 'Fixed attention':
+    if 'clip_skip' in possible and parser == 'fixed':
         if clip_skip == 1:
             pass # clip_skip = None
         else:
@@ -4,6 +4,7 @@ from modules import shared, sd_samplers_common, sd_vae, generation_parameters_co
 from modules.processing_class import StableDiffusionProcessing


+debug = shared.log.trace if os.environ.get('SD_PROCESS_DEBUG', None) is not None else lambda *args, **kwargs: None
 if not shared.native:
     from modules import sd_hijack
 else:
@@ -39,27 +40,27 @@ def create_infotext(p: StableDiffusionProcessing, all_prompts=None, all_seeds=No
     ops.reverse()
     args = {
         # basic
+        "Size": f"{p.width}x{p.height}" if hasattr(p, 'width') and hasattr(p, 'height') else None,
+        "Sampler": p.sampler_name if p.sampler_name != 'Default' else None,
         "Steps": p.steps,
         "Seed": all_seeds[index],
-        "Sampler": p.sampler_name if p.sampler_name != 'Default' else None,
-        "Seed resize from": None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}",
         "CFG scale": p.cfg_scale if p.cfg_scale > 1.0 else None,
         "CFG end": p.cfg_end if p.cfg_end < 1.0 else None,
-        "Size": f"{p.width}x{p.height}" if hasattr(p, 'width') and hasattr(p, 'height') else None,
+        "Clip skip": p.clip_skip if p.clip_skip > 1 else None,
         "Batch": f'{p.n_iter}x{p.batch_size}' if p.n_iter > 1 or p.batch_size > 1 else None,
+        "Parser": shared.opts.prompt_attention.split()[0],
         "Model": None if (not shared.opts.add_model_name_to_info) or (not shared.sd_model.sd_checkpoint_info.model_name) else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', ''),
         "Model hash": getattr(p, 'sd_model_hash', None if (not shared.opts.add_model_hash_to_info) or (not shared.sd_model.sd_model_hash) else shared.sd_model.sd_model_hash),
         "VAE": (None if not shared.opts.add_model_name_to_info or sd_vae.loaded_vae_file is None else os.path.splitext(os.path.basename(sd_vae.loaded_vae_file))[0]) if p.full_quality else 'TAESD',
-        "Clip skip": p.clip_skip if p.clip_skip > 1 else None,
+        "Seed resize from": None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}",
         "Prompt2": p.refiner_prompt if len(p.refiner_prompt) > 0 else None,
         "Negative2": p.refiner_negative if len(p.refiner_negative) > 0 else None,
         "Styles": "; ".join(p.styles) if p.styles is not None and len(p.styles) > 0 else None,
         "Tiling": p.tiling if p.tiling else None,
         # sdnext
+        "Backend": 'Diffusers' if shared.native else 'Original',
         "App": 'SD.Next',
         "Version": git_commit,
-        "Backend": 'Diffusers' if shared.native else 'Original',
         "Pipeline": 'LDM',
-        "Parser": shared.opts.prompt_attention.split()[0],
         "Comment": comment,
         "Operations": '; '.join(ops).replace('"', '') if len(p.ops) > 0 else 'none',
     }
@@ -165,7 +166,9 @@ def create_infotext(p: StableDiffusionProcessing, all_prompts=None, all_seeds=No
         if isinstance(v, str):
             if len(v) == 0 or v == '0x0':
                 del args[k]
+    debug(f'Infotext: args={args}')
     params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in args.items()])
     negative_prompt_text = f"\nNegative prompt: {all_negative_prompts[index]}" if all_negative_prompts[index] else ""
     infotext = f"{all_prompts[index]}{negative_prompt_text}\n{params_text}".strip()
+    debug(f'Infotext: "{infotext}"')
     return infotext
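For illustration, with the reordered args above and None-valued entries dropped by this loop, an infotext now serializes roughly as (values made up):

    sample prompt
    Negative prompt: sample negative
    Size: 1024x1024, Sampler: Euler a, Steps: 20, Seed: 42, CFG scale: 6, Parser: native, Backend: Diffusers, App: SD.Next, Pipeline: LDM

Note that with the renamed parser values, shared.opts.prompt_attention.split()[0] in the "Parser" field becomes a no-op ('native' contains no spaces), whereas it previously shortened 'Full parser' to 'Full'.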
@@ -308,11 +308,11 @@ def parse_prompt_attention(text):
     res = []
     round_brackets = []
     square_brackets = []
-    if opts.prompt_attention == 'Fixed attention':
+    if opts.prompt_attention == 'fixed':
         res = [[text, 1.0]]
         debug(f'Prompt: parser="{opts.prompt_attention}" {res}')
         return res
-    elif opts.prompt_attention == 'Compel parser':
+    elif opts.prompt_attention == 'compel':
         conjunction = Compel.parse_prompt_string(text)
         if conjunction is None or conjunction.prompts is None or conjunction.prompts is None or len(conjunction.prompts[0].children) == 0:
             return [["", 1.0]]
@@ -321,7 +321,7 @@ def parse_prompt_attention(text):
             res.append([frag.text, frag.weight])
         debug(f'Prompt: parser="{opts.prompt_attention}" {res}')
         return res
-    elif opts.prompt_attention == 'A1111 parser':
+    elif opts.prompt_attention == 'a1111':
         re_attention = re_attention_v1
         whitespace = ''
     else:
@@ -360,7 +360,7 @@ def parse_prompt_attention(text):
     for i, part in enumerate(parts):
         if i > 0:
             res.append(["BREAK", -1])
-        if opts.prompt_attention == 'Full parser':
+        if opts.prompt_attention == 'native':
             part = re_clean.sub("", part)
             part = re_whitespace.sub(" ", part).strip()
             if len(part) == 0:
@@ -392,15 +392,15 @@ if __name__ == "__main__":
     log.info(f'Schedules: {all_schedules}')
     for schedule in all_schedules:
         log.info(f'Schedule: {schedule[0]}')
-        opts.data['prompt_attention'] = 'Fixed attention'
+        opts.data['prompt_attention'] = 'fixed'
         output_list = parse_prompt_attention(schedule[1])
         log.info(f' Fixed: {output_list}')
-        opts.data['prompt_attention'] = 'Compel parser'
+        opts.data['prompt_attention'] = 'compel'
         output_list = parse_prompt_attention(schedule[1])
         log.info(f' Compel: {output_list}')
-        opts.data['prompt_attention'] = 'A1111 parser'
+        opts.data['prompt_attention'] = 'a1111'
         output_list = parse_prompt_attention(schedule[1])
         log.info(f' A1111: {output_list}')
-        opts.data['prompt_attention'] = 'Full parser'
+        opts.data['prompt_attention'] = 'native'
         output_list = parse_prompt_attention(schedule[1])
         log.info(f' Full: {output_list}')
@@ -47,6 +47,7 @@ class PromptEmbedder:
         self.prompts = prompts
         self.negative_prompts = negative_prompts
         self.batchsize = len(self.prompts)
+        self.attention = None
         self.allsame = self.compare_prompts() # collapses batched prompts to single prompt if possible
         self.steps = steps
         self.clip_skip = clip_skip
@@ -75,6 +76,10 @@ class PromptEmbedder:
     def checkcache(self, p):
         if shared.opts.sd_textencoder_cache_size == 0:
             return False
+        if self.attention != shared.opts.prompt_attention:
+            debug(f"Prompt change: parser={shared.opts.prompt_attention}")
+            cache.clear()
+            return False

         def flatten(xss):
             return [x for xs in xss for x in xs]
@@ -97,23 +102,22 @@ class PromptEmbedder:
             'positive_pooleds': self.positive_pooleds,
             'negative_pooleds': self.negative_pooleds,
         }
-        debug(f"Prompt cache: Adding {key}")
+        debug(f"Prompt cache: add={key}")
         while len(cache) > int(shared.opts.sd_textencoder_cache_size):
             cache.popitem(last=False)
         if item:
             self.__dict__.update(cache[key])
             cache.move_to_end(key)
-            if self.allsame and len(self.prompt_embeds) < self.batchsize: # If current batch larger than cached
+            if self.allsame and len(self.prompt_embeds) < self.batchsize:
                 self.prompt_embeds = [self.prompt_embeds[0]] * self.batchsize
                 self.positive_pooleds = [self.positive_pooleds[0]] * self.batchsize
                 self.negative_prompt_embeds = [self.negative_prompt_embeds[0]] * self.batchsize
                 self.negative_pooleds = [self.negative_pooleds[0]] * self.batchsize
-            debug(f"Prompt cache: Retrieving {key}")
+            debug(f"Prompt cache: get={key}")
             return True

     def compare_prompts(self):
-        same = (self.prompts == [self.prompts[0]] * len(self.prompts) and
-                self.negative_prompts == [self.negative_prompts[0]] * len(self.negative_prompts))
+        same = (self.prompts == [self.prompts[0]] * len(self.prompts) and self.negative_prompts == [self.negative_prompts[0]] * len(self.negative_prompts))
        if same:
            self.prompts = [self.prompts[0]]
            self.negative_prompts = [self.negative_prompts[0]]
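Judging by the popitem(last=False) and move_to_end calls above, the module-level cache behaves as an LRU built on collections.OrderedDict. A self-contained sketch of that pattern (function names hypothetical):

    from collections import OrderedDict

    cache = OrderedDict()

    def cache_put(key, value, max_size):
        cache[key] = value
        while len(cache) > max_size:
            cache.popitem(last=False)  # drop the least-recently-used entry

    def cache_get(key):
        value = cache.get(key)
        if value is not None:
            cache.move_to_end(key)  # mark as most-recently-used
        return value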
@@ -123,6 +127,7 @@ class PromptEmbedder:
         self.positive_schedule, scheduled = get_prompt_schedule(prompt, self.steps)
         self.negative_schedule, neg_scheduled = get_prompt_schedule(negative_prompt, self.steps)
         self.scheduled_prompt = scheduled or neg_scheduled
+        debug(f"Prompt schedule: positive={self.positive_schedule} negative={self.negative_schedule} scheduled={scheduled}")

     def scheduled_encode(self, pipe, batchidx):
         prompt_dict = {} # index cache
@@ -138,20 +143,21 @@ class PromptEmbedder:
                 prompt_dict[positive_prompt+negative_prompt] = i

     def extend_embeds(self, batchidx, idx): # Extends scheduled prompt via index
-        self.prompt_embeds[batchidx].append(self.prompt_embeds[batchidx][idx])
-        self.negative_prompt_embeds[batchidx].append(self.negative_prompt_embeds[batchidx][idx])
+        if len(self.prompt_embeds[batchidx]) > 0:
+            self.prompt_embeds[batchidx].append(self.prompt_embeds[batchidx][idx])
+        if len(self.negative_prompt_embeds[batchidx]) > 0:
+            self.negative_prompt_embeds[batchidx].append(self.negative_prompt_embeds[batchidx][idx])
         if len(self.positive_pooleds[batchidx]) > 0:
             self.positive_pooleds[batchidx].append(self.positive_pooleds[batchidx][idx])
         if len(self.negative_pooleds[batchidx]) > 0:
             self.negative_pooleds[batchidx].append(self.negative_pooleds[batchidx][idx])

     def encode(self, pipe, positive_prompt, negative_prompt, batchidx):
-        if shared.opts.prompt_attention == "xhinker parser" or 'Flux' in pipe.__class__.__name__:
-            prompt_embed, positive_pooled, negative_embed, negative_pooled = get_xhinker_text_embeddings(
-                pipe, positive_prompt, negative_prompt, self.clip_skip)
+        self.attention = shared.opts.prompt_attention
+        if self.attention == "xhinker" or 'Flux' in pipe.__class__.__name__:
+            prompt_embed, positive_pooled, negative_embed, negative_pooled = get_xhinker_text_embeddings(pipe, positive_prompt, negative_prompt, self.clip_skip)
         else:
-            prompt_embed, positive_pooled, negative_embed, negative_pooled = get_weighted_text_embeddings(
-                pipe, positive_prompt, negative_prompt, self.clip_skip)
+            prompt_embed, positive_pooled, negative_embed, negative_pooled = get_weighted_text_embeddings(pipe, positive_prompt, negative_prompt, self.clip_skip)
         if prompt_embed is not None:
             self.prompt_embeds[batchidx].append(prompt_embed)
         if negative_embed is not None:
@@ -311,6 +317,7 @@ def get_tokens(msg, prompt):
             tokens.append(f'UNK_{i}')
     token_count = len(ids) - int(has_bos_token) - int(has_eos_token)
+    debug(f'Prompt tokenizer: type={msg} tokens={token_count} {tokens}')
     return token_count


 def normalize_prompt(pairs: list):
@@ -338,6 +345,12 @@ def get_prompts_with_weights(prompt: str):
     if shared.opts.prompt_mean_norm:
         texts_and_weights = normalize_prompt(texts_and_weights)
     texts, text_weights = zip(*texts_and_weights)
+    if debug_enabled:
+        all_tokens = 0
+        for text in texts:
+            tokens = get_tokens('section', text)
+            all_tokens += tokens
+        debug(f'Prompt tokenizer: parser={shared.opts.prompt_attention} tokens={all_tokens}')
     debug(f'Prompt: weights={texts_and_weights} time={(time.time() - t0):.3f}')
     return texts, text_weights
@@ -479,7 +492,7 @@ def get_weighted_text_embeddings(pipe, prompt: str = "", neg_prompt: str = "", c
             # negative prompt has no keywords
             embed, ntokens = embedding_providers[i].get_embeddings_for_weighted_prompt_fragments(text_batch=[negatives[i]], fragment_weights_batch=[negative_weights[i]], device=device, should_return_tokens=True)
             negative_prompt_embeds.append(embed)
-        debug(f'Prompt: unpadded shape={prompt_embeds[0].shape} TE{i+1} ptokens={torch.count_nonzero(ptokens)} ntokens={torch.count_nonzero(ntokens)} time={(time.time() - t0):.3f}')
+        debug(f'Prompt: unpadded={prompt_embeds[0].shape} TE{i+1} ptokens={torch.count_nonzero(ptokens)} ntokens={torch.count_nonzero(ntokens)} time={(time.time() - t0):.3f}')
     if SD3:
         t0 = time.time()
         pooled_prompt_embeds.append(embedding_providers[0].get_pooled_embeddings(texts=positives[0] if len(positives[0]) == 1 else [" ".join(positives[0])], device=device))
@@ -488,7 +501,7 @@ def get_weighted_text_embeddings(pipe, prompt: str = "", neg_prompt: str = "", c
         negative_pooled_prompt_embeds.append(embedding_providers[1].get_pooled_embeddings(texts=negatives[-1] if len(negatives[-1]) == 1 else [" ".join(negatives[-1])], device=device))
         pooled_prompt_embeds = torch.cat(pooled_prompt_embeds, dim=-1)
         negative_pooled_prompt_embeds = torch.cat(negative_pooled_prompt_embeds, dim=-1)
-        debug(f'Prompt: pooled shape={pooled_prompt_embeds[0].shape} time={(time.time() - t0):.3f}')
+        debug(f'Prompt: pooled={pooled_prompt_embeds[0].shape} time={(time.time() - t0):.3f}')
     elif prompt_embeds[-1].shape[-1] > 768:
         t0 = time.time()
         if shared.opts.diffusers_pooled == "weighted":
@@ -1305,7 +1305,7 @@ def get_weighted_text_embeddings_sd3(
     # ---------------------- get neg t5 embeddings -------------------------
     neg_prompt_tokens_3 = torch.tensor([neg_prompt_tokens_3], dtype=torch.long)

-    t5_neg_prompt_embeds = pipe.text_encoder_3(neg_prompt_tokens_3.to(pipe.pipe.text_encoder_3.device))[0].squeeze(0)
+    t5_neg_prompt_embeds = pipe.text_encoder_3(neg_prompt_tokens_3.to(pipe.text_encoder_3.device))[0].squeeze(0)
     t5_neg_prompt_embeds = t5_neg_prompt_embeds.to(device=pipe.text_encoder_3.device)

     # add weight to neg t5 embeddings
@@ -273,6 +273,40 @@ class OptionInfo:
         self.comment_after += " <span class='info'>(requires restart)</span>"
         return self

+    def validate(self, opt, value):
+        args = self.component_args if self.component_args is not None else {}
+        if callable(args):
+            try:
+                args = args()
+            except Exception:
+                args = {}
+        choices = args.get("choices", [])
+        if callable(choices):
+            try:
+                choices = choices()
+            except Exception:
+                choices = []
+        if len(choices) > 0:
+            if not isinstance(value, list):
+                value = [value]
+            for v in value:
+                if v not in choices:
+                    log.warning(f'Setting validation: "{opt}"="{v}" default="{self.default}" choices={choices}')
+                    return False
+        minimum = args.get("minimum", None)
+        maximum = args.get("maximum", None)
+        if (minimum is not None and value < minimum) or (maximum is not None and value > maximum):
+            log.error(f'Setting validation: "{opt}"={value} default={self.default} minimum={minimum} maximum={maximum}')
+            return False
+        return True
+
+    def __str__(self) -> str:
+        args = self.component_args if self.component_args is not None else {}
+        if callable(args):
+            args = args()
+        choices = args.get("choices", [])
+        return f'OptionInfo: label="{self.label}" section="{self.section}" component="{self.component}" default="{self.default}" refresh="{self.refresh is not None}" change="{self.onchange is not None}" args={args} choices={choices}'
+

 def options_section(section_identifier, options_dict):
     for v in options_dict.values():
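A quick sketch of how the new validate method behaves, mirroring the Options.load() change later in this diff: a saved value outside an option's declared choices (for example an old-style parser name) fails validation and can be reset to the default. Usage here is hypothetical; gr is the gradio alias used in this file.

    info = OptionInfo("native", "Prompt attention parser", gr.Radio, {"choices": ["native", "compel", "xhinker", "a1111", "fixed"]})
    ok = info.validate("prompt_attention", "Full parser")  # False: not in choices
    value = "Full parser" if ok else info.default          # -> "native"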
@@ -442,7 +476,7 @@ options_templates.update(options_section(('sd', "Execution & Models"), {
    "model_reuse_dict": OptionInfo(False, "Reuse loaded model dictionary", gr.Checkbox, {"visible": False}),
    "prompt_mean_norm": OptionInfo(False, "Prompt attention normalization", gr.Checkbox),
    "comma_padding_backtrack": OptionInfo(20, "Prompt padding", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1, "visible": not native }),
-    "prompt_attention": OptionInfo("Full parser", "Prompt attention parser", gr.Radio, {"choices": ["Full parser", "Compel parser", "xhinker parser", "A1111 parser", "Fixed attention"] }),
+    "prompt_attention": OptionInfo("native", "Prompt attention parser", gr.Radio, {"choices": ["native", "compel", "xhinker", "a1111", "fixed"] }),
    "latent_history": OptionInfo(16, "Latent history size", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
    "sd_checkpoint_cache": OptionInfo(0, "Cached models", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1, "visible": not native }),
    "sd_vae_checkpoint_cache": OptionInfo(0, "Cached VAEs", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1, "visible": False}),
@@ -994,7 +1028,7 @@ class Options:
         if filename is None:
             filename = self.filename
         if cmd_opts.freeze:
-            log.warning(f'Settings saving is disabled: {filename}')
+            log.warning(f'Setting: fn="{filename}" save disabled')
             return
         try:
             # output = json.dumps(self.data, indent=2)
@@ -1002,12 +1036,12 @@ class Options:
         unused_settings = []

         if os.environ.get('SD_CONFIG_DEBUG', None) is not None:
-            log.debug('Config: user settings')
+            log.debug('Settings: user')
             for k, v in self.data.items():
                 log.trace(f' Config: item={k} value={v} default={self.data_labels[k].default if k in self.data_labels else None}')
-            log.debug('Config: default settings')
+            log.debug('Settings: defaults')
             for k in self.data_labels.keys():
-                log.trace(f' Config: item={k} default={self.data_labels[k].default}')
+                log.trace(f' Setting: item={k} default={self.data_labels[k].default}')

         for k, v in self.data.items():
             if k in self.data_labels:
@@ -1022,9 +1056,9 @@ class Options:
                     unused_settings.append(k)
             writefile(diff, filename, silent=silent)
             if len(unused_settings) > 0:
-                log.debug(f"Unused settings: {unused_settings}")
+                log.debug(f"Settings: unused={unused_settings}")
         except Exception as err:
-            log.error(f'Save settings failed: {filename} {err}')
+            log.error(f'Settings: fn="{filename}" {err}')

     def save(self, filename=None, silent=False):
         threading.Thread(target=self.save_atomic, args=(filename, silent)).start()
@@ -1040,7 +1074,7 @@ class Options:
         if filename is None:
             filename = self.filename
         if not os.path.isfile(filename):
-            log.debug(f'Created default config: {filename}')
+            log.debug(f'Settings: fn="{filename}" created')
             self.save(filename)
             return
         self.data = readfile(filename, lock=True)
@@ -1048,13 +1082,16 @@ class Options:
             self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')]
         unknown_settings = []
         for k, v in self.data.items():
-            info = self.data_labels.get(k, None)
+            info: OptionInfo = self.data_labels.get(k, None)
+            if not info.validate(k, v):
+                self.data[k] = info.default
             if info is not None and not self.same_type(info.default, v):
-                log.error(f"Error: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})")
+                log.warning(f"Setting validation: {k}={v} ({type(v).__name__} expected={type(info.default).__name__})")
                 self.data[k] = info.default
             if info is None and k not in compatibility_opts and not k.startswith('uiux_'):
                 unknown_settings.append(k)
         if len(unknown_settings) > 0:
-            log.debug(f"Unknown settings: {unknown_settings}")
+            log.warning(f"Setting validation: unknown={unknown_settings}")

     def onchange(self, key, func, call=True):
         item = self.data_labels.get(key)
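One caveat worth flagging in this hunk: info.validate(k, v) runs before the info is None check below it, so a key missing from data_labels (exactly the case the unknown_settings branch handles) would raise AttributeError. A guarded call would look like:

    if info is not None and not info.validate(k, v):
        self.data[k] = info.default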
@@ -258,7 +258,7 @@ class Script(scripts.Script):
         shared.log.debug(f'AnimateDiff args: {p.task_args}')
         set_prompt(p)
         orig_prompt_attention = shared.opts.prompt_attention
-        shared.opts.data['prompt_attention'] = 'Fixed attention'
+        shared.opts.data['prompt_attention'] = 'fixed'
         processed: processing.Processed = processing.process_images(p) # runs processing using main loop
         shared.opts.data['prompt_attention'] = orig_prompt_attention
         devices.torch_gc()
@@ -49,7 +49,7 @@ class Script(scripts.Script):
         from modules.ctrlx.utils import get_self_recurrence_schedule

         orig_prompt_attention = shared.opts.prompt_attention
-        shared.opts.data['prompt_attention'] = 'Fixed attention'
+        shared.opts.data['prompt_attention'] = 'fixed'
         shared.sd_model = sd_models.switch_pipe(CtrlXStableDiffusionXLPipeline, shared.sd_model)
         shared.sd_model.restore_pipeline = self.restore
@@ -44,7 +44,7 @@ class Script(scripts.Script):
         orig_offload = shared.opts.diffusers_model_cpu_offload
         orig_prompt_attention = shared.opts.prompt_attention
         shared.opts.data['diffusers_model_cpu_offload'] = False
-        shared.opts.data['prompt_attention'] = 'Fixed attention'
+        shared.opts.data['prompt_attention'] = 'fixed'
         # shared.sd_model.maybe_free_model_hooks() # ledits is not compatible with offloading
         # shared.sd_model.has_accelerate = False
         sd_models.move_model(shared.sd_model, devices.device, force=True)
@@ -66,7 +66,7 @@ class Script(scripts.Script):
             shared.sd_model = orig_pipeline
             return
         sd_models.set_diffuser_options(shared.sd_model)
-        shared.opts.data['prompt_attention'] = 'Fixed attention' # this pipeline is not compatible with embeds
+        shared.opts.data['prompt_attention'] = 'fixed' # this pipeline is not compatible with embeds
         shared.sd_model.to(torch.float32) # this pipeline unet is not compatible with fp16
         processing.fix_seed(p)
         # set pipeline specific params, note that standard params are applied when applicable
@@ -87,7 +87,7 @@ class Script(scripts.Script):
         # mulan only works with single image, single prompt and in fixed attention
         p.batch_size = 1
         p.n_iter = 1
-        shared.opts.prompt_attention = 'Fixed attention'
+        shared.opts.prompt_attention = 'fixed'
         if isinstance(p.prompt, list):
             p.prompt = p.prompt[0]
         p.task_args['prompt'] = p.prompt
@@ -64,7 +64,7 @@ class Script(scripts.Script):
             shared.sd_model = orig_pipeline
             return
         sd_models.set_diffuser_options(shared.sd_model)
-        shared.opts.data['prompt_attention'] = 'Fixed attention' # this pipeline is not compatible with embeds
+        shared.opts.data['prompt_attention'] = 'fixed' # this pipeline is not compatible with embeds
         processing.fix_seed(p)
         # set pipeline specific params, note that standard params are applied when applicable
         rp_args = {
@@ -107,7 +107,7 @@ class Script(scripts.Script):
             pipe.to(device=devices.device, dtype=devices.dtype)
         except Exception:
             pass
-        shared.opts.data['prompt_attention'] = 'Fixed attention'
+        shared.opts.data['prompt_attention'] = 'fixed'
         prompt = shared.prompt_styles.apply_styles_to_prompt(p.prompt, p.styles)
         negative = shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)
         p.task_args['prompt'] = prompt
@@ -37,6 +37,7 @@ class SharedSettingsStackHelper(object):
     sd_text_encoder = None
     extra_networks_default_multiplier = None
     disable_weights_auto_swap = None
+    prompt_attention = None

     def __enter__(self):
         #Save overridden settings so they can be restored later.
@@ -52,6 +53,7 @@ class SharedSettingsStackHelper(object):
         self.sd_text_encoder = shared.opts.sd_text_encoder
         self.extra_networks_default_multiplier = shared.opts.extra_networks_default_multiplier
         self.disable_weights_auto_swap = shared.opts.disable_weights_auto_swap
+        self.prompt_attention = shared.opts.prompt_attention
         shared.opts.data["disable_weights_auto_swap"] = False

     def __exit__(self, exc_type, exc_value, tb):
@@ -62,6 +64,7 @@ class SharedSettingsStackHelper(object):
         shared.opts.data["tome_ratio"] = self.tome_ratio
         shared.opts.data["todo_ratio"] = self.todo_ratio
         shared.opts.data["extra_networks_default_multiplier"] = self.extra_networks_default_multiplier
+        shared.opts.data["prompt_attention"] = self.prompt_attention
         if self.sd_model_checkpoint != shared.opts.sd_model_checkpoint:
             shared.opts.data["sd_model_checkpoint"] = self.sd_model_checkpoint
             sd_models.reload_model_weights(op='model')
@@ -92,6 +95,7 @@ axis_options = [
    AxisOption("[Model] Dictionary", str, apply_dict, fmt=format_value_add_label, cost=0.9, choices=lambda: ['None'] + list(sd_models.checkpoints_list)),
    AxisOption("[Prompt] Search & replace", str, apply_prompt, fmt=format_value_add_label),
    AxisOption("[Prompt] Prompt order", str_permutations, apply_order, fmt=format_value_join_list),
+    AxisOption("[Prompt] Prompt parser", str, apply_setting("prompt_attention"), choices=lambda: ["native", "compel", "xhinker", "a1111", "fixed"]),
    AxisOption("[Network] LoRA", str, apply_lora, cost=0.5, choices=list_lora),
    AxisOption("[Network] LoRA strength", float, apply_setting('extra_networks_default_multiplier')),
    AxisOption("[Network] Styles", str, apply_styles, choices=lambda: [s.name for s in shared.prompt_styles.styles.values()]),
wiki
@@ -1 +1 @@
-Subproject commit 47ea50e9152a13325dd1daf92bc50b700783182f
+Subproject commit 352fc655b0dc9edb22aac093186da087ba18b474