Signed-off-by: Vladimir Mandic <mandic00@live.com>
pull/3696/head
Vladimir Mandic 2025-01-10 08:25:00 -05:00
parent 663b0ed800
commit ac2adaa299
12 changed files with 2 additions and 24 deletions

View File

@@ -1010,7 +1010,6 @@ def ensure_base_requirements():
setuptools_version = '69.5.1'
def update_setuptools():
# print('Install base requirements')
global pkg_resources, setuptools, distutils # pylint: disable=global-statement
# python may ship with incompatible setuptools
subprocess.run(f'"{sys.executable}" -m pip install setuptools=={setuptools_version}', shell=True, check=False, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

View File

@@ -169,8 +169,6 @@ class IPAdapterPlus(torch.nn.Module):
if "latents" in state_dict["image_proj"] and "latents" in self.image_proj.state_dict():
# Check if the shapes are mismatched
if state_dict["image_proj"]["latents"].shape != self.image_proj.state_dict()["latents"].shape:
print(f"Shapes of 'image_proj.latents' in checkpoint {ckpt_path} and current model do not match.")
print("Removing 'latents' from checkpoint and loading the rest of the weights.")
del state_dict["image_proj"]["latents"]
strict_load_image_proj_model = False

View File

@@ -89,9 +89,7 @@ class LinFusion(ModelMixin, ConfigMixin):
pipe_name_path = pipe_name_path or pipeline._internal_dict._name_or_path # pylint: disable=protected-access
pretrained_model_name_or_path = model_dict.get(pipe_name_path, None)
if pretrained_model_name_or_path:
print(
f"Matching LinFusion '{pretrained_model_name_or_path}' for pipeline '{pipe_name_path}'."
)
pass
else:
raise RuntimeError(
f"LinFusion not found for pipeline [{pipe_name_path}], please provide the path."

View File

@@ -205,8 +205,6 @@ def _convert_to_ai_toolkit_cat(sds_sd, ait_sd, sds_key, ait_keys, dims=None):
up_weight[i : i + dims[j], k * ait_rank : (k + 1) * ait_rank] == 0
)
i += dims[j]
# if is_sparse:
# print(f"weight is sparse: {sds_key}")
# make ai-toolkit weight
ait_down_keys = [k + ".lora_down.weight" for k in ait_keys]

View File

@@ -670,15 +670,11 @@ class Simple_UVitBlock(nn.Module):
self.upsample = None
def forward(self, x):
# print("before,", x.shape)
if self.downsample is not None:
# print('downsample')
x = self.downsample(x)
if self.upsample is not None:
# print('upsample')
x = self.upsample(x)
# print("after,", x.shape)
return x

View File

@@ -259,7 +259,6 @@ class OmniGen(nn.Module, PeftAdapterMixin):
left = (self.pos_embed_max_size - width) // 2
spatial_pos_embed = self.pos_embed.reshape(1, self.pos_embed_max_size, self.pos_embed_max_size, -1)
spatial_pos_embed = spatial_pos_embed[:, top : top + height, left : left + width, :]
# print(top, top + height, left, left + width, spatial_pos_embed.size())
spatial_pos_embed = spatial_pos_embed.reshape(1, -1, spatial_pos_embed.shape[-1])
return spatial_pos_embed

View File

@@ -681,10 +681,6 @@ class VectorQuantizer(nn.Module):
if self.unknown_index == "extra":
self.unknown_index = self.re_embed
self.re_embed = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices."
)
else:
self.re_embed = n_e

View File

@@ -222,7 +222,6 @@ class HFTextEncoder(nn.Module):
encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer
layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
print(f"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model")
embeddings = getattr(
self.transformer, arch_dict[self.config.model_type]["config_names"]["token_embeddings_attr"])
modules = [embeddings, *layer_list][:-unlocked_layers]

View File

@@ -820,7 +820,6 @@ class DCSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
loss.backward()
optimizer.step()
ratio_bound = bound_func(ratio_param)
print(f'iter [{iter_}]', ratio_bound.item(), loss.item())
torch.cuda.empty_cache()
return ratio_bound.data.detach().item()

View File

@@ -117,8 +117,7 @@ class TDDScheduler(DPMSolverSinglestepScheduler):
timesteps = tcd_origin_timesteps[inference_indices]
if self.special_jump:
if self.tdd_train_step == 50:
#timesteps = np.array([999., 879., 759., 499., 259.])
print(timesteps)
pass
elif self.tdd_train_step == 250:
if num_inference_steps == 5:
timesteps = np.array([999., 875., 751., 499., 251.])
@@ -203,7 +202,6 @@ class TDDScheduler(DPMSolverSinglestepScheduler):
sigmas_s = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
if self.config.use_karras_sigmas:
print("have not write")
pass
else:
sigmas_s = np.interp(timesteps_s, np.arange(0, len(sigmas_s)), sigmas_s)

View File

@@ -355,7 +355,6 @@ class VDMScheduler(SchedulerMixin, ConfigMixin):
)
# 3. Clip or threshold "predicted x_0"
# print({ 'timestep': timestep.item(), 'min': pred_original_sample.min().item(), 'max': pred_original_sample.max().item(), 'alpha': alpha.item(), 'sigma': sigma.item() })
if self.config.thresholding:
pred_original_sample = self._threshold_sample(pred_original_sample)
elif self.config.clip_sample:

View File

@@ -25,7 +25,6 @@ def init_generator(device: torch.device, fallback: torch.Generator = None):
"""
Forks the current default random generator given device.
"""
print(f"init_generator device = {device}")
if device.type == "cpu":
return torch.Generator(device="cpu").set_state(torch.get_rng_state())
elif device.type == "cuda":