fix gallery save/delete

Signed-off-by: vladmandic <mandic00@live.com>
pull/4562/head
vladmandic 2026-01-20 09:53:20 +01:00
parent 8cf27dffdb
commit f9aa2591e5
22 changed files with 65 additions and 53 deletions

View File

@ -1,8 +1,8 @@
# Change Log for SD.Next
## Update for 2025-01-19
## Update for 2025-01-20
### Highlights for 2025-01-19
### Highlights for 2025-01-20
First release of 2026 brings quite a few new models: **Flux.2-Klein, Qwen-Image-2512, LTX-2-Dev, GLM-Image**
There are also improvements to SDNQ quantization engine, updated Prompt Enhance and many others.
@ -11,7 +11,7 @@ For full list of changes, see full changelog.
[ReadMe](https://github.com/vladmandic/automatic/blob/master/README.md) | [ChangeLog](https://github.com/vladmandic/automatic/blob/master/CHANGELOG.md) | [Docs](https://vladmandic.github.io/sdnext-docs/) | [WiKi](https://github.com/vladmandic/automatic/wiki) | [Discord](https://discord.com/invite/sd-next-federal-batch-inspectors-1101998836328697867) | [Sponsor](https://github.com/sponsors/vladmandic)
### Details for 2025-01-19
### Details for 2025-01-20
- **Models**
- [Flux.2 Klein](https://bfl.ai/blog/flux2-klein-towards-interactive-visual-intelligence)
@ -80,31 +80,32 @@ For full list of changes, see full changelog.
- refactor handling of seeds
- allow unsafe ssl context for downloads
- **Fixes**
- controlnet: controlnet with non-english ui locales
- core: add skip_keys to offloading logic, fixes wan frames mismatch, thanks @ryanmeador
- core: force model move on offload=none
- core: hidiffusion tracing
- core: hip device name detection
- core: reduce triton test verbosity
- core: switch processing class not restoring params
- extension tab: update checker, date handling, formatting etc., thanks @awsr
- controlnet with non-english ui locales
- update civitai base models, thanks @trojaner
- wildcards with folder specification
- z-image single-file loader
- hip device name detection
- force align width/height to vae scale factor
- meituan-longcat-image-edit missing image param
- mobile auto-collapse when using side panel, thanks @awsr
- switch processing class not restoring params
- hidiffusion tracing
- add skip_keys to offloading logic, fixes wan frames mismatch, thanks @ryanmeador
- wan 2.2 i2v
- force model move on offload=none
- kandinsky-5 image and video on non-cuda platforms
- lora loading when using torch without distributed support
- generate slowdown when consecutive lora-diffusers enabled
- google-genai auth, thanks @CalamitousFelicitousness
- reduce triton test verbosity
- improve qwen i2i handling
- networks filter by model type
- networks icon/list view type switch, thanks @awsr
- lora skip with strength zero
- lora force unapply on change
- lora handle null description, thanks @CalamitousFelicitousness
- lora loading when using torch without distributed support
- lora skip with strength zero
- lora: generate slowdown when consecutive lora-diffusers enabled
- model: google-genai auth, thanks @CalamitousFelicitousness
- model: improve qwen i2i handling
- model: kandinsky-5 image and video on non-cuda platforms
- model: meituan-longcat-image-edit missing image param
- model: wan 2.2 i2v
- model: z-image single-file loader
- other: update civitai base models, thanks @trojaner
- ui: gallery save/delete
- ui: mobile auto-collapse when using side panel, thanks @awsr
- ui: networks filter by model type
- ui: networks icon/list view type switch, thanks @awsr
- vae: force align width/height to vae scale factor
- wildcards with folder specification
## Update for 2025-12-26

View File

@ -47,6 +47,7 @@
TODO: Investigate which models are diffusers-compatible and prioritize!
- [Bria FiboEdit](https://github.com/huggingface/diffusers/commit/d7a1c31f4f85bae5a9e01cdce49bd7346bd8ccd6)
- [LTXVideo 0.98 LongMulti](https://github.com/huggingface/diffusers/pull/12614)
- [Cosmos-Predict-2.5](https://huggingface.co/nvidia/Cosmos-Predict2.5-2B)
- [NewBie Image Exp0.1](https://github.com/huggingface/diffusers/pull/12803)

View File

@ -33,9 +33,10 @@ function clip_gallery_urls(gallery) {
}
function isVisible(el) {
if (!el) return false;
const rect = el.getBoundingClientRect();
if (rect.width === 0 && rect.height === 0) return false;
return rect.top >= 0 && rect.left >= 0 && rect.bottom <= (window.innerHeight || document.documentElement.clientHeight) && rect.right <= (window.innerWidth || document.documentElement.clientWidth);
return (rect.top >= 0) && (rect.left >= 0) && (rect.bottom <= (window.innerHeight || document.documentElement.clientHeight)) && (rect.right <= (window.innerWidth || document.documentElement.clientWidth));
}
function all_gallery_buttons() {
@ -66,15 +67,24 @@ function selected_gallery_index() {
return result;
}
function selected_gallery_files() {
function selected_gallery_files(tabname) {
let allImages = [];
let allThumbnails;
if (tabname && tabname !== 'gallery') allThumbnails = gradioApp().querySelectorAll('div[id$=_gallery].gradio-gallery .thumbnail-item.thumbnail-small');
else allThumbnails = gradioApp().querySelectorAll('.gradio-gallery .thumbnails > .thumbnail-item.thumbnail-small');
try {
let allCurrentButtons = gradioApp().querySelectorAll('[style="display: block;"].tabitem div[id$=_gallery].gradio-gallery .thumbnail-item.thumbnail-small');
if (allCurrentButtons.length === 0) allCurrentButtons = gradioApp().querySelectorAll('.gradio-gallery .thumbnails > .thumbnail-item.thumbnail-small');
allImages = Array.from(allCurrentButtons).map((v) => v.querySelector('img')?.src);
allImages = allImages.filter((el) => isVisible(el));
} catch { /**/ }
const selectedIndex = selected_gallery_index();
allImages = Array.from(allThumbnails).map((v) => v.querySelector('img'));
if (tabname && tabname !== 'gallery') allImages = allImages.filter((img) => isVisible(img));
allImages = allImages.map((img) => {
let fn = img.src;
if (fn.includes('file=')) fn = fn.split('file=')[1];
return decodeURI(fn);
});
} catch (err) {
error(`selected_gallery_files: ${err}`);
}
let selectedIndex = -1;
if (tabname && tabname !== 'gallery') selectedIndex = selected_gallery_index();
return [allImages, selectedIndex];
}

View File

@ -1081,7 +1081,7 @@ class StableDiffusionXLPipelineAPG(
latents,
)
# 6. Prepare extra step kwargs.
# 6. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Prepare added time ids & embeddings

View File

@ -965,7 +965,7 @@ class StableDiffusionPipelineAPG(
latents,
)
# 6. Prepare extra step kwargs.
# 6. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 6.1 Add image embeds for IP-Adapter

View File

@ -882,7 +882,7 @@ class StableDiffusionXLInstantIDPipeline(StableDiffusionXLControlNetPipeline):
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
).to(device=device, dtype=latents.dtype)
# 7. Prepare extra step kwargs.
# 7. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7.1 Create tensor stating which controlnets to keep

View File

@ -679,7 +679,7 @@ class PhotoMakerStableDiffusionXLPipeline(StableDiffusionXLPipeline):
latents,
)
# 9. Prepare extra step kwargs.
# 9. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 10. Prepare added time ids & embeddings

View File

@ -550,7 +550,7 @@ def make_diffusers_sdxl_contrtolnet_ppl(block_class):
# # scale the initial noise by the standard deviation required by the scheduler
# latents = latents * self.scheduler.init_noise_sigma
# 7. Prepare extra step kwargs.
# 7. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7.1 Create tensor stating which controlnets to keep

View File

@ -128,7 +128,7 @@ class OnnxStableDiffusionUpscalePipeline(diffusers.OnnxStableDiffusionUpscalePip
" `pipeline.unet` or your `image` input."
)
# 8. Prepare extra step kwargs.
# 8. Prepare extra step kwargs.
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:

View File

@ -1268,7 +1268,7 @@ class StableDiffusionPAGPipeline(
latents,
)
# 6. Prepare extra step kwargs.
# 6. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 6.1 Add image embeds for IP-Adapter

View File

@ -1366,7 +1366,7 @@ class StableDiffusionXLPAGPipeline(
latents,
)
# 6. Prepare extra step kwargs.
# 6. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Prepare added time ids & embeddings

View File

@ -88,7 +88,7 @@ def delete_files(js_data, files, all_files, index):
continue
if os.path.exists(fn) and os.path.isfile(fn):
deleted.append(fn)
os.remove(fn)
# os.remove(fn)
if fn in all_files:
all_files.remove(fn)
shared.log.info(f'Delete: image="{fn}"')
@ -312,7 +312,7 @@ def create_output_panel(tabname, preview=True, prompt=None, height=None, transfe
outputs=[download_files, html_log],
)
delete.click(fn=call_queue.wrap_gradio_call(delete_files), show_progress='hidden',
_js="(x, y, i, j) => [x, y, ...selected_gallery_files()]",
_js=f'(x, y, i, j) => [x, y, ...selected_gallery_files("{tabname}")]',
inputs=[generation_info, result_gallery, html_info, html_info],
outputs=[result_gallery, html_log],
)

View File

@ -322,7 +322,7 @@ class ConsistoryExtendAttnSDXLPipeline(
latents,
)
# 6. Prepare extra step kwargs.
# 6. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
if share_queries:

View File

@ -1756,7 +1756,7 @@ class StableDiffusionDiffImg2ImgPipeline(DiffusionPipeline):
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
# 7. Prepare extra step kwargs.
# 7. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
map = torchvision.transforms.Resize(tuple(s // self.vae_scale_factor for s in image.shape[2:]),antialias=None)(map)

View File

@ -873,7 +873,7 @@ class StableDiffusionXLFreeScale(DiffusionPipeline, FromSingleFileMixin, LoraLoa
latents,
)
# 6. Prepare extra step kwargs.
# 6. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Prepare added time ids & embeddings

View File

@ -902,7 +902,7 @@ class StableDiffusionXLFreeScaleImg2Img(DiffusionPipeline, FromSingleFileMixin,
latents,
)
# 6. Prepare extra step kwargs.
# 6. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Prepare added time ids & embeddings

View File

@ -1405,7 +1405,7 @@ class InstantIRPipeline(
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
).to(device=device, dtype=latents.dtype)
# 7. Prepare extra step kwargs.
# 7. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7.1 Create tensor stating which controlnets to keep

View File

@ -1039,7 +1039,7 @@ class StableDiffusionXLTilingPipeline(
if isinstance(self.scheduler, LMSDiscreteScheduler):
latents = latents * self.scheduler.sigmas[0]
# 5. Prepare extra step kwargs.
# 5. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 6. Prepare added time ids & embeddings

View File

@ -1384,7 +1384,7 @@ class PixelSmithXLPipeline(
latents,
)
# 6. Prepare extra step kwargs.
# 6. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Prepare added time ids & embeddings

View File

@ -833,7 +833,7 @@ class StableDiffusionXLAdapterPipeline(DiffusionPipeline, FromSingleFileMixin, L
latents_sd1_5,
)
# 6. Prepare extra step kwargs.
# 6. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Prepare added time ids & embeddings

View File

@ -934,7 +934,7 @@ class StableDiffusionXLAdapterControlnetPipeline(DiffusionPipeline, FromSingleFi
latents_sd1_5,
)
# 6. Prepare extra step kwargs.
# 6. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Prepare added time ids & embeddings

View File

@ -941,7 +941,7 @@ class StableDiffusionXLAdapterControlnetI2IPipeline(DiffusionPipeline, FromSingl
generator,
)
# 6. Prepare extra step kwargs.
# 6. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Prepare added time ids & embeddings