reduce use of python generators with ui

Signed-off-by: vladmandic <mandic00@live.com>
pull/4667/head
vladmandic 2026-03-01 18:17:42 +01:00
parent 9ad68c2ff4
commit ff4b5c33dc
7 changed files with 112 additions and 27 deletions

View File

@ -1,8 +1,8 @@
# Change Log for SD.Next
## Update for 2026-02-27
## Update for 2026-03-01
### Highlights for 2026-02-27
### Highlights for 2026-03-01
This release brings massive code refactoring to modernize the codebase and remove some obsolete features. Leaner & Faster!
And since it's a bit of a quieter period when it comes to new models, we have two deep fine-tunes: *FireRed-Image-Edit* and *SkyWorks-UniPic-3*
@ -11,7 +11,7 @@ But also many smaller quality-of-life improvements - for full details, see [Chan
[ReadMe](https://github.com/vladmandic/automatic/blob/master/README.md) | [ChangeLog](https://github.com/vladmandic/automatic/blob/master/CHANGELOG.md) | [Docs](https://vladmandic.github.io/sdnext-docs/) | [WiKi](https://github.com/vladmandic/automatic/wiki) | [Discord](https://discord.com/invite/sd-next-federal-batch-inspectors-1101998836328697867) | [Sponsor](https://github.com/sponsors/vladmandic)
### Details for 2026-02-27
### Details for 2026-03-01
- **Models**
- [Google Flash 3.1 Image](https://ai.google.dev/gemini-api/docs/models/gemini-3-flash-preview) a.k.a. *Nano Banana 2*
@ -36,7 +36,7 @@ But also many smaller quality-of-life improvements - for full details, see [Chan
- **Compute**
- **ROCm** support for additional AMD GPUs: `gfx103X`, thanks @crashingalexsan
- **Cuda** `torch==2.10` removed support for `rtx1000` series, use following before first startup:
> set TORCH_COMMAND='torch==2.9.1 torchvision==0.24.1 torchaudio==2.9.1 --index-url https://download.pytorch.org/whl/cu126'
> `set TORCH_COMMAND='torch==2.9.1 torchvision==0.24.1 torchaudio==2.9.1 --index-url https://download.pytorch.org/whl/cu126'`
- **UI**
- **localization** improved translation quality and new translations locales:
*en, en1, en2, en3, en4, hr, es, it, fr, de, pt, ru, zh, ja, ko, hi, ar, bn, ur, id, vi, tr, sr, po, he, xx, yy, qq, tlh*
@ -74,6 +74,7 @@ But also many smaller quality-of-life improvements - for full details, see [Chan
- remove requirements: `clip`, `open-clip`
- captioning part-2, thanks @CalamitousFelicitousness
- add new build of `insightface`, thanks @hameerabbasi
- reduce use of generators with ui interactor
- **Obsolete**
- remove `normalbae` pre-processor
- remove `dwpose` pre-processor

View File

@ -1,4 +1,5 @@
class ConnectionMonitorState {
static ws = undefined;
static delay = 1000;
static element;
static version = '';
@ -56,16 +57,14 @@ async function updateIndicator(online, data = {}, msg = undefined) {
async function wsMonitorLoop(url) {
try {
const ws = new WebSocket(`${url}/queue/join`);
ws.onopen = () => {};
ws.onmessage = (evt) => updateIndicator(true);
ws.onclose = () => {
// happens regularly if there is no traffic
setTimeout(() => wsMonitorLoop(url), ConnectionMonitorState.delay);
ConnectionMonitorState.ws = new WebSocket(`${url}/queue/join`);
ConnectionMonitorState.ws.onopen = () => {};
ConnectionMonitorState.ws.onmessage = (evt) => updateIndicator(true);
ConnectionMonitorState.ws.onclose = () => {
setTimeout(() => wsMonitorLoop(url), ConnectionMonitorState.delay); // happens regularly if there is no traffic
};
ws.onerror = (e) => {
// actual error
updateIndicator(false, {}, e.message);
ConnectionMonitorState.ws.onerror = (e) => {
updateIndicator(false, {}, e.message); // actual error
setTimeout(() => wsMonitorLoop(url), ConnectionMonitorState.delay);
};
} catch (e) {

View File

@ -66,7 +66,6 @@ def add_http_args(p):
p.add_argument("--share", default=env_flag("SD_SHARE", False), action='store_true', help="Enable UI accessible through Gradio site, default: %(default)s")
p.add_argument("--insecure", default=env_flag("SD_INSECURE", False), action='store_true', help="Enable extensions tab regardless of other options, default: %(default)s")
p.add_argument("--listen", default=env_flag("SD_LISTEN", False), action='store_true', help="Launch web server using public IP address, default: %(default)s")
p.add_argument("--remote", default=env_flag("SD_REMOTE", False), action='store_true', help="Reduce client-server communication, default: %(default)s")
p.add_argument("--port", type=int, default=os.environ.get("SD_PORT", 7860), help="Launch web server with given server port, default: %(default)s")

View File

@ -500,7 +500,7 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
if not cap.isOpened():
if is_generator:
yield terminate(f'Video open failed: path={inputs}')
return [], '', '', 'Error: video open failed'
return terminate(f'Video open failed: path={inputs}')
frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
w, h = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
@ -513,7 +513,7 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
except Exception as e:
if is_generator:
yield terminate(f'Video open failed: path={inputs} {e}')
return [], '', '', 'Error: video open failed'
return terminate(f'Video open failed: path={inputs} {e}')
while status:
processed_image = None
@ -535,7 +535,7 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
shared.state.interrupted = False
if is_generator:
yield terminate('Interrupted')
return [], '', '', 'Interrupted'
return terminate('Interrupted')
# get input
if isinstance(input_image, str) and os.path.exists(input_image):
try:
@ -580,9 +580,8 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
and getattr(p, 'init_images', None) is None \
and getattr(p, 'image', None) is None:
if is_generator:
log.debug(f'Control args: {p.task_args}')
yield terminate(f'Mode={p.extra_generation_params.get("Control type", None)} input image is none')
return [], '', '', 'Error: Input image is none'
return terminate(f'Mode={p.extra_generation_params.get("Control type", None)} input image is none')
if unit_type == 'lite':
instance.apply(selected_models, processed_image, control_conditioning)
@ -695,7 +694,6 @@ def control_run(state: str = '', # pylint: disable=keyword-arg-before-vararg
if len(info_txt) > 0:
html_txt = html_txt + infotext_to_html(info_txt[0])
if is_generator:
jobid = shared.state.begin('UI')
yield (output_images, blended_image, html_txt, output_filename)
shared.state.end(jobid)
return (output_images, blended_image, html_txt, output_filename)
else:
return (output_images, blended_image, html_txt, output_filename)

View File

@ -121,7 +121,91 @@ def Blocks_get_config_file(self, *args, **kwargs):
return config
def reset_gradio_sessions(job_id):
    """Force-reset any live gradio iterators for the session matching *job_id*.

    Gradio keeps per-session generator iterators in ``app.iterators``; when a UI
    job is interrupted those can be left dangling and keep the frontend disabled.
    This clears them under the app lock and marks them for reset so the queue
    recovers. Best-effort: any failure is logged, never raised.

    NOTE(review): assumes the gradio session hash equals *job_id* — confirm against callers.
    """
    import asyncio
    from modules import shared
    try:
        app = shared.demo.app
        session_hash = job_id
        if session_hash in app.iterators and len(app.iterators[session_hash]) > 0:
            async def force_reset():
                async with app.lock:
                    for fn_index in list(app.iterators[session_hash].keys()):
                        app.iterators[session_hash][fn_index] = None
                        if session_hash not in app.iterators_to_reset:
                            app.iterators_to_reset[session_hash] = set()
                        app.iterators_to_reset[session_hash].add(fn_index)
            # asyncio.get_event_loop() is deprecated outside a running loop (py3.10+),
            # so probe for a running loop explicitly instead
            try:
                loop = asyncio.get_running_loop()
            except RuntimeError:
                loop = None
            if loop is not None:
                asyncio.ensure_future(force_reset())  # noqa: RUF006
            else:
                asyncio.run(force_reset())  # no running event loop: create one for the reset
            log.debug(f'Gradio reset: job={job_id} session={session_hash}')
    except Exception as e:
        log.error(f'Gradio reset: {e}')
def patch_gradio():
    """Monkey-patch gradio queue/session internals so cancelled or failed
    generator-based UI jobs do not leave the frontend permanently disabled.

    Wraps three gradio entry points:
    - ``utils.cancel_tasks``: log which tasks are being cancelled
    - ``route_utils.restore_session_state``: clear the dangling iterator when a
      ``GeneratorExit`` escapes session restore, then re-raise
    - ``queueing.Queue.call_prediction``: convert ``GeneratorExit`` and
      None/empty responses into a terminal "not generating" payload
    """
    orig_cancel_tasks = gradio.utils.cancel_tasks
    orig_restore_session_state = gradio.route_utils.restore_session_state
    orig_call_prediction = gradio.queueing.Queue.call_prediction

    async def wrap_cancel_tasks(task_ids: set[str]):
        # log-and-delegate wrapper; behavior is unchanged
        log.error(f'Gradio cancel: task={task_ids}')
        return await orig_cancel_tasks(task_ids)

    def wrap_restore_session_state(*args, **kwargs):
        # supports both positional and keyword calling conventions
        app = kwargs.get("app", args[0] if len(args) > 0 else None)
        body = kwargs.get("body", args[1] if len(args) > 1 else None)
        session_hash = getattr(body, "session_hash", None)
        fn_index = getattr(body, "fn_index", None)
        try:
            return orig_restore_session_state(*args, **kwargs)
        except GeneratorExit:
            # force proper iterator cleanup when GeneratorExit occurs
            if (
                app is not None
                and session_hash is not None
                and fn_index is not None
                and session_hash in app.iterators
                and fn_index in app.iterators[session_hash]
            ):
                try:
                    app.iterators[session_hash][fn_index] = None
                    app.iterators_to_reset[session_hash].add(fn_index)
                    log.debug(f"Gradio reset: session={session_hash} fn={fn_index}")
                except Exception as e:
                    log.error(f"Gradio reset: {e}")
            raise

    async def wrap_call_prediction(self, events, batch):
        try:
            response = await orig_call_prediction(self, events, batch)
        except GeneratorExit as e:
            # generator torn down mid-prediction: return a terminal payload so the UI re-enables
            log.error(f"Gradio queue: events={len(events)} batch={batch} error: {e}")
            return {"is_generating": False, "data": [None, None, None, None, "cancelled", ""], "error": None}
        except BaseException as e:  # single branch: Exception/BaseException handlers were identical (log + re-raise)
            log.error(f"Gradio queue: events={len(events)} batch={batch} error: {e}")
            raise
        # if the backend returns None/empty during cancellation, frontend stays disabled
        if response is None or response == {}:
            log.debug(f"Gradio queue: events={len(events)} batch={batch} empty response")
            return {"is_generating": False, "data": [], "error": "empty response"}
        return response

    gradio.queueing.Queue.call_prediction = wrap_call_prediction
    gradio.route_utils.restore_session_state = wrap_restore_session_state
    gradio.utils.cancel_tasks = wrap_cancel_tasks
def patch_gradio_future():
def wrap_gradio_js(fn):
def wrapper(*args, js=None, _js=None, **kwargs):
if _js is not None:
@ -150,6 +234,7 @@ def patch_gradio():
gradio.components.Image.edit = lambda *args, **kwargs: None
# gradio.components.image.Image.__init__ missing tool, brush_radius, mask_opacity, edit()
def init():
global hijacked, original_IOComponent_init, original_Block_get_config, original_BlockContext_init, original_Blocks_get_config_file # pylint: disable=global-statement
if hijacked:
@ -161,6 +246,7 @@ def init():
original_Block_get_config = patches.patch(__name__, obj=gr.blocks.Block, field="get_config", replacement=Block_get_config)
original_BlockContext_init = patches.patch(__name__, obj=gr.blocks.BlockContext, field="__init__", replacement=BlockContext_init)
original_Blocks_get_config_file = patches.patch(__name__, obj=gr.blocks.Blocks, field="get_config_file", replacement=Blocks_get_config_file)
patch_gradio()
if not gr.__version__.startswith('3.43'):
patch_gradio()
patch_gradio_future()
hijacked = True

View File

@ -216,7 +216,7 @@ class OffloadHook(accelerate.hooks.ModelHook):
if shared.opts.diffusers_offload_min_gpu_memory > shared.opts.diffusers_offload_max_gpu_memory:
shared.opts.diffusers_offload_min_gpu_memory = shared.opts.diffusers_offload_max_gpu_memory
log.warning(f'Offload: type=balanced op=validate: watermark low={shared.opts.diffusers_offload_min_gpu_memory} reset')
if shared.opts.diffusers_offload_max_gpu_memory * shared.gpu_memory < 4:
if shared.opts.diffusers_offload_max_gpu_memory * shared.gpu_memory < 3:
log.warning(f'Offload: type=balanced op=validate: watermark high={shared.opts.diffusers_offload_max_gpu_memory} low memory')
def model_size(self):

View File

@ -16,6 +16,7 @@ units: list[unit.Unit] = [] # main state variable
controls: list[gr.components.Component] = [] # list of gr controls
debug = log.trace if os.environ.get('SD_CONTROL_DEBUG', None) is not None else lambda *args, **kwargs: None
debug('Trace: CONTROL')
use_generator = os.environ.get('SD_USE_GENERATOR', None) is not None
def return_stats(t: float = None):
@ -89,7 +90,7 @@ def get_units(*values):
break
def generate_click(job_id: str, state: str, active_tab: str, *args):
def generate_click_generator(job_id: str, state: str, active_tab: str, *args):
while helpers.busy:
debug(f'Control: tab="{active_tab}" job={job_id} busy')
time.sleep(0.1)
@ -118,7 +119,7 @@ def generate_click(job_id: str, state: str, active_tab: str, *args):
shared.state.end(jobid)
def generate_click_alt(job_id: str, state: str, active_tab: str, *args):
def generate_click(job_id: str, state: str, active_tab: str, *args):
while helpers.busy:
debug(f'Control: tab="{active_tab}" job={job_id} busy')
time.sleep(0.1)
@ -345,7 +346,8 @@ def create_ui(_blocks: gr.Blocks=None):
result_txt,
output_html_log,
]
generate_fn = generate_click_alt if shared.cmd_opts.remote else generate_click
generate_fn = generate_click_generator if use_generator else generate_click
control_dict = dict(
fn=generate_fn,
_js="submit_control",