Add safety check for total RAM available

pull/3819/head
Disty0 2025-03-14 13:48:31 +03:00
parent cd9cb51a05
commit 75ff932e79
2 changed files with 4 additions and 1 deletions

View File

@ -13,6 +13,7 @@
- fix untyped_storage, torch.eye and torch.cuda.device ops
- fix torch 2.7 compatibility
- fix performance with balanced offload
- fix triton and torch.compile
## Update for 2025-02-28

View File

@ -88,7 +88,9 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False, name=None):
gpu += f" | retries {retries} oom {ooms}" if retries > 0 or ooms > 0 else ''
ram = shared.ram_stats()
if ram['used'] > 0:
cpu += f"| RAM {ram['used']} GB {round(100.0 * ram['used'] / ram['total'])}%"
cpu += f"| RAM {ram['used']} GB"
if ram['total'] > 0:
cpu += f" {round(100.0 * ram['used'] / ram['total'])}%"
if isinstance(res, list):
res[-1] += f"<div class='performance'><p>Time: {elapsed_text} | {summary} {gpu} {cpu}</p></div>"
return tuple(res)