From 21244f50c45968a58abfa7cf486f004a3a4b2890 Mon Sep 17 00:00:00 2001 From: berkybear Date: Sun, 18 Sep 2022 14:37:31 -0700 Subject: [PATCH 1/3] fix some typos and remove unused import --- modules/shared.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index ac5bf4667..88879a645 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -4,7 +4,6 @@ import json import os import gradio as gr -import torch import tqdm import modules.artists @@ -38,7 +37,7 @@ parser.add_argument("--share", action='store_true', help="use share=True for gra parser.add_argument("--esrgan-models-path", type=str, help="path to directory with ESRGAN models", default=os.path.join(script_path, 'ESRGAN')) parser.add_argument("--opt-split-attention", action='store_true', help="does not do anything") parser.add_argument("--disable-opt-split-attention", action='store_true', help="disable an optimization that reduces vram usage by a lot") -parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consaumes all the VRAM it can find") +parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consumes all the VRAM it can find") parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests") parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None) parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False) @@ -135,7 +134,7 @@ class Options: "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"), "add_model_hash_to_info": OptionInfo(False, "Add model hash to generation information"), "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), - "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normaly you'd do less with less denoising)."), + "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."), "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. 
Requires restart to apply."), "font": OptionInfo("", "Font for image grids that have text"), "enable_emphasis": OptionInfo(True, "Use (text) to make model pay more attention to text and [text] to make it pay less attention"), From 1878994f9b3424495740c130c60670cbafcac034 Mon Sep 17 00:00:00 2001 From: berkybear Date: Sun, 18 Sep 2022 14:39:41 -0700 Subject: [PATCH 2/3] fix grammer --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 88879a645..c2c3c7ee0 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -37,7 +37,7 @@ parser.add_argument("--share", action='store_true', help="use share=True for gra parser.add_argument("--esrgan-models-path", type=str, help="path to directory with ESRGAN models", default=os.path.join(script_path, 'ESRGAN')) parser.add_argument("--opt-split-attention", action='store_true', help="does not do anything") parser.add_argument("--disable-opt-split-attention", action='store_true', help="disable an optimization that reduces vram usage by a lot") -parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consumes all the VRAM it can find") +parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests") parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None) parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False) From 5a759687a245cea5fa1fa9fd34868810df0fcb8f Mon Sep 17 00:00:00 2001 From: EyeDeck Date: Sun, 18 Sep 2022 17:10:32 -0400 Subject: [PATCH 3/3] Move memmon tooltip to hints.js Move memmon tooltip to hints.js so it's with the other tooltips, and doesn't have to be re-sent from the server every time. Also, allowed tooltips to be applied by matching a class name if all else fails. --- javascript/hints.js | 13 ++++++++++++- modules/ui.py | 5 +---- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index 6d5ffc010..20a71dbde 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -70,17 +70,28 @@ titles = { "Create style": "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.", "Checkpoint name": "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. 
Recommended to use with Y axis for less switching.", + + "vram": "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).", } onUiUpdate(function(){ - gradioApp().querySelectorAll('span, button, select').forEach(function(span){ + gradioApp().querySelectorAll('span, button, select, p').forEach(function(span){ tooltip = titles[span.textContent]; if(!tooltip){ tooltip = titles[span.value]; } + if(!tooltip){ + for (const c of span.classList) { + if (c in titles) { + tooltip = titles[c]; + break; + } + } + } + if(tooltip){ span.title = tooltip; } diff --git a/modules/ui.py b/modules/ui.py index 0303e0579..3a0c6ffb3 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -151,11 +151,8 @@ def wrap_gradio_call(func): sys_peak = mem_stats['system_peak'] sys_total = mem_stats['total'] sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2) - vram_tooltip = "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data. " \ - "Torch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data. " \ - "Sys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%)." - vram_html = f"
<p class='vram' title='{vram_tooltip}'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
+            vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
         else:
             vram_html = ''