diff --git a/CHANGELOG.md b/CHANGELOG.md index 1cd3572c8..2c72359fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +## 1.6.1 + +### Bug Fixes: + * fix an error causing the webui to fail to start ([#13839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13839)) + ## 1.6.0 ### Features: diff --git a/README.md b/README.md index c7a4e363f..25ba070e7 100644 --- a/README.md +++ b/README.md @@ -91,6 +91,7 @@ A browser interface based on Gradio library for Stable Diffusion. - Eased resolution restriction: generated image's dimensions must be a multiple of 8 rather than 64 - Now with a license! - Reorder elements in the UI from settings screen +- [Segmind Stable Diffusion](https://huggingface.co/segmind/SSD-1B) support ## Installation and Running Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for: diff --git a/extensions-builtin/mobile/javascript/mobile.js b/extensions-builtin/mobile/javascript/mobile.js index 652f07ac7..bff1acedf 100644 --- a/extensions-builtin/mobile/javascript/mobile.js +++ b/extensions-builtin/mobile/javascript/mobile.js @@ -12,6 +12,8 @@ function isMobile() { } function reportWindowSize() { + if (gradioApp().querySelector('.toprow-compact-tools')) return; // not applicable for compact prompt layout + var currentlyMobile = isMobile(); if (currentlyMobile == isSetupForMobile) return; isSetupForMobile = currentlyMobile; diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index 044641006..688c2f112 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -28,7 +28,7 @@ function keyupEditAttention(event) { if (afterParen == -1) return false; let afterOpeningParen = after.indexOf(OPEN); - if (afterOpeningParen != -1 && afterOpeningParen < beforeParen) return false; + if (afterOpeningParen != -1 && afterOpeningParen < afterParen) return false; // Set the selection to the text between the parenthesis const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen); diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index ac26718f6..a1bf29a8c 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -26,8 +26,9 @@ function setupExtraNetworksForTab(tabname) { var refresh = gradioApp().getElementById(tabname + '_extra_refresh'); var showDirsDiv = gradioApp().getElementById(tabname + '_extra_show_dirs'); var showDirs = gradioApp().querySelector('#' + tabname + '_extra_show_dirs input'); + var promptContainer = gradioApp().querySelector('.prompt-container-compact#' + tabname + '_prompt_container'); + var negativePrompt = gradioApp().querySelector('#' + tabname + '_neg_prompt'); - sort.dataset.sortkey = 'sortDefault'; tabs.appendChild(searchDiv); tabs.appendChild(sort); tabs.appendChild(sortOrder); @@ -49,20 +50,23 @@ function setupExtraNetworksForTab(tabname) { elem.style.display = visible ? "" : "none"; }); + + applySort(); }; var applySort = function() { + var cards = gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card'); + var reverse = sortOrder.classList.contains("sortReverse"); - var sortKey = sort.querySelector("input").value.toLowerCase().replace("sort", "").replaceAll(" ", "_").replace(/_+$/, "").trim(); - sortKey = sortKey ? "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1) : ""; - var sortKeyStore = sortKey ? sortKey + (reverse ? 
"Reverse" : "") : ""; - if (!sortKey || sortKeyStore == sort.dataset.sortkey) { + var sortKey = sort.querySelector("input").value.toLowerCase().replace("sort", "").replaceAll(" ", "_").replace(/_+$/, "").trim() || "name"; + sortKey = "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1); + var sortKeyStore = sortKey + "-" + (reverse ? "Descending" : "Ascending") + "-" + cards.length; + + if (sortKeyStore == sort.dataset.sortkey) { return; } - sort.dataset.sortkey = sortKeyStore; - var cards = gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card'); cards.forEach(function(card) { card.originalParentElement = card.parentElement; }); @@ -88,15 +92,13 @@ function setupExtraNetworksForTab(tabname) { }; search.addEventListener("input", applyFilter); - applyFilter(); - ["change", "blur", "click"].forEach(function(evt) { - sort.querySelector("input").addEventListener(evt, applySort); - }); sortOrder.addEventListener("click", function() { sortOrder.classList.toggle("sortReverse"); applySort(); }); + applyFilter(); + extraNetworksApplySort[tabname] = applySort; extraNetworksApplyFilter[tabname] = applyFilter; var showDirsUpdate = function() { @@ -109,11 +111,47 @@ function setupExtraNetworksForTab(tabname) { showDirsUpdate(); } +function extraNetworksMovePromptToTab(tabname, id, showPrompt, showNegativePrompt) { + if (!gradioApp().querySelector('.toprow-compact-tools')) return; // only applicable for compact prompt layout + + var promptContainer = gradioApp().getElementById(tabname + '_prompt_container'); + var prompt = gradioApp().getElementById(tabname + '_prompt_row'); + var negPrompt = gradioApp().getElementById(tabname + '_neg_prompt_row'); + var elem = id ? gradioApp().getElementById(id) : null; + + if (showNegativePrompt && elem) { + elem.insertBefore(negPrompt, elem.firstChild); + } else { + promptContainer.insertBefore(negPrompt, promptContainer.firstChild); + } + + if (showPrompt && elem) { + elem.insertBefore(prompt, elem.firstChild); + } else { + promptContainer.insertBefore(prompt, promptContainer.firstChild); + } +} + + +function extraNetworksUrelatedTabSelected(tabname) { // called from python when user selects an unrelated tab (generate) + extraNetworksMovePromptToTab(tabname, '', false, false); +} + +function extraNetworksTabSelected(tabname, id, showPrompt, showNegativePrompt) { // called from python when user selects an extra networks tab + extraNetworksMovePromptToTab(tabname, id, showPrompt, showNegativePrompt); + +} + function applyExtraNetworkFilter(tabname) { setTimeout(extraNetworksApplyFilter[tabname], 1); } +function applyExtraNetworkSort(tabname) { + setTimeout(extraNetworksApplySort[tabname], 1); +} + var extraNetworksApplyFilter = {}; +var extraNetworksApplySort = {}; var activePromptTextarea = {}; function setupExtraNetworks() { diff --git a/javascript/inputAccordion.js b/javascript/inputAccordion.js index f2839852e..7570309aa 100644 --- a/javascript/inputAccordion.js +++ b/javascript/inputAccordion.js @@ -1,37 +1,68 @@ -var observerAccordionOpen = new MutationObserver(function(mutations) { - mutations.forEach(function(mutationRecord) { - var elem = mutationRecord.target; - var open = elem.classList.contains('open'); - - var accordion = elem.parentNode; - accordion.classList.toggle('input-accordion-open', open); - - var checkbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input"); - checkbox.checked = open; - updateInput(checkbox); - - var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); - if (extra) { - 
extra.style.display = open ? "" : "none"; - } - }); -}); - function inputAccordionChecked(id, checked) { - var label = gradioApp().querySelector('#' + id + " .label-wrap"); - if (label.classList.contains('open') != checked) { - label.click(); + var accordion = gradioApp().getElementById(id); + accordion.visibleCheckbox.checked = checked; + accordion.onVisibleCheckboxChange(); +} + +function setupAccordion(accordion) { + var labelWrap = accordion.querySelector('.label-wrap'); + var gradioCheckbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input"); + var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); + var span = labelWrap.querySelector('span'); + var linked = true; + + var isOpen = function() { + return labelWrap.classList.contains('open'); + }; + + var observerAccordionOpen = new MutationObserver(function(mutations) { + mutations.forEach(function(mutationRecord) { + accordion.classList.toggle('input-accordion-open', isOpen()); + + if (linked) { + accordion.visibleCheckbox.checked = isOpen(); + accordion.onVisibleCheckboxChange(); + } + }); + }); + observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']}); + + if (extra) { + labelWrap.insertBefore(extra, labelWrap.lastElementChild); } + + accordion.onChecked = function(checked) { + if (isOpen() != checked) { + labelWrap.click(); + } + }; + + var visibleCheckbox = document.createElement('INPUT'); + visibleCheckbox.type = 'checkbox'; + visibleCheckbox.checked = isOpen(); + visibleCheckbox.id = accordion.id + "-visible-checkbox"; + visibleCheckbox.className = gradioCheckbox.className + " input-accordion-checkbox"; + span.insertBefore(visibleCheckbox, span.firstChild); + + accordion.visibleCheckbox = visibleCheckbox; + accordion.onVisibleCheckboxChange = function() { + if (linked && isOpen() != visibleCheckbox.checked) { + labelWrap.click(); + } + + gradioCheckbox.checked = visibleCheckbox.checked; + updateInput(gradioCheckbox); + }; + + visibleCheckbox.addEventListener('click', function(event) { + linked = false; + event.stopPropagation(); + }); + visibleCheckbox.addEventListener('input', accordion.onVisibleCheckboxChange); } onUiLoaded(function() { for (var accordion of gradioApp().querySelectorAll('.input-accordion')) { - var labelWrap = accordion.querySelector('.label-wrap'); - observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']}); - - var extra = gradioApp().querySelector('#' + accordion.id + "-extra"); - if (extra) { - labelWrap.insertBefore(extra, labelWrap.lastElementChild); - } + setupAccordion(accordion); } }); diff --git a/javascript/notification.js b/javascript/notification.js index 6d7995612..3ee972ae1 100644 --- a/javascript/notification.js +++ b/javascript/notification.js @@ -26,7 +26,11 @@ onAfterUiUpdate(function() { lastHeadImg = headImg; // play notification sound if available - gradioApp().querySelector('#audio_notification audio')?.play(); + const notificationAudio = gradioApp().querySelector('#audio_notification audio'); + if (notificationAudio) { + notificationAudio.volume = opts.notification_volume / 100.0 || 1.0; + notificationAudio.play(); + } if (document.hasFocus()) return; diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 4e602a842..a9fb9bfa3 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -107,7 +107,7 @@ parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, req parser.add_argument("--disable-tls-verify", action="store_false", help="When passed, enables the use of 
self-signed certificates.", default=None) parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None) parser.add_argument("--gradio-queue", action='store_true', help="does not do anything", default=True) -parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gradio queue; causes the webpage to use http requests instead of websockets; was the defaul in earlier versions") +parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gradio queue; causes the webpage to use http requests instead of websockets; was the default in earlier versions") parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers") parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index 8e0f13bdc..01d668ecd 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -9,6 +9,7 @@ from modules import paths, shared, devices, modelloader, errors model_dir = "GFPGAN" user_path = None model_path = os.path.join(paths.models_path, model_dir) +model_file_path = None model_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth" have_gfpgan = False loaded_gfpgan_model = None @@ -17,6 +18,7 @@ loaded_gfpgan_model = None def gfpgann(): global loaded_gfpgan_model global model_path + global model_file_path if loaded_gfpgan_model is not None: loaded_gfpgan_model.gfpgan.to(devices.device_gfpgan) return loaded_gfpgan_model @@ -24,17 +26,24 @@ def gfpgann(): if gfpgan_constructor is None: return None - models = modelloader.load_models(model_path, model_url, user_path, ext_filter="GFPGAN") + models = modelloader.load_models(model_path, model_url, user_path, ext_filter=['.pth']) + if len(models) == 1 and models[0].startswith("http"): model_file = models[0] elif len(models) != 0: - latest_file = max(models, key=os.path.getctime) + gfp_models = [] + for item in models: + if 'GFPGAN' in os.path.basename(item): + gfp_models.append(item) + latest_file = max(gfp_models, key=os.path.getctime) model_file = latest_file else: print("Unable to load gfpgan model!") return None + if hasattr(facexlib.detection.retinaface, 'device'): facexlib.detection.retinaface.device = devices.device_gfpgan + model_file_path = model_file model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=devices.device_gfpgan) loaded_gfpgan_model = model @@ -77,19 +86,25 @@ def setup_model(dirname): global user_path global have_gfpgan global gfpgan_constructor + global model_file_path + + facexlib_path = model_path + + if dirname is not None: + facexlib_path = dirname load_file_from_url_orig = gfpgan.utils.load_file_from_url facex_load_file_from_url_orig = facexlib.detection.load_file_from_url facex_load_file_from_url_orig2 = facexlib.parsing.load_file_from_url def my_load_file_from_url(**kwargs): - return load_file_from_url_orig(**dict(kwargs, model_dir=model_path)) + return load_file_from_url_orig(**dict(kwargs, model_dir=model_file_path)) def facex_load_file_from_url(**kwargs): - return facex_load_file_from_url_orig(**dict(kwargs, save_dir=model_path, model_dir=None)) + return facex_load_file_from_url_orig(**dict(kwargs, 
save_dir=facexlib_path, model_dir=None)) def facex_load_file_from_url2(**kwargs): - return facex_load_file_from_url_orig2(**dict(kwargs, save_dir=model_path, model_dir=None)) + return facex_load_file_from_url_orig2(**dict(kwargs, save_dir=facexlib_path, model_dir=None)) gfpgan.utils.load_file_from_url = my_load_file_from_url facexlib.detection.load_file_from_url = facex_load_file_from_url diff --git a/modules/processing.py b/modules/processing.py index 40598f5cf..b0e240a46 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -296,7 +296,7 @@ class StableDiffusionProcessing: return conditioning def edit_image_conditioning(self, source_image): - conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method)) + conditioning_image = shared.sd_model.encode_first_stage(source_image).mode() return conditioning_image @@ -886,6 +886,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() + state.nextjob() + if p.scripts is not None: p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n) @@ -958,8 +960,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() - state.nextjob() - if not infotexts: infotexts.append(Processed(p, []).infotext(p, 0)) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index ddf4d2dd4..cba134554 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -4,7 +4,7 @@ import re from collections import namedtuple import lark -# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]" +# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][: in background:0.25] [shoddy:masterful:0.5]" # will be represented with prompt_schedule like this (assuming steps=100): # [25, 'fantasy landscape with a mountain and an oak in foreground shoddy'] # [50, 'fantasy landscape with a lake and an oak in foreground in background shoddy'] diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index bc5fbcd3f..0157e19f0 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -184,6 +184,20 @@ class StableDiffusionModelHijack: errors.display(e, "applying cross attention optimization") undo_optimizations() + def convert_sdxl_to_ssd(self, m): + """Converts an SDXL model to a Segmind Stable Diffusion model (see https://huggingface.co/segmind/SSD-1B)""" + + delattr(m.model.diffusion_model.middle_block, '1') + delattr(m.model.diffusion_model.middle_block, '2') + for i in ['9', '8', '7', '6', '5', '4']: + delattr(m.model.diffusion_model.input_blocks[7][1].transformer_blocks, i) + delattr(m.model.diffusion_model.input_blocks[8][1].transformer_blocks, i) + delattr(m.model.diffusion_model.output_blocks[0][1].transformer_blocks, i) + delattr(m.model.diffusion_model.output_blocks[1][1].transformer_blocks, i) + delattr(m.model.diffusion_model.output_blocks[4][1].transformer_blocks, '1') + delattr(m.model.diffusion_model.output_blocks[5][1].transformer_blocks, '1') + devices.torch_gc() + def hijack(self, m): conditioner = getattr(m, 'conditioner', None) if conditioner: @@ -242,8 +256,12 @@ class StableDiffusionModelHijack: self.layers = flatten(m) + import modules.models.diffusion.ddpm_edit + if isinstance(m, ldm.models.diffusion.ddpm.LatentDiffusion): sd_unet.original_forward = ldm_original_forward + elif isinstance(m, 
modules.models.diffusion.ddpm_edit.LatentDiffusion): + sd_unet.original_forward = ldm_original_forward elif isinstance(m, sgm.models.diffusion.DiffusionEngine): sd_unet.original_forward = sgm_original_forward else: diff --git a/modules/sd_models.py b/modules/sd_models.py index 3b6cdea18..841402e86 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -352,10 +352,13 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer model.is_sdxl = hasattr(model, 'conditioner') model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model') model.is_sd1 = not model.is_sdxl and not model.is_sd2 - + model.is_ssd = model.is_sdxl and 'model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight' not in state_dict.keys() if model.is_sdxl: sd_models_xl.extend_sdxl(model) + if model.is_ssd: + sd_hijack.model_hijack.convert_sdxl_to_ssd(model) + if shared.opts.sd_checkpoint_cache > 0: # cache newly loaded model checkpoints_loaded[checkpoint_info] = state_dict.copy() diff --git a/modules/sd_models_types.py b/modules/sd_models_types.py index 5ffd2f4f9..f911fbb68 100644 --- a/modules/sd_models_types.py +++ b/modules/sd_models_types.py @@ -22,7 +22,10 @@ class WebuiSdModel(LatentDiffusion): """structure with additional information about the file with model's weights""" is_sdxl: bool - """True if the model's architecture is SDXL""" + """True if the model's architecture is SDXL or SSD""" + + is_ssd: bool + """True if the model is SSD""" is_sd2: bool """True if the model's architecture is SD 2.x""" diff --git a/modules/shared_items.py b/modules/shared_items.py index b1459f8c4..5024b4268 100644 --- a/modules/shared_items.py +++ b/modules/shared_items.py @@ -67,6 +67,8 @@ def reload_hypernetworks(): ui_reorder_categories_builtin_items = [ + "prompt", + "image", "inpaint", "sampler", "accordions", diff --git a/modules/shared_options.py b/modules/shared_options.py index 5b07dd041..0f9cb08df 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -64,6 +64,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that has been interrupted in mid-generation; even if not saved, they will still show up in webui output."), "notification_audio": OptionInfo(True, "Play notification sound after image generation").info("notification.mp3 should be present in the root directory").needs_reload_ui(), + "notification_volume": OptionInfo(100, "Notification sound volume", gr.Slider, {"minimum": 0, "maximum": 100, "step": 1}).info("in %"), })) options_templates.update(options_section(('saving-paths', "Paths for saving"), { @@ -234,6 +235,8 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), + "extra_networks_card_order_field": OptionInfo("Name", "Default order field for Extra Networks cards", gr.Dropdown, {"choices": ['Name', 'Date Created', 'Date Modified']}).needs_reload_ui(), + "extra_networks_card_order": OptionInfo("Ascending", "Default order for Extra Networks cards", gr.Dropdown, {"choices": ['Ascending', 'Descending']}).needs_reload_ui(), "extra_networks_add_text_separator": 
OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), @@ -272,6 +275,7 @@ options_templates.update(options_section(('ui', "User interface"), { "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), "txt2img_settings_accordion": OptionInfo(False, "Settings in txt2img hidden under Accordion").needs_reload_ui(), "img2img_settings_accordion": OptionInfo(False, "Settings in img2img hidden under Accordion").needs_reload_ui(), + "compact_prompt_box": OptionInfo(False, "Compact prompt layout").info("puts prompt and negative prompt inside the Generate tab, leaving more vertical space for the image on the right").needs_reload_ui(), })) diff --git a/modules/ui.py b/modules/ui.py index d05b9f55d..4a3e60d13 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -12,7 +12,7 @@ from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import gradio_extensons # noqa: F401 -from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, ui_extra_networks +from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, scripts, sd_samplers, processing, ui_extra_networks, ui_toprow from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion, ResizeHandleRow from modules.paths import script_path from modules.ui_common import create_refresh_button @@ -25,7 +25,6 @@ import modules.hypernetworks.ui as hypernetworks_ui import modules.textual_inversion.ui as textual_inversion_ui import modules.textual_inversion.textual_inversion as textual_inversion import modules.shared as shared -import modules.images from modules import prompt_parser from modules.sd_hijack import model_hijack from modules.generation_parameters_copypaste import image_from_url_text @@ -177,79 +176,6 @@ def update_negative_prompt_token_counter(text, steps): return update_token_counter(text, steps, is_positive=False) -class Toprow: - """Creates a top row UI with prompts, generate button, styles, extra little buttons for things, and enables some functionality related to their operation""" - - def __init__(self, is_img2img): - id_part = "img2img" if is_img2img else "txt2img" - self.id_part = id_part - - with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): - with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6): - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - self.prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - self.prompt_img = gr.File(label="", elem_id=f"{id_part}_prompt_image", file_count="single", type="binary", visible=False) - - with gr.Row(): - with gr.Column(scale=80): - with gr.Row(): - self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", 
show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) - - self.button_interrogate = None - self.button_deepbooru = None - if is_img2img: - with gr.Column(scale=1, elem_classes="interrogate-col"): - self.button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") - self.button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") - - with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): - with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"): - self.interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") - self.skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") - self.submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') - - self.skip.click( - fn=lambda: shared.state.skip(), - inputs=[], - outputs=[], - ) - - self.interrupt.click( - fn=lambda: shared.state.interrupt(), - inputs=[], - outputs=[], - ) - - with gr.Row(elem_id=f"{id_part}_tools"): - self.paste = ToolButton(value=paste_symbol, elem_id="paste", tooltip="Read generation parameters from prompt or last generation if prompt is empty into user interface.") - self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt", tooltip="Clear prompt") - self.apply_styles = ToolButton(value=ui_prompt_styles.styles_materialize_symbol, elem_id=f"{id_part}_style_apply", tooltip="Apply all selected styles to prompts.") - self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False, tooltip="Restore progress") - - self.token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) - self.token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") - self.negative_token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"]) - self.negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") - - self.clear_prompt_button.click( - fn=lambda *x: x, - _js="confirm_clear_prompt", - inputs=[self.prompt, self.negative_prompt], - outputs=[self.prompt, self.negative_prompt], - ) - - self.ui_styles = ui_prompt_styles.UiPromptStyles(id_part, self.prompt, self.negative_prompt) - self.ui_styles.setup_apply_button(self.apply_styles) - - self.prompt_img.change( - fn=modules.images.image_data, - inputs=[self.prompt_img], - outputs=[self.prompt, self.prompt_img], - show_progress=False, - ) - def setup_progressbar(*args, **kwargs): pass @@ -288,8 +214,8 @@ def apply_setting(key, value): return getattr(opts, key) -def create_output_panel(tabname, outdir): - return ui_common.create_output_panel(tabname, outdir) +def create_output_panel(tabname, outdir, toprow=None): + return ui_common.create_output_panel(tabname, outdir, toprow) def create_sampler_and_steps_selection(choices, tabname): @@ -336,7 +262,7 @@ def create_ui(): scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - toprow = Toprow(is_img2img=False) + toprow = ui_toprow.Toprow(is_img2img=False, is_compact=shared.opts.compact_prompt_box) dummy_component = gr.Label(visible=False) @@ -349,6 +275,9 @@ def create_ui(): scripts.scripts_txt2img.prepare_ui() for category in ordered_ui_categories(): + if category == "prompt": + toprow.create_inline_toprow_prompts() + if 
category == "sampler": steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "txt2img") @@ -443,7 +372,7 @@ def create_ui(): show_progress=False, ) - txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) + txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples, toprow) txt2img_args = dict( fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), @@ -555,7 +484,7 @@ def create_ui(): scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - toprow = Toprow(is_img2img=True) + toprow = ui_toprow.Toprow(is_img2img=True, is_compact=shared.opts.compact_prompt_box) extra_tabs = gr.Tabs(elem_id="img2img_extra_tabs") extra_tabs.__enter__() @@ -579,78 +508,131 @@ def create_ui(): button = gr.Button(title) copy_image_buttons.append((button, name, elem)) - with gr.Tabs(elem_id="mode_img2img"): - img2img_selected_tab = gr.State(0) + scripts.scripts_img2img.prepare_ui() - with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: - init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height) - add_copy_image_controls('img2img', init_img) + for category in ordered_ui_categories(): + if category == "prompt": + toprow.create_inline_toprow_prompts() - with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: - sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color) - add_copy_image_controls('sketch', sketch) + if category == "image": + with gr.Tabs(elem_id="mode_img2img"): + img2img_selected_tab = gr.State(0) - with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: - init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color) - add_copy_image_controls('inpaint', init_img_with_mask) + with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: + init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA", height=opts.img2img_editor_height) + add_copy_image_controls('img2img', init_img) - with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: - inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color) - inpaint_color_sketch_orig = gr.State(None) - add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) + with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch: + sketch = gr.Image(label="Image for img2img", 
elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color) + add_copy_image_controls('sketch', sketch) - def update_orig(image, state): - if image is not None: - same_size = state is not None and state.size == image.size - has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) - edited = same_size and has_exact_match - return image if not edited or state is None else state + with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint: + init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color) + add_copy_image_controls('inpaint', init_img_with_mask) - inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) + with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color: + inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGB", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color) + inpaint_color_sketch_orig = gr.State(None) + add_copy_image_controls('inpaint_sketch', inpaint_color_sketch) - with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: - init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") - init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask") + def update_orig(image, state): + if image is not None: + same_size = state is not None and state.size == image.size + has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1)) + edited = same_size and has_exact_match + return image if not edited or state is None else state - with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: - hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' - gr.HTML( - "<p>Process images in a directory on the same machine where the server is running." + - "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + - f"<br>Add inpaint batch mask directory to enable inpaint batch processing." - f"{hidden}</p>
" - ) - img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") - img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") - img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") - with gr.Accordion("PNG info", open=False): - img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") - img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") - img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps", "Model hash"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") + inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig) - img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] + with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload: + init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base") + init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask") - for i, tab in enumerate(img2img_tabs): - tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) + with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: + hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' + gr.HTML( + "<p>Process images in a directory on the same machine where the server is running." + + "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + + f"<br>Add inpaint batch mask directory to enable inpaint batch processing." + f"{hidden}</p>
" + ) + img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") + img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") + img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") + with gr.Accordion("PNG info", open=False): + img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") + img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") + img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps", "Model hash"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") - def copy_image(img): - if isinstance(img, dict) and 'image' in img: - return img['image'] + img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] - return img + for i, tab in enumerate(img2img_tabs): + tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) - for button, name, elem in copy_image_buttons: - button.click( - fn=copy_image, - inputs=[elem], - outputs=[copy_image_destinations[name]], - ) - button.click( - fn=lambda: None, - _js=f"switch_to_{name.replace(' ', '_')}", - inputs=[], - outputs=[], - ) + def copy_image(img): + if isinstance(img, dict) and 'image' in img: + return img['image'] + + return img + + for button, name, elem in copy_image_buttons: + button.click( + fn=copy_image, + inputs=[elem], + outputs=[copy_image_destinations[name]], + ) + button.click( + fn=lambda: None, + _js=f"switch_to_{name.replace(' ', '_')}", + inputs=[], + outputs=[], + ) + + with FormRow(): + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + + if category == "sampler": + steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img") + + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + selected_scale_tab = gr.State(value=0) + + with gr.Tabs(): + with gr.Tab(label="Resize to", elem_id="img2img_tab_resize_to") as tab_scale_to: + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn", tooltip="Switch width/height") + detect_image_size_btn = ToolButton(value=detect_image_size_symbol, elem_id="img2img_detect_image_size_btn", tooltip="Auto detect size from img2img") + + with gr.Tab(label="Resize by", elem_id="img2img_tab_resize_by") as tab_scale_by: + scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") + + with FormRow(): + scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), 
elem_id="img2img_scale_resolution_preview") + gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") + button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") + + on_change_args = dict( + fn=resize_from_to_html, + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, scale_by], + outputs=scale_by_html, + show_progress=False, + ) + + scale_by.release(**on_change_args) + button_update_resize_to.click(**on_change_args) + + # the code below is meant to update the resolution label after the image in the image selection UI has changed. + # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. + # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. + for component in [init_img, sketch]: + component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) with FormRow(): resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") @@ -758,20 +740,19 @@ def create_ui(): with gr.Column(scale=4): inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") - def select_img2img_tab(tab): - return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), - - for i, elem in enumerate(img2img_tabs): - elem.select( - fn=lambda tab=i: select_img2img_tab(tab), - inputs=[], - outputs=[inpaint_controls, mask_alpha], - ) - if category not in {"accordions"}: scripts.scripts_img2img.setup_ui_for_section(category) + def select_img2img_tab(tab): + return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), - img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) + for i, elem in enumerate(img2img_tabs): + elem.select( + fn=lambda tab=i: select_img2img_tab(tab), + inputs=[], + outputs=[inpaint_controls, mask_alpha], + ) + + img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples, toprow) img2img_args = dict( fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), diff --git a/modules/ui_common.py b/modules/ui_common.py index 84a7d7f27..032ec4af7 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -104,7 +104,7 @@ def save_files(js_data, images, do_make_zip, index): return gr.File.update(value=fullfns, visible=True), plaintext_to_html(f"Saved: {filenames[0]}") -def create_output_panel(tabname, outdir): +def create_output_panel(tabname, outdir, toprow=None): def open_folder(f): if not os.path.exists(f): @@ -130,12 +130,15 @@ Requested path was: {f} else: sp.Popen(["xdg-open", path]) - with gr.Column(variant='panel', elem_id=f"{tabname}_results"): - with gr.Group(elem_id=f"{tabname}_gallery_container"): - result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery", columns=4, preview=True, height=shared.opts.gallery_height or None) + with gr.Column(elem_id=f"{tabname}_results"): + if toprow: + toprow.create_inline_toprow_image() - generation_info = None - with gr.Column(): + with gr.Column(variant='panel', elem_id=f"{tabname}_results_panel"): + with gr.Group(elem_id=f"{tabname}_gallery_container"): + result_gallery = gr.Gallery(label='Output', 
show_label=False, elem_id=f"{tabname}_gallery", columns=4, preview=True, height=shared.opts.gallery_height or None) + + generation_info = None with gr.Row(elem_id=f"image_buttons_{tabname}", elem_classes="image-buttons"): open_folder_button = ToolButton(folder_symbol, elem_id=f'{tabname}_open_folder', visible=not shared.cmd_opts.hide_ui_dir_config, tooltip="Open images output directory.") diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 59d6ecc61..7907cd63f 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -103,6 +103,7 @@ class ExtraNetworksPage: self.name = title.lower() self.id_page = self.name.replace(" ", "_") self.card_page = shared.html("extra-networks-card.html") + self.allow_prompt = True self.allow_negative_prompt = False self.metadata = {} self.items = {} @@ -367,7 +368,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): related_tabs = [] for page in ui.stored_extra_pages: - with gr.Tab(page.title, id=page.id_page) as tab: + with gr.Tab(page.title, elem_id=f"{tabname}_{page.id_page}", elem_classes=["extra-page"]) as tab: elem_id = f"{tabname}_{page.id_page}_cards_html" page_elem = gr.HTML('Loading...', elem_id=elem_id) ui.pages.append(page_elem) @@ -381,19 +382,28 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): related_tabs.append(tab) edit_search = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", elem_classes="search", placeholder="Search...", visible=False, interactive=True) - dropdown_sort = gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order") - button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False, tooltip="Invert sort order") + dropdown_sort = gr.Dropdown(choices=['Name', 'Date Created', 'Date Modified', ], value=shared.opts.extra_networks_card_order_field, elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order") + button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes=["sortorder"] + ([] if shared.opts.extra_networks_card_order == "Ascending" else ["sortReverse"]), visible=False, tooltip="Invert sort order") button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False) checkbox_show_dirs = gr.Checkbox(True, label='Show dirs', elem_id=tabname+"_extra_show_dirs", elem_classes="show-dirs", visible=False) ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False) ui.preview_target_filename = gr.Textbox('Preview save filename', elem_id=tabname+"_preview_filename", visible=False) - for tab in unrelated_tabs: - tab.select(fn=lambda: [gr.update(visible=False) for _ in range(5)], inputs=[], outputs=[edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs], show_progress=False) + tab_controls = [edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs] - for tab in related_tabs: - tab.select(fn=lambda: [gr.update(visible=True) for _ in range(5)], inputs=[], outputs=[edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs], show_progress=False) + for tab in unrelated_tabs: + tab.select(fn=lambda: [gr.update(visible=False) 
for _ in tab_controls], _js='function(){ extraNetworksUrelatedTabSelected("' + tabname + '"); }', inputs=[], outputs=tab_controls, show_progress=False) + + for page, tab in zip(ui.stored_extra_pages, related_tabs): + allow_prompt = "true" if page.allow_prompt else "false" + allow_negative_prompt = "true" if page.allow_negative_prompt else "false" + + jscode = 'extraNetworksTabSelected("' + tabname + '", "' + f"{tabname}_{page.id_page}" + '", ' + allow_prompt + ', ' + allow_negative_prompt + ');' + + tab.select(fn=lambda: [gr.update(visible=True) for _ in tab_controls], _js='function(){ ' + jscode + ' }', inputs=[], outputs=tab_controls, show_progress=False) + + dropdown_sort.change(fn=lambda: None, _js="function(){ applyExtraNetworkSort('" + tabname + "'); }") def pages_html(): if not ui.pages_contents: diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py index ca6c26076..2fc0ed43d 100644 --- a/modules/ui_extra_networks_checkpoints.py +++ b/modules/ui_extra_networks_checkpoints.py @@ -10,6 +10,8 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): def __init__(self): super().__init__('Checkpoints') + self.allow_prompt = False + def refresh(self): shared.refresh_checkpoints() diff --git a/modules/ui_toprow.py b/modules/ui_toprow.py new file mode 100644 index 000000000..985b5a2dd --- /dev/null +++ b/modules/ui_toprow.py @@ -0,0 +1,141 @@ +import gradio as gr + +from modules import shared, ui_prompt_styles +import modules.images + +from modules.ui_components import ToolButton + + +class Toprow: + """Creates a top row UI with prompts, generate button, styles, extra little buttons for things, and enables some functionality related to their operation""" + + prompt = None + prompt_img = None + negative_prompt = None + + button_interrogate = None + button_deepbooru = None + + interrupt = None + skip = None + submit = None + + paste = None + clear_prompt_button = None + apply_styles = None + restore_progress_button = None + + token_counter = None + token_button = None + negative_token_counter = None + negative_token_button = None + + ui_styles = None + + submit_box = None + + def __init__(self, is_img2img, is_compact=False): + id_part = "img2img" if is_img2img else "txt2img" + self.id_part = id_part + self.is_img2img = is_img2img + self.is_compact = is_compact + + if not is_compact: + with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"): + self.create_classic_toprow() + else: + self.create_submit_box() + + def create_classic_toprow(self): + self.create_prompts() + + with gr.Column(scale=1, elem_id=f"{self.id_part}_actions_column"): + self.create_submit_box() + + self.create_tools_row() + + self.create_styles_ui() + + def create_inline_toprow_prompts(self): + if not self.is_compact: + return + + self.create_prompts() + + with gr.Row(elem_classes=["toprow-compact-stylerow"]): + with gr.Column(elem_classes=["toprow-compact-tools"]): + self.create_tools_row() + with gr.Column(): + self.create_styles_ui() + + def create_inline_toprow_image(self): + if not self.is_compact: + return + + self.submit_box.render() + + def create_prompts(self): + with gr.Column(elem_id=f"{self.id_part}_prompt_container", elem_classes=["prompt-container-compact"] if self.is_compact else [], scale=6): + with gr.Row(elem_id=f"{self.id_part}_prompt_row", elem_classes=["prompt-row"]): + self.prompt = gr.Textbox(label="Prompt", elem_id=f"{self.id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)", 
elem_classes=["prompt"]) + self.prompt_img = gr.File(label="", elem_id=f"{self.id_part}_prompt_image", file_count="single", type="binary", visible=False) + + with gr.Row(elem_id=f"{self.id_part}_neg_prompt_row", elem_classes=["prompt-row"]): + self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{self.id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"]) + + self.prompt_img.change( + fn=modules.images.image_data, + inputs=[self.prompt_img], + outputs=[self.prompt, self.prompt_img], + show_progress=False, + ) + + def create_submit_box(self): + with gr.Row(elem_id=f"{self.id_part}_generate_box", elem_classes=["generate-box"] + (["generate-box-compact"] if self.is_compact else []), render=not self.is_compact) as submit_box: + self.submit_box = submit_box + + self.interrupt = gr.Button('Interrupt', elem_id=f"{self.id_part}_interrupt", elem_classes="generate-box-interrupt") + self.skip = gr.Button('Skip', elem_id=f"{self.id_part}_skip", elem_classes="generate-box-skip") + self.submit = gr.Button('Generate', elem_id=f"{self.id_part}_generate", variant='primary') + + self.skip.click( + fn=lambda: shared.state.skip(), + inputs=[], + outputs=[], + ) + + self.interrupt.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + + def create_tools_row(self): + with gr.Row(elem_id=f"{self.id_part}_tools"): + from modules.ui import paste_symbol, clear_prompt_symbol, restore_progress_symbol + + self.paste = ToolButton(value=paste_symbol, elem_id="paste", tooltip="Read generation parameters from prompt or last generation if prompt is empty into user interface.") + self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{self.id_part}_clear_prompt", tooltip="Clear prompt") + self.apply_styles = ToolButton(value=ui_prompt_styles.styles_materialize_symbol, elem_id=f"{self.id_part}_style_apply", tooltip="Apply all selected styles to prompts.") + + if self.is_img2img: + self.button_interrogate = ToolButton('📎', tooltip='Interrogate CLIP - use CLIP neural network to create a text describing the image, and put it into the prompt field', elem_id="interrogate") + self.button_deepbooru = ToolButton('📦', tooltip='Interrogate DeepBooru - use DeepBooru neural network to create a text describing the image, and put it into the prompt field', elem_id="deepbooru") + + self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{self.id_part}_restore_progress", visible=False, tooltip="Restore progress") + + self.token_counter = gr.HTML(value="0/75", elem_id=f"{self.id_part}_token_counter", elem_classes=["token-counter"]) + self.token_button = gr.Button(visible=False, elem_id=f"{self.id_part}_token_button") + self.negative_token_counter = gr.HTML(value="0/75", elem_id=f"{self.id_part}_negative_token_counter", elem_classes=["token-counter"]) + self.negative_token_button = gr.Button(visible=False, elem_id=f"{self.id_part}_negative_token_button") + + self.clear_prompt_button.click( + fn=lambda *x: x, + _js="confirm_clear_prompt", + inputs=[self.prompt, self.negative_prompt], + outputs=[self.prompt, self.negative_prompt], + ) + + def create_styles_ui(self): + self.ui_styles = ui_prompt_styles.UiPromptStyles(self.id_part, self.prompt, self.negative_prompt) + self.ui_styles.setup_apply_button(self.apply_styles) diff --git a/requirements_versions.txt b/requirements_versions.txt index 7d27f2be3..cb7403a9d 100644 --- a/requirements_versions.txt +++ 
b/requirements_versions.txt @@ -29,3 +29,4 @@ torch torchdiffeq==0.2.3 torchsde==0.2.6 transformers==4.30.2 +httpx==0.24.1 diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index ca73b2a56..a4a2f24dd 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -114,6 +114,7 @@ class Script(scripts.Script): def ui(self, is_img2img): checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=self.elem_id("checkbox_iterate")) checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=self.elem_id("checkbox_iterate_batch")) + prompt_position = gr.Radio(["start", "end"], label="Insert prompts at the", elem_id=self.elem_id("prompt_position"), value="start") prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1, elem_id=self.elem_id("prompt_txt")) file = gr.File(label="Upload prompt inputs", type='binary', elem_id=self.elem_id("file")) @@ -124,9 +125,9 @@ class Script(scripts.Script): # We don't shrink back to 1, because that causes the control to ignore [enter], and it may # be unclear to the user that shift-enter is needed. prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt], show_progress=False) - return [checkbox_iterate, checkbox_iterate_batch, prompt_txt] + return [checkbox_iterate, checkbox_iterate_batch, prompt_position, prompt_txt] - def run(self, p, checkbox_iterate, checkbox_iterate_batch, prompt_txt: str): + def run(self, p, checkbox_iterate, checkbox_iterate_batch, prompt_position, prompt_txt: str): lines = [x for x in (x.strip() for x in prompt_txt.splitlines()) if x] p.do_not_save_grid = True @@ -167,6 +168,18 @@ class Script(scripts.Script): else: setattr(copy_p, k, v) + if args.get("prompt") and p.prompt: + if prompt_position == "start": + copy_p.prompt = args.get("prompt") + " " + p.prompt + else: + copy_p.prompt = p.prompt + " " + args.get("prompt") + + if args.get("negative_prompt") and p.negative_prompt: + if prompt_position == "start": + copy_p.negative_prompt = args.get("negative_prompt") + " " + p.negative_prompt + else: + copy_p.negative_prompt = p.negative_prompt + " " + args.get("negative_prompt") + proc = process_images(copy_p) images += proc.images diff --git a/style.css b/style.css index 115626cd9..731620226 100644 --- a/style.css +++ b/style.css @@ -204,6 +204,11 @@ div.block.gradio-accordion { padding: 8px 8px; } +input[type="checkbox"].input-accordion-checkbox{ + vertical-align: sub; + margin-right: 0.5em; +} + /* txt2img/img2img specific */ @@ -291,6 +296,13 @@ div.block.gradio-accordion { min-height: 4.5em; } +#txt2img_generate, #img2img_generate { + min-height: 4.5em; +} +.generate-box-compact #txt2img_generate, .generate-box-compact #img2img_generate { + min-height: 3em; +} + @media screen and (min-width: 2500px) { #txt2img_gallery, #img2img_gallery { min-height: 768px; @@ -398,6 +410,15 @@ div#extras_scale_to_tab div.form{ min-width: 0.5em; } +div.toprow-compact-stylerow{ + margin: 0.5em 0; +} + +div.toprow-compact-tools{ + min-width: fit-content !important; + max-width: fit-content; +} + /* settings */ #quicksettings { align-items: end; @@ -520,7 +541,8 @@ table.popup-table .link{ height: 20px; background: #b4c0cc; border-radius: 3px !important; - top: -20px; + top: -14px; + left: 0px; width: 100%; } @@ -818,6 +840,10 @@ footer { /* extra networks UI */ +.extra-page .prompt{ + margin: 0 0 0.5em 0; +} + .extra-network-cards{ height: calc(100vh - 24rem); overflow: 
clip scroll;