diff --git a/configs/instruct-pix2pix.yaml b/configs/instruct-pix2pix.yaml
index 437ddcef0..4e896879d 100644
--- a/configs/instruct-pix2pix.yaml
+++ b/configs/instruct-pix2pix.yaml
@@ -20,8 +20,7 @@ model:
     conditioning_key: hybrid
     monitor: val/loss_simple_ema
     scale_factor: 0.18215
-    use_ema: true
-    load_ema: true
+    use_ema: false
 
     scheduler_config: # 10000 warmup steps
       target: ldm.lr_scheduler.LambdaLinearScheduler
diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py
index 8f2e753e6..6be6ef73c 100644
--- a/extensions-builtin/Lora/extra_networks_lora.py
+++ b/extensions-builtin/Lora/extra_networks_lora.py
@@ -1,4 +1,4 @@
-from modules import extra_networks
+from modules import extra_networks, shared
 import lora
 
 class ExtraNetworkLora(extra_networks.ExtraNetwork):
@@ -6,6 +6,12 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
         super().__init__('lora')
 
     def activate(self, p, params_list):
+        additional = shared.opts.sd_lora
+
+        if additional != "" and additional in lora.available_loras and len([x for x in params_list if x.items[0] == additional]) == 0:
+            p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
+            params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
+
         names = []
         multipliers = []
         for params in params_list:
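
Note for reviewers: activate() now injects the Lora selected in settings (opts.sd_lora) into every prompt unless that Lora is already present in params_list. A minimal standalone sketch of the same check-then-append logic; auto_add_lora and its plain-list params are illustrative stand-ins, not webui API (the real entries are ExtraNetworkParams objects with an .items list):

```python
def auto_add_lora(prompts, params_list, available, additional, multiplier=1.0):
    """Append <lora:additional:multiplier> to each prompt unless already requested."""
    already_requested = any(items[0] == additional for items in params_list)
    if additional and additional in available and not already_requested:
        prompts = [x + f"<lora:{additional}:{multiplier}>" for x in prompts]
        params_list = params_list + [[additional, multiplier]]
    return prompts, params_list

prompts, params = auto_add_lora(["a photo of a cat"], [], {"myLora"}, "myLora", 0.8)
print(prompts)  # ['a photo of a cat<lora:myLora:0.8>']
```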
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 544b228d9..2e860160e 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -1,4 +1,5 @@
 import torch
+import gradio as gr
 
 import lora
 import extra_networks_lora
@@ -31,5 +32,7 @@ script_callbacks.on_before_ui(before_ui)
 
 
 shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
+    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": [""] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras),
     "lora_apply_to_outputs": shared.OptionInfo(False, "Apply Lora to outputs rather than inputs when possible (experimental)"),
+
 }))
diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py
index 54a80d368..22cabcb0f 100644
--- a/extensions-builtin/Lora/ui_extra_networks_lora.py
+++ b/extensions-builtin/Lora/ui_extra_networks_lora.py
@@ -20,13 +20,14 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
             preview = None
             for file in previews:
                 if os.path.isfile(file):
-                    preview = "./file=" + file.replace('\\', '/') + "?mtime=" + str(os.path.getmtime(file))
+                    preview = self.link_preview(file)
                     break
 
             yield {
                 "name": name,
                 "filename": path,
                 "preview": preview,
+                "search_term": self.search_terms_from_path(lora_on_disk.filename),
                 "prompt": json.dumps(f"<lora:{name}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
                 "local_preview": path + ".png",
             }
diff --git a/html/extra-networks-card.html b/html/extra-networks-card.html
index aa9fca87e..8a5e2fbd2 100644
--- a/html/extra-networks-card.html
+++ b/html/extra-networks-card.html
@@ -4,6 +4,7 @@
 			<ul>
 				<a href="#" title="replace preview image with currently selected in gallery" onclick={save_card_preview}>replace preview</a>
 			</ul>
+			<span style="display:none" class='search_term'>{search_term}</span>
 		</div>
 		<span class='name'>{name}</span>
 	</div>
diff --git a/javascript/extensions.js b/javascript/extensions.js
index ac6e35b96..c593cd2e5 100644
--- a/javascript/extensions.js
+++ b/javascript/extensions.js
@@ -1,7 +1,8 @@
 
 function extensions_apply(_, _){
-    disable = []
-    update = []
+    var disable = []
+    var update = []
+
     gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){
         if(x.name.startsWith("enable_") && ! x.checked)
             disable.push(x.name.substr(7))
@@ -16,11 +17,24 @@ function extensions_apply(_, _){
 }
 
 function extensions_check(){
+    var disable = []
+
+    gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){
+        if(x.name.startsWith("enable_") && ! x.checked)
+            disable.push(x.name.substr(7))
+    })
+
     gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x){
         x.innerHTML = "Loading..."
     })
 
-    return []
+
+    var id = randomId()
+    requestProgress(id, gradioApp().getElementById('extensions_installed_top'), null, function(){
+
+    })
+
+    return [id, JSON.stringify(disable)]
 }
 
 function install_extension_from_index(button, url){
diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js
index c5a9adb37..17bf20004 100644
--- a/javascript/extraNetworks.js
+++ b/javascript/extraNetworks.js
@@ -16,7 +16,7 @@ function setupExtraNetworksForTab(tabname){
         searchTerm = search.value.toLowerCase()
 
         gradioApp().querySelectorAll('#'+tabname+'_extra_tabs div.card').forEach(function(elem){
-            text = elem.querySelector('.name').textContent.toLowerCase()
+            text = elem.querySelector('.name').textContent.toLowerCase() + " " + elem.querySelector('.search_term').textContent.toLowerCase()
             elem.style.display = text.indexOf(searchTerm) == -1 ? "none" : ""
         })
     });
@@ -48,10 +48,39 @@ function setupExtraNetworks(){
 
 onUiLoaded(setupExtraNetworks)
 
+var re_extranet   =    /<([^:]+:[^:]+):[\d\.]+>/;
+var re_extranet_g = /\s+<([^:]+:[^:]+):[\d\.]+>/g;
+
+function tryToRemoveExtraNetworkFromPrompt(textarea, text){
+    var m = text.match(re_extranet)
+    if(! m) return false
+
+    var partToSearch = m[1]
+    var replaced = false
+    var newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found, index){
+        m = found.match(re_extranet);
+        if(m[1] == partToSearch){
+            replaced = true;
+            return ""
+        }
+        return found;
+    })
+
+    if(replaced){
+        textarea.value = newTextareaText
+        return true;
+    }
+
+    return false
+}
+
 function cardClicked(tabname, textToAdd, allowNegativePrompt){
     var textarea = allowNegativePrompt ? activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea")
 
-    textarea.value = textarea.value + " " + textToAdd
+    if(! tryToRemoveExtraNetworkFromPrompt(textarea, textToAdd)){
+        textarea.value = textarea.value + " " + textToAdd
+    }
+
     updateInput(textarea)
 }
 
@@ -67,3 +96,12 @@ function saveCardPreview(event, tabname, filename){
     event.stopPropagation()
     event.preventDefault()
 }
+
+function extraNetworksSearchButton(tabs_id, event){
+    var searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > div > textarea')
+    var button = event.target
+    var text = button.classList.contains("search-all") ? "" : button.textContent.trim()
+
+    searchTextarea.value = text
+    updateInput(searchTextarea)
+}
\ No newline at end of file
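
The cardClicked change above turns clicking an extra-networks card into a toggle: if the prompt already carries a tag with the same type:name part, the tag is removed instead of appended again. A Python rendering of the same logic, with regexes equivalent to re_extranet and re_extranet_g (illustration only, not webui code):

```python
import re

re_extranet = re.compile(r'<([^:]+:[^:]+):[\d.]+>')
re_extranet_g = re.compile(r'\s+<([^:]+:[^:]+):[\d.]+>')

def toggle_extra_network(prompt, tag):
    """Remove an existing <type:name:weight> tag from prompt, or append tag."""
    m = re_extranet.search(tag)
    if not m:
        return prompt + " " + tag
    part_to_search = m.group(1)  # e.g. "lora:myLora"; the weight is ignored
    replaced = False

    def sub(found):
        nonlocal replaced
        if re_extranet.search(found.group(0)).group(1) == part_to_search:
            replaced = True
            return ""
        return found.group(0)

    new_prompt = re_extranet_g.sub(sub, prompt)
    return new_prompt if replaced else prompt + " " + tag

p = toggle_extra_network("a cat", "<lora:myLora:0.8>")
print(p)                                             # a cat <lora:myLora:0.8>
print(toggle_extra_network(p, "<lora:myLora:0.8>"))  # a cat
```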
diff --git a/javascript/ui.js b/javascript/ui.js
index ba72623c8..b7a8268a8 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -191,6 +191,28 @@ function confirm_clear_prompt(prompt, negative_prompt) {
     return [prompt, negative_prompt]
 }
 
+
+promptTokenCountUpdateFuncs = {}
+
+function recalculatePromptTokens(name){
+    if(promptTokenCountUpdateFuncs[name]){
+        promptTokenCountUpdateFuncs[name]()
+    }
+}
+
+function recalculate_prompts_txt2img(){
+    recalculatePromptTokens('txt2img_prompt')
+    recalculatePromptTokens('txt2img_neg_prompt')
+    return args_to_array(arguments);
+}
+
+function recalculate_prompts_img2img(){
+    recalculatePromptTokens('img2img_prompt')
+    recalculatePromptTokens('img2img_neg_prompt')
+    return args_to_array(arguments);
+}
+
+
 opts = {}
 onUiUpdate(function(){
 	if(Object.keys(opts).length != 0) return;
@@ -232,14 +254,12 @@ onUiUpdate(function(){
             return
         }
 
-
         prompt.parentElement.insertBefore(counter, prompt)
         counter.classList.add("token-counter")
         prompt.parentElement.style.position = "relative"
 
-		textarea.addEventListener("input", function(){
-		    update_token_counter(id_button);
-		});
+		promptTokenCountUpdateFuncs[id] = function(){ update_token_counter(id_button); }
+		textarea.addEventListener("input", promptTokenCountUpdateFuncs[id]);
     }
 
     registerTextarea('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button')
@@ -273,7 +293,7 @@ onOptionsChanged(function(){
 
 let txt2img_textarea, img2img_textarea = undefined;
 let wait_time = 800
-let token_timeout;
+let token_timeouts = {};
 
 function update_txt2img_tokens(...args) {
 	update_token_counter("txt2img_token_button")
@@ -290,9 +310,9 @@ function update_img2img_tokens(...args) {
 }
 
 function update_token_counter(button_id) {
-	if (token_timeout)
-		clearTimeout(token_timeout);
-	token_timeout = setTimeout(() => gradioApp().getElementById(button_id)?.click(), wait_time);
+	if (token_timeouts[button_id])
+		clearTimeout(token_timeouts[button_id]);
+	token_timeouts[button_id] = setTimeout(() => gradioApp().getElementById(button_id)?.click(), wait_time);
 }
 
 function restart_reload(){
@@ -309,3 +329,10 @@ function updateInput(target){
 	Object.defineProperty(e, "target", {value: target})
 	target.dispatchEvent(e);
 }
+
+
+var desiredCheckpointName = null;
+function selectCheckpoint(name){
+    desiredCheckpointName = name;
+    gradioApp().getElementById('change_checkpoint').click()
+}
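
The token_timeouts change replaces the single shared debounce timer with one timer per counter button, so typing in the txt2img prompt no longer cancels a pending recount for img2img (and vice versa). A small Python sketch of the same per-key debounce, with threading.Timer standing in for setTimeout and illustrative names throughout:

```python
import threading

WAIT_SECONDS = 0.8      # mirrors wait_time = 800 ms
token_timeouts = {}     # one pending timer per button id

def update_token_counter(button_id, callback):
    """Debounce callback per button_id: restart only that button's timer."""
    if button_id in token_timeouts:
        token_timeouts[button_id].cancel()
    token_timeouts[button_id] = threading.Timer(WAIT_SECONDS, callback)
    token_timeouts[button_id].start()

update_token_counter("txt2img_token_button", lambda: print("recount txt2img"))
update_token_counter("img2img_token_button", lambda: print("recount img2img"))
# both fire ~0.8 s later; with one shared timer the first would be cancelled
```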
diff --git a/launch.py b/launch.py
index 370920de9..25909469c 100644
--- a/launch.py
+++ b/launch.py
@@ -223,6 +223,7 @@ def prepare_environment():
     requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
     commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
 
+    xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.16rc425')
     gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
     clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
     openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b")
@@ -282,7 +283,7 @@ def prepare_environment():
     if (not is_installed("xformers") or reinstall_xformers) and xformers:
         if platform.system() == "Windows":
             if platform.python_version().startswith("3.10"):
-                run_pip(f"install -U -I --no-deps xformers==0.0.16rc425", "xformers")
+                run_pip(f"install -U -I --no-deps {xformers_package}", "xformers")
             else:
                 print("Installation of xformers is not supported in this version of Python.")
                 print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
diff --git a/modules/devices.py b/modules/devices.py
index 4687944e9..655ca1d3f 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -87,6 +87,14 @@ dtype_unet = torch.float16
 unet_needs_upcast = False
 
 
+def cond_cast_unet(input):
+    return input.to(dtype_unet) if unet_needs_upcast else input
+
+
+def cond_cast_float(input):
+    return input.float() if unet_needs_upcast else input
+
+
 def randn(seed, shape):
     torch.manual_seed(seed)
     if device.type == 'mps':
@@ -199,6 +207,3 @@ if has_mps():
         cumsum_needs_bool_fix = not torch.BoolTensor([True,True]).to(device=torch.device("mps"), dtype=torch.int64).equal(torch.BoolTensor([True,False]).to(torch.device("mps")).cumsum(0))
         torch.cumsum = lambda input, *args, **kwargs: ( cumsum_fix(input, orig_cumsum, *args, **kwargs) )
         torch.Tensor.cumsum = lambda self, *args, **kwargs: ( cumsum_fix(self, orig_Tensor_cumsum, *args, **kwargs) )
-        orig_narrow = torch.narrow
-        torch.narrow = lambda *args, **kwargs: ( orig_narrow(*args, **kwargs).clone() )
-
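
cond_cast_unet and cond_cast_float fold the repeated `x.to(dtype) if devices.unet_needs_upcast else x` ternaries from processing.py and sd_hijack.py into two helpers (the call sites are simplified later in this patch). A standalone sketch of their behavior, with the module globals inlined for illustration:

```python
import torch

dtype_unet = torch.float16
unet_needs_upcast = True  # set by the runtime based on --upcast-sampling

def cond_cast_unet(tensor):
    """Cast to the unet dtype only when upcast sampling is active."""
    return tensor.to(dtype_unet) if unet_needs_upcast else tensor

def cond_cast_float(tensor):
    """Cast back to float32 only when upcast sampling is active."""
    return tensor.float() if unet_needs_upcast else tensor

x = torch.zeros(2, 2)
print(cond_cast_unet(x).dtype)                   # torch.float16
print(cond_cast_float(cond_cast_unet(x)).dtype)  # torch.float32
```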
diff --git a/modules/extra_networks_hypernet.py b/modules/extra_networks_hypernet.py
index ff279a1f4..d3a4d7adc 100644
--- a/modules/extra_networks_hypernet.py
+++ b/modules/extra_networks_hypernet.py
@@ -1,4 +1,4 @@
-from modules import extra_networks
-from modules import extra_networks
+from modules import extra_networks, shared
 from modules.hypernetworks import hypernetwork
 
 
@@ -7,6 +7,12 @@ class ExtraNetworkHypernet(extra_networks.ExtraNetwork):
         super().__init__('hypernet')
 
     def activate(self, p, params_list):
+        additional = shared.opts.sd_hypernetwork
+
+        if additional != "" and additional in shared.hypernetworks and len([x for x in params_list if x.items[0] == additional]) == 0:
+            p.all_prompts = [x + f"<hypernet:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
+            params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
+
         names = []
         multipliers = []
         for params in params_list:
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 773c5c0e8..fc9e17aa2 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -1,4 +1,5 @@
 import base64
+import html
 import io
 import math
 import os
@@ -11,19 +12,28 @@ from modules import shared, ui_tempdir, script_callbacks
 import tempfile
 from PIL import Image
 
-re_param_code = r'\s*([\w ]+):\s*("(?:\\|\"|[^\"])+"|[^,]*)(?:,|$)'
+re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)'
 re_param = re.compile(re_param_code)
-re_params = re.compile(r"^(?:" + re_param_code + "){3,}$")
 re_imagesize = re.compile(r"^(\d+)x(\d+)$")
 re_hypernet_hash = re.compile("\(([0-9a-f]+)\)$")
 type_of_gr_update = type(gr.update())
+
 paste_fields = {}
-bind_list = []
+registered_param_bindings = []
+
+
+class ParamBinding:
+    def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None):
+        self.paste_button = paste_button
+        self.tabname = tabname
+        self.source_text_component = source_text_component
+        self.source_image_component = source_image_component
+        self.source_tabname = source_tabname
+        self.override_settings_component = override_settings_component
 
 
 def reset():
     paste_fields.clear()
-    bind_list.clear()
 
 
 def quote(text):
@@ -75,26 +85,6 @@ def add_paste_fields(tabname, init_img, fields):
         modules.ui.img2img_paste_fields = fields
 
 
-def integrate_settings_paste_fields(component_dict):
-    from modules import ui
-
-    settings_map = {
-        'CLIP_stop_at_last_layers': 'Clip skip',
-        'inpainting_mask_weight': 'Conditional mask weight',
-        'sd_model_checkpoint': 'Model hash',
-        'eta_noise_seed_delta': 'ENSD',
-        'initial_noise_multiplier': 'Noise multiplier',
-    }
-    settings_paste_fields = [
-        (component_dict[k], lambda d, k=k, v=v: ui.apply_setting(k, d.get(v, None)))
-        for k, v in settings_map.items()
-    ]
-
-    for tabname, info in paste_fields.items():
-        if info["fields"] is not None:
-            info["fields"] += settings_paste_fields
-
-
 def create_buttons(tabs_list):
     buttons = {}
     for tab in tabs_list:
@@ -102,9 +92,60 @@ def create_buttons(tabs_list):
     return buttons
 
 
-#if send_generate_info is a tab name, mean generate_info comes from the params fields of the tab
 def bind_buttons(buttons, send_image, send_generate_info):
-    bind_list.append([buttons, send_image, send_generate_info])
+    """old function for backwards compatibility; do not use this, use register_paste_params_button"""
+    for tabname, button in buttons.items():
+        source_text_component = send_generate_info if isinstance(send_generate_info, gr.components.Component) else None
+        source_tabname = send_generate_info if isinstance(send_generate_info, str) else None
+
+        register_paste_params_button(ParamBinding(paste_button=button, tabname=tabname, source_text_component=source_text_component, source_image_component=send_image, source_tabname=source_tabname))
+
+
+def register_paste_params_button(binding: ParamBinding):
+    registered_param_bindings.append(binding)
+
+
+def connect_paste_params_buttons():
+    binding: ParamBinding
+    for binding in registered_param_bindings:
+        destination_image_component = paste_fields[binding.tabname]["init_img"]
+        fields = paste_fields[binding.tabname]["fields"]
+
+        destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None)
+        destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None)
+
+        if binding.source_image_component and destination_image_component:
+            if isinstance(binding.source_image_component, gr.Gallery):
+                func = send_image_and_dimensions if destination_width_component else image_from_url_text
+                jsfunc = "extract_image_from_gallery"
+            else:
+                func = send_image_and_dimensions if destination_width_component else lambda x: x
+                jsfunc = None
+
+            binding.paste_button.click(
+                fn=func,
+                _js=jsfunc,
+                inputs=[binding.source_image_component],
+                outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component],
+            )
+
+        if binding.source_text_component is not None and fields is not None:
+            connect_paste(binding.paste_button, fields, binding.source_text_component, binding.override_settings_component, binding.tabname)
+
+        if binding.source_tabname is not None and fields is not None:
+            paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else [])
+            binding.paste_button.click(
+                fn=lambda *x: x,
+                inputs=[field for field, name in paste_fields[binding.source_tabname]["fields"] if name in paste_field_names],
+                outputs=[field for field, name in fields if name in paste_field_names],
+            )
+
+        binding.paste_button.click(
+            fn=None,
+            _js=f"switch_to_{binding.tabname}",
+            inputs=None,
+            outputs=None,
+        )
 
 
 def send_image_and_dimensions(x):
@@ -123,49 +164,6 @@ def send_image_and_dimensions(x):
     return img, w, h
 
 
-def run_bind():
-    for buttons, source_image_component, send_generate_info in bind_list:
-        for tab in buttons:
-            button = buttons[tab]
-            destination_image_component = paste_fields[tab]["init_img"]
-            fields = paste_fields[tab]["fields"]
-
-            destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None)
-            destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None)
-
-            if source_image_component and destination_image_component:
-                if isinstance(source_image_component, gr.Gallery):
-                    func = send_image_and_dimensions if destination_width_component else image_from_url_text
-                    jsfunc = "extract_image_from_gallery"
-                else:
-                    func = send_image_and_dimensions if destination_width_component else lambda x: x
-                    jsfunc = None
-
-                button.click(
-                    fn=func,
-                    _js=jsfunc,
-                    inputs=[source_image_component],
-                    outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component],
-                )
-
-            if send_generate_info and fields is not None:
-                if send_generate_info in paste_fields:
-                    paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else [])
-                    button.click(
-                        fn=lambda *x: x,
-                        inputs=[field for field, name in paste_fields[send_generate_info]["fields"] if name in paste_field_names],
-                        outputs=[field for field, name in fields if name in paste_field_names],
-                    )
-                else:
-                    connect_paste(button, fields, send_generate_info)
-
-            button.click(
-                fn=None,
-                _js=f"switch_to_{tab}",
-                inputs=None,
-                outputs=None,
-            )
-
 
 def find_hypernetwork_key(hypernet_name, hypernet_hash=None):
     """Determines the config parameter name to use for the hypernet based on the parameters in the infotext.
@@ -243,7 +241,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
     done_with_prompt = False
 
     *lines, lastline = x.strip().split("\n")
-    if not re_params.match(lastline):
+    if len(re_param.findall(lastline)) < 3:
         lines.append(lastline)
         lastline = ''
 
@@ -262,6 +260,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
     res["Negative prompt"] = negative_prompt
 
     for k, v in re_param.findall(lastline):
+        v = v[1:-1] if v[0] == '"' and v[-1] == '"' else v
         m = re_imagesize.match(v)
         if m is not None:
             res[k+"-1"] = m.group(1)
@@ -286,7 +285,50 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
     return res
 
 
-def connect_paste(button, paste_fields, input_comp, jsfunc=None):
+settings_map = {}
+
+infotext_to_setting_name_mapping = [
+    ('Clip skip', 'CLIP_stop_at_last_layers'),
+    ('Conditional mask weight', 'inpainting_mask_weight'),
+    ('Model hash', 'sd_model_checkpoint'),
+    ('ENSD', 'eta_noise_seed_delta'),
+    ('Noise multiplier', 'initial_noise_multiplier'),
+    ('Eta', 'eta_ancestral'),
+    ('Eta DDIM', 'eta_ddim'),
+    ('Discard penultimate sigma', 'always_discard_next_to_last_sigma'),
+]
+
+
+def create_override_settings_dict(text_pairs):
+    """creates processing's override_settings parameters from gradio's multiselect
+
+    Example input:
+        ['Clip skip: 2', 'Model hash: e6e99610c4', 'ENSD: 31337']
+
+    Example output:
+        {'CLIP_stop_at_last_layers': 2, 'sd_model_checkpoint': 'e6e99610c4', 'eta_noise_seed_delta': 31337}
+    """
+
+    res = {}
+
+    params = {}
+    for pair in text_pairs:
+        k, v = pair.split(":", maxsplit=1)
+
+        params[k] = v.strip()
+
+    for param_name, setting_name in infotext_to_setting_name_mapping:
+        value = params.get(param_name, None)
+
+        if value is None:
+            continue
+
+        res[setting_name] = shared.opts.cast_value(setting_name, value)
+
+    return res
+
+
+def connect_paste(button, paste_fields, input_comp, override_settings_component, tabname):
     def paste_func(prompt):
         if not prompt and not shared.cmd_opts.hide_ui_dir_config:
             filename = os.path.join(data_path, "params.txt")
@@ -323,9 +365,35 @@ def connect_paste(button, paste_fields, input_comp, jsfunc=None):
 
         return res
 
+    if override_settings_component is not None:
+        def paste_settings(params):
+            vals = {}
+
+            for param_name, setting_name in infotext_to_setting_name_mapping:
+                v = params.get(param_name, None)
+                if v is None:
+                    continue
+
+                if setting_name == "sd_model_checkpoint" and shared.opts.disable_weights_auto_swap:
+                    continue
+
+                v = shared.opts.cast_value(setting_name, v)
+                current_value = getattr(shared.opts, setting_name, None)
+
+                if v == current_value:
+                    continue
+
+                vals[param_name] = v
+
+            vals_pairs = [f"{k}: {v}" for k, v in vals.items()]
+
+            return gr.Dropdown.update(value=vals_pairs, choices=vals_pairs, visible=len(vals_pairs) > 0)
+
+        paste_fields = paste_fields + [(override_settings_component, paste_settings)]
+
     button.click(
         fn=paste_func,
-        _js=jsfunc,
+        _js=f"recalculate_prompts_{tabname}",
         inputs=[input_comp],
         outputs=[x[0] for x in paste_fields],
     )
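
To make the new override flow concrete, here is a self-contained sketch of what create_override_settings_dict does with the strings produced by the override-settings multiselect. cast_value below is a simplified stand-in for shared.opts.cast_value, which coerces each value to its option's declared type; the mapping is truncated to three entries:

```python
infotext_to_setting_name_mapping = [
    ('Clip skip', 'CLIP_stop_at_last_layers'),
    ('Model hash', 'sd_model_checkpoint'),
    ('ENSD', 'eta_noise_seed_delta'),
]

def cast_value(setting_name, value):
    # stand-in: the real shared.opts.cast_value uses the option's type
    return value if setting_name == 'sd_model_checkpoint' else int(value)

def create_override_settings_dict(text_pairs):
    params = {}
    for pair in text_pairs:
        k, v = pair.split(":", maxsplit=1)
        params[k] = v.strip()
    return {
        setting: cast_value(setting, params[name])
        for name, setting in infotext_to_setting_name_mapping
        if name in params
    }

print(create_override_settings_dict(['Clip skip: 2', 'Model hash: e6e99610c4', 'ENSD: 31337']))
# {'CLIP_stop_at_last_layers': 2, 'sd_model_checkpoint': 'e6e99610c4', 'eta_noise_seed_delta': 31337}
```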
diff --git a/modules/images.py b/modules/images.py
index 0bc3d5241..ae3cdaf4a 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -36,6 +36,8 @@ def image_grid(imgs, batch_size=1, rows=None):
         else:
             rows = math.sqrt(len(imgs))
             rows = round(rows)
+    if rows > len(imgs):
+        rows = len(imgs)
 
     cols = math.ceil(len(imgs) / rows)
 
diff --git a/modules/img2img.py b/modules/img2img.py
index fe9447c7e..f813299c9 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -7,6 +7,7 @@ import numpy as np
 from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops
 
 from modules import devices, sd_samplers
+from modules.generation_parameters_copypaste import create_override_settings_dict
 from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
 from modules.shared import opts, state
 import modules.shared as shared
@@ -21,8 +22,10 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
 
     images = shared.listfiles(input_dir)
 
-    inpaint_masks = shared.listfiles(inpaint_mask_dir)
-    is_inpaint_batch = inpaint_mask_dir and len(inpaint_masks) > 0
+    is_inpaint_batch = False
+    if inpaint_mask_dir:
+        inpaint_masks = shared.listfiles(inpaint_mask_dir)
+        is_inpaint_batch = len(inpaint_masks) > 0
     if is_inpaint_batch:
         print(f"\nInpaint batch is enabled. {len(inpaint_masks)} masks found.")
 
@@ -73,7 +76,9 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
                 processed_image.save(os.path.join(output_dir, filename))
 
 
-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
+    override_settings = create_override_settings_dict(override_settings_texts)
+
     is_batch = mode == 5
 
     if mode == 0:  # img2img
@@ -140,6 +145,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
         inpaint_full_res=inpaint_full_res,
         inpaint_full_res_padding=inpaint_full_res_padding,
         inpainting_mask_invert=inpainting_mask_invert,
+        override_settings=override_settings,
     )
 
     p.scripts = modules.scripts.scripts_txt2img
diff --git a/modules/processing.py b/modules/processing.py
index 5072fc409..e544c2e16 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -173,8 +173,7 @@ class StableDiffusionProcessing:
         midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device)
         midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size)
 
-        conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image.to(devices.dtype_vae) if devices.unet_needs_upcast else source_image))
-        conditioning_image = conditioning_image.float() if devices.unet_needs_upcast else conditioning_image
+        conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image))
         conditioning = torch.nn.functional.interpolate(
             self.sd_model.depth_model(midas_in),
             size=conditioning_image.shape[2:],
@@ -218,7 +217,7 @@ class StableDiffusionProcessing:
         )
 
         # Encode the new masked image using first stage of network.
-        conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image.to(devices.dtype_vae) if devices.unet_needs_upcast else conditioning_image))
+        conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image))
 
         # Create the concatenated conditioning tensor to be fed to `c_concat`
         conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:])
@@ -229,16 +228,18 @@ class StableDiffusionProcessing:
         return image_conditioning
 
     def img2img_image_conditioning(self, source_image, latent_image, image_mask=None):
+        source_image = devices.cond_cast_float(source_image)
+
         # HACK: Using introspection as the Depth2Image model doesn't appear to uniquely
         # identify itself with a field common to all models. The conditioning_key is also hybrid.
         if isinstance(self.sd_model, LatentDepth2ImageDiffusion):
-            return self.depth2img_image_conditioning(source_image.float() if devices.unet_needs_upcast else source_image)
+            return self.depth2img_image_conditioning(source_image)
 
         if self.sd_model.cond_stage_key == "edit":
             return self.edit_image_conditioning(source_image)
 
         if self.sampler.conditioning_key in {'hybrid', 'concat'}:
-            return self.inpainting_image_conditioning(source_image.float() if devices.unet_needs_upcast else source_image, latent_image, image_mask=image_mask)
+            return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
 
         # Dummy zero conditioning if we're not using inpainting or depth model.
         return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
@@ -418,7 +419,7 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
 
 def decode_first_stage(model, x):
     with devices.autocast(disable=x.dtype == devices.dtype_vae):
-        x = model.decode_first_stage(x.to(devices.dtype_vae) if devices.unet_needs_upcast else x)
+        x = model.decode_first_stage(x)
 
     return x
 
@@ -449,14 +450,11 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
         "Size": f"{p.width}x{p.height}",
         "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
         "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
-        "Batch size": (None if p.batch_size < 2 else p.batch_size),
-        "Batch pos": (None if p.batch_size < 2 else position_in_batch),
         "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
         "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
         "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
         "Denoising strength": getattr(p, 'denoising_strength', None),
         "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
-        "Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
         "Clip skip": None if clip_skip <= 1 else clip_skip,
         "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta,
     }
@@ -1007,7 +1005,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
 
         image = torch.from_numpy(batch_images)
         image = 2. * image - 1.
-        image = image.to(device=shared.device, dtype=devices.dtype_vae if devices.unet_needs_upcast else None)
+        image = image.to(shared.device)
 
         self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
 
diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py
index 47f702513..aad4a6298 100644
--- a/modules/realesrgan_model.py
+++ b/modules/realesrgan_model.py
@@ -46,7 +46,7 @@ class UpscalerRealESRGAN(Upscaler):
             scale=info.scale,
             model_path=info.local_data_path,
             model=info.model(),
-            half=not cmd_opts.no_half,
+            half=not cmd_opts.no_half and not cmd_opts.upcast_sampling,
             tile=opts.ESRGAN_tile,
             tile_pad=opts.ESRGAN_tile_overlap,
         )
diff --git a/modules/scripts.py b/modules/scripts.py
index 6e9dc0c03..24056a12f 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -345,6 +345,20 @@ class ScriptRunner:
             outputs=[script.group for script in self.selectable_scripts]
         )
 
+        self.script_load_ctr = 0
+        def onload_script_visibility(params):
+            title = params.get('Script', None)
+            if title:
+                title_index = self.titles.index(title)
+                visibility = title_index == self.script_load_ctr
+                self.script_load_ctr = (self.script_load_ctr + 1) % len(self.titles)
+                return gr.update(visible=visibility)
+            else:
+                return gr.update(visible=False)
+
+        self.infotext_fields.append((dropdown, lambda x: gr.update(value=x.get('Script', 'None'))))
+        self.infotext_fields.extend([(script.group, onload_script_visibility) for script in self.selectable_scripts])
+
         return inputs
 
     def run(self, p, *args):
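
The script_load_ctr mechanism is subtle: the same onload_script_visibility callback is registered once per selectable script group, and the registrations run in order when infotext fields are restored. The counter therefore tracks which group is currently being queried, and only the group whose position matches the restored 'Script' title becomes visible. A stripped-down sketch under that reading (plain booleans instead of gr.update(visible=...), and illustrative titles):

```python
titles = ["Prompt matrix", "X/Y plot", "SD upscale"]
script_load_ctr = 0

def onload_script_visibility(params):
    """Called once per script group, in order; show only the matching one."""
    global script_load_ctr
    title = params.get('Script', None)
    if title:
        title_index = titles.index(title)
        visibility = title_index == script_load_ctr
        script_load_ctr = (script_load_ctr + 1) % len(titles)
        return visibility
    return False

params = {'Script': 'X/Y plot'}
print([onload_script_visibility(params) for _ in titles])  # [False, True, False]
```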
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index f9652d215..8fdc59909 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -131,6 +131,8 @@ class StableDiffusionModelHijack:
             m.cond_stage_model.wrapped.model.token_embedding = m.cond_stage_model.wrapped.model.token_embedding.wrapped
             m.cond_stage_model = m.cond_stage_model.wrapped
 
+        undo_optimizations()
+
         self.apply_circular(False)
         self.layers = None
         self.clip = None
@@ -171,7 +173,7 @@ class EmbeddingsWithFixes(torch.nn.Module):
         vecs = []
         for fixes, tensor in zip(batch_fixes, inputs_embeds):
             for offset, embedding in fixes:
-                emb = embedding.vec
+                emb = devices.cond_cast_unet(embedding.vec)
                 emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
                 tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]])
 
diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py
index a6ee577cb..45cf2b18e 100644
--- a/modules/sd_hijack_unet.py
+++ b/modules/sd_hijack_unet.py
@@ -55,8 +55,14 @@ class GELUHijack(torch.nn.GELU, torch.nn.Module):
 
 unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast
 CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast)
-CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).to(devices.dtype_unet), unet_needs_upcast)
+CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)
 if version.parse(torch.__version__) <= version.parse("1.13.1"):
     CondFunc('ldm.modules.diffusionmodules.util.GroupNorm32.forward', lambda orig_func, self, *args, **kwargs: orig_func(self.float(), *args, **kwargs), unet_needs_upcast)
     CondFunc('ldm.modules.attention.GEGLU.forward', lambda orig_func, self, x: orig_func(self.float(), x.float()).to(devices.dtype_unet), unet_needs_upcast)
     CondFunc('open_clip.transformer.ResidualAttentionBlock.__init__', lambda orig_func, *args, **kwargs: kwargs.update({'act_layer': GELUHijack}) and False or orig_func(*args, **kwargs), lambda _, *args, **kwargs: kwargs.get('act_layer') is None or kwargs['act_layer'] == torch.nn.GELU)
+
+first_stage_cond = lambda _, self, *args, **kwargs: devices.unet_needs_upcast and self.model.diffusion_model.dtype == torch.float16
+first_stage_sub = lambda orig_func, self, x, **kwargs: orig_func(self, x.to(devices.dtype_vae), **kwargs)
+CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
+CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
+CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond)
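
These registrations move the fp16/fp32 first-stage casting out of the call sites in processing.py (simplified earlier in this patch) and into conditional hooks on the LDM methods themselves. For readers unfamiliar with the helper, a minimal sketch of the CondFunc idea; the real modules.sd_hijack_utils.CondFunc resolves a dotted import path, while this simplified version wraps a callable directly:

```python
def cond_func(orig, sub, cond):
    """Route calls to sub(orig, ...) when cond(orig, ...) is truthy."""
    def wrapper(*args, **kwargs):
        if cond(orig, *args, **kwargs):
            return sub(orig, *args, **kwargs)
        return orig(*args, **kwargs)
    return wrapper

def decode_first_stage(x):  # stand-in for the LDM method
    return f"decoded({x})"

unet_is_fp16 = True  # mirrors first_stage_cond's dtype check
patched = cond_func(
    decode_first_stage,
    lambda orig, x: orig(f"{x}.to(dtype_vae)"),  # mirrors first_stage_sub
    lambda orig, x: unet_is_fp16,
)
print(patched("latents"))  # decoded(latents.to(dtype_vae))
```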
diff --git a/modules/sd_models.py b/modules/sd_models.py
index b2d48a510..300387a9b 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -41,6 +41,7 @@ class CheckpointInfo:
             name = name[1:]
 
         self.name = name
+        self.name_for_extra = os.path.splitext(os.path.basename(filename))[0]
         self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
         self.hash = model_hash(filename)
 
@@ -231,12 +232,10 @@ def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer):
 
 
 def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer):
-    title = checkpoint_info.title
     sd_model_hash = checkpoint_info.calculate_shorthash()
     timer.record("calculate hash")
 
-    if checkpoint_info.title != title:
-        shared.opts.data["sd_model_checkpoint"] = checkpoint_info.title
+    shared.opts.data["sd_model_checkpoint"] = checkpoint_info.title
 
     if state_dict is None:
         state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py
index 00217990b..91c217004 100644
--- a/modules/sd_models_config.py
+++ b/modules/sd_models_config.py
@@ -1,7 +1,9 @@
 import re
 import os
 
-from modules import shared, paths
+import torch
+
+from modules import shared, paths, sd_disable_initialization
 
 sd_configs_path = shared.sd_configs_path
 sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion")
@@ -16,12 +18,51 @@ config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml"
 config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
 config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
 
-re_parametrization_v = re.compile(r'-v\b')
+
+def is_using_v_parameterization_for_sd2(state_dict):
+    """
+    Detects whether the unet in state_dict uses v-parameterization. Returns True if it does.
+    """
+
+    import ldm.modules.diffusionmodules.openaimodel
+    from modules import devices
+
+    device = devices.cpu
+
+    with sd_disable_initialization.DisableInitialization():
+        unet = ldm.modules.diffusionmodules.openaimodel.UNetModel(
+            use_checkpoint=True,
+            use_fp16=False,
+            image_size=32,
+            in_channels=4,
+            out_channels=4,
+            model_channels=320,
+            attention_resolutions=[4, 2, 1],
+            num_res_blocks=2,
+            channel_mult=[1, 2, 4, 4],
+            num_head_channels=64,
+            use_spatial_transformer=True,
+            use_linear_in_transformer=True,
+            transformer_depth=1,
+            context_dim=1024,
+            legacy=False
+        )
+        unet.eval()
+
+    with torch.no_grad():
+        unet_sd = {k.replace("model.diffusion_model.", ""): v for k, v in state_dict.items() if "model.diffusion_model." in k}
+        unet.load_state_dict(unet_sd, strict=True)
+        unet.to(device=device, dtype=torch.float)
+
+        test_cond = torch.ones((1, 2, 1024), device=device) * 0.5
+        x_test = torch.ones((1, 4, 8, 8), device=device) * 0.5
+
+        out = (unet(x_test, torch.asarray([999], device=device), context=test_cond) - x_test).mean().item()
+
+    return out < -1
 
 
 def guess_model_config_from_state_dict(sd, filename):
-    fn = os.path.basename(filename)
-
     sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None)
     diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None)
 
@@ -31,7 +72,7 @@ def guess_model_config_from_state_dict(sd, filename):
     if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024:
         if diffusion_model_input.shape[1] == 9:
             return config_sd2_inpainting
-        elif re.search(re_parametrization_v, fn):
+        elif is_using_v_parameterization_for_sd2(sd):
             return config_sd2v
         else:
             return config_sd2
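
A likely reading of the out < -1 heuristic: under v-parameterization the network predicts

```latex
v_t = \alpha_t\,\epsilon - \sigma_t\,x_0,
\qquad \alpha_t = \sqrt{\bar\alpha_t},\;\; \sigma_t = \sqrt{1-\bar\alpha_t}
```

rather than the noise epsilon. At t = 999 the signal coefficient alpha_t is close to zero, so a v-model's output is roughly the negation of its input; feeding a constant 0.5 latent then drives mean(out - x) to around -1 or lower, while an epsilon-model shows no such anticorrelation. The probe runs the unet once on CPU in fp32, so it costs one small forward pass at load time instead of relying on a '-v' suffix in the filename.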
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index a7910b56e..28c2136fe 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -1,53 +1,11 @@
-from collections import namedtuple, deque
-import numpy as np
-from math import floor
-import torch
-import tqdm
-from PIL import Image
-import inspect
-import k_diffusion.sampling
-import torchsde._brownian.brownian_interval
-import ldm.models.diffusion.ddim
-import ldm.models.diffusion.plms
-from modules import prompt_parser, devices, processing, images, sd_vae_approx
+from modules import sd_samplers_compvis, sd_samplers_kdiffusion, shared
 
-from modules.shared import opts, cmd_opts, state
-import modules.shared as shared
-from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
-
-
-SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
-
-samplers_k_diffusion = [
-    ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {}),
-    ('Euler', 'sample_euler', ['k_euler'], {}),
-    ('LMS', 'sample_lms', ['k_lms'], {}),
-    ('Heun', 'sample_heun', ['k_heun'], {}),
-    ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}),
-    ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True}),
-    ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
-    ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
-    ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {}),
-    ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
-    ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
-    ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
-    ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
-    ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
-    ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
-    ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
-    ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras'}),
-]
-
-samplers_data_k_diffusion = [
-    SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
-    for label, funcname, aliases, options in samplers_k_diffusion
-    if hasattr(k_diffusion.sampling, funcname)
-]
+# imports for functions that previously were here and are used by other modules
+from modules.sd_samplers_common import samples_to_image_grid, sample_to_image
 
 all_samplers = [
-    *samplers_data_k_diffusion,
-    SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {}),
-    SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {}),
+    *sd_samplers_kdiffusion.samplers_data_k_diffusion,
+    *sd_samplers_compvis.samplers_data_compvis,
 ]
 all_samplers_map = {x.name: x for x in all_samplers}
 
@@ -73,8 +31,8 @@ def create_sampler(name, model):
 def set_samplers():
     global samplers, samplers_for_img2img
 
-    hidden = set(opts.hide_samplers)
-    hidden_img2img = set(opts.hide_samplers + ['PLMS'])
+    hidden = set(shared.opts.hide_samplers)
+    hidden_img2img = set(shared.opts.hide_samplers + ['PLMS'])
 
     samplers = [x for x in all_samplers if x.name not in hidden]
     samplers_for_img2img = [x for x in all_samplers if x.name not in hidden_img2img]
@@ -87,466 +45,3 @@ def set_samplers():
 
 
 set_samplers()
-
-sampler_extra_params = {
-    'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
-    'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
-    'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
-}
-
-
-def setup_img2img_steps(p, steps=None):
-    if opts.img2img_fix_steps or steps is not None:
-        requested_steps = (steps or p.steps)
-        steps = int(requested_steps / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
-        t_enc = requested_steps - 1
-    else:
-        steps = p.steps
-        t_enc = int(min(p.denoising_strength, 0.999) * steps)
-
-    return steps, t_enc
-
-
-approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2}
-
-
-def single_sample_to_image(sample, approximation=None):
-    if approximation is None:
-        approximation = approximation_indexes.get(opts.show_progress_type, 0)
-
-    if approximation == 2:
-        x_sample = sd_vae_approx.cheap_approximation(sample)
-    elif approximation == 1:
-        x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
-    else:
-        x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
-
-    x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
-    x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
-    x_sample = x_sample.astype(np.uint8)
-    return Image.fromarray(x_sample)
-
-
-def sample_to_image(samples, index=0, approximation=None):
-    return single_sample_to_image(samples[index], approximation)
-
-
-def samples_to_image_grid(samples, approximation=None):
-    return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
-
-
-def store_latent(decoded):
-    state.current_latent = decoded
-
-    if opts.live_previews_enable and opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
-        if not shared.parallel_processing_allowed:
-            shared.state.assign_current_image(sample_to_image(decoded))
-
-
-class InterruptedException(BaseException):
-    pass
-
-
-class VanillaStableDiffusionSampler:
-    def __init__(self, constructor, sd_model):
-        self.sampler = constructor(sd_model)
-        self.is_plms = hasattr(self.sampler, 'p_sample_plms')
-        self.orig_p_sample_ddim = self.sampler.p_sample_plms if self.is_plms else self.sampler.p_sample_ddim
-        self.mask = None
-        self.nmask = None
-        self.init_latent = None
-        self.sampler_noises = None
-        self.step = 0
-        self.stop_at = None
-        self.eta = None
-        self.default_eta = 0.0
-        self.config = None
-        self.last_latent = None
-
-        self.conditioning_key = sd_model.model.conditioning_key
-
-    def number_of_needed_noises(self, p):
-        return 0
-
-    def launch_sampling(self, steps, func):
-        state.sampling_steps = steps
-        state.sampling_step = 0
-
-        try:
-            return func()
-        except InterruptedException:
-            return self.last_latent
-
-    def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
-        if state.interrupted or state.skipped:
-            raise InterruptedException
-
-        if self.stop_at is not None and self.step > self.stop_at:
-            raise InterruptedException
-
-        # Have to unwrap the inpainting conditioning here to perform pre-processing
-        image_conditioning = None
-        if isinstance(cond, dict):
-            image_conditioning = cond["c_concat"][0]
-            cond = cond["c_crossattn"][0]
-            unconditional_conditioning = unconditional_conditioning["c_crossattn"][0]
-
-        conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
-        unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
-
-        assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers'
-        cond = tensor
-
-        # for DDIM, shapes must match, we can't just process cond and uncond independently;
-        # filling unconditional_conditioning with repeats of the last vector to match length is
-        # not 100% correct but should work well enough
-        if unconditional_conditioning.shape[1] < cond.shape[1]:
-            last_vector = unconditional_conditioning[:, -1:]
-            last_vector_repeated = last_vector.repeat([1, cond.shape[1] - unconditional_conditioning.shape[1], 1])
-            unconditional_conditioning = torch.hstack([unconditional_conditioning, last_vector_repeated])
-        elif unconditional_conditioning.shape[1] > cond.shape[1]:
-            unconditional_conditioning = unconditional_conditioning[:, :cond.shape[1]]
-
-        if self.mask is not None:
-            img_orig = self.sampler.model.q_sample(self.init_latent, ts)
-            x_dec = img_orig * self.mask + self.nmask * x_dec
-
-        # Wrap the image conditioning back up since the DDIM code can accept the dict directly.
-        # Note that they need to be lists because it just concatenates them later.
-        if image_conditioning is not None:
-            cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]}
-            unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
-
-        res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs)
-
-        if self.mask is not None:
-            self.last_latent = self.init_latent * self.mask + self.nmask * res[1]
-        else:
-            self.last_latent = res[1]
-
-        store_latent(self.last_latent)
-
-        self.step += 1
-        state.sampling_step = self.step
-        shared.total_tqdm.update()
-
-        return res
-
-    def initialize(self, p):
-        self.eta = p.eta if p.eta is not None else opts.eta_ddim
-
-        for fieldname in ['p_sample_ddim', 'p_sample_plms']:
-            if hasattr(self.sampler, fieldname):
-                setattr(self.sampler, fieldname, self.p_sample_ddim_hook)
-
-        self.mask = p.mask if hasattr(p, 'mask') else None
-        self.nmask = p.nmask if hasattr(p, 'nmask') else None
-
-    def adjust_steps_if_invalid(self, p, num_steps):
-        if (self.config.name == 'DDIM' and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS'):
-            valid_step = 999 / (1000 // num_steps)
-            if valid_step == floor(valid_step):
-                return int(valid_step) + 1
-        
-        return num_steps
-
-    def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
-        steps, t_enc = setup_img2img_steps(p, steps)
-        steps = self.adjust_steps_if_invalid(p, steps)
-        self.initialize(p)
-
-        self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
-        x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
-
-        self.init_latent = x
-        self.last_latent = x
-        self.step = 0
-
-        # Wrap the conditioning models with additional image conditioning for inpainting model
-        if image_conditioning is not None:
-            conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
-            unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
-
-        samples = self.launch_sampling(t_enc + 1, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))
-
-        return samples
-
-    def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
-        self.initialize(p)
-
-        self.init_latent = None
-        self.last_latent = x
-        self.step = 0
-
-        steps = self.adjust_steps_if_invalid(p, steps or p.steps)
-
-        # Wrap the conditioning models with additional image conditioning for inpainting model
-        # dummy_for_plms is needed because PLMS code checks the first item in the dict to have the right shape
-        if image_conditioning is not None:
-            conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_concat": [image_conditioning]}
-            unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_concat": [image_conditioning]}
-
-        samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
-
-        return samples_ddim
-
-
-class CFGDenoiser(torch.nn.Module):
-    def __init__(self, model):
-        super().__init__()
-        self.inner_model = model
-        self.mask = None
-        self.nmask = None
-        self.init_latent = None
-        self.step = 0
-
-    def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
-        denoised_uncond = x_out[-uncond.shape[0]:]
-        denoised = torch.clone(denoised_uncond)
-
-        for i, conds in enumerate(conds_list):
-            for cond_index, weight in conds:
-                denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
-
-        return denoised
-
-    def forward(self, x, sigma, uncond, cond, cond_scale, image_cond):
-        if state.interrupted or state.skipped:
-            raise InterruptedException
-
-        conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
-        uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
-
-        batch_size = len(conds_list)
-        repeats = [len(conds_list[i]) for i in range(batch_size)]
-
-        x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
-        image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
-        sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
-
-        denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps)
-        cfg_denoiser_callback(denoiser_params)
-        x_in = denoiser_params.x
-        image_cond_in = denoiser_params.image_cond
-        sigma_in = denoiser_params.sigma
-
-        if tensor.shape[1] == uncond.shape[1]:
-            cond_in = torch.cat([tensor, uncond])
-
-            if shared.batch_cond_uncond:
-                x_out = self.inner_model(x_in, sigma_in, cond={"c_crossattn": [cond_in], "c_concat": [image_cond_in]})
-            else:
-                x_out = torch.zeros_like(x_in)
-                for batch_offset in range(0, x_out.shape[0], batch_size):
-                    a = batch_offset
-                    b = a + batch_size
-                    x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [cond_in[a:b]], "c_concat": [image_cond_in[a:b]]})
-        else:
-            x_out = torch.zeros_like(x_in)
-            batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
-            for batch_offset in range(0, tensor.shape[0], batch_size):
-                a = batch_offset
-                b = min(a + batch_size, tensor.shape[0])
-                x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [tensor[a:b]], "c_concat": [image_cond_in[a:b]]})
-
-            x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]})
-
-        devices.test_for_nans(x_out, "unet")
-
-        if opts.live_preview_content == "Prompt":
-            store_latent(x_out[0:uncond.shape[0]])
-        elif opts.live_preview_content == "Negative prompt":
-            store_latent(x_out[-uncond.shape[0]:])
-
-        denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
-
-        if self.mask is not None:
-            denoised = self.init_latent * self.mask + self.nmask * denoised
-
-        self.step += 1
-
-        return denoised
-
-
-class TorchHijack:
-    def __init__(self, sampler_noises):
-        # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based
-        # implementation.
-        self.sampler_noises = deque(sampler_noises)
-
-    def __getattr__(self, item):
-        if item == 'randn_like':
-            return self.randn_like
-
-        if hasattr(torch, item):
-            return getattr(torch, item)
-
-        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
-
-    def randn_like(self, x):
-        if self.sampler_noises:
-            noise = self.sampler_noises.popleft()
-            if noise.shape == x.shape:
-                return noise
-
-        if x.device.type == 'mps':
-            return torch.randn_like(x, device=devices.cpu).to(x.device)
-        else:
-            return torch.randn_like(x)
-
-
-# MPS fix for randn in torchsde
-def torchsde_randn(size, dtype, device, seed):
-    if device.type == 'mps':
-        generator = torch.Generator(devices.cpu).manual_seed(int(seed))
-        return torch.randn(size, dtype=dtype, device=devices.cpu, generator=generator).to(device)
-    else:
-        generator = torch.Generator(device).manual_seed(int(seed))
-        return torch.randn(size, dtype=dtype, device=device, generator=generator)
-
-
-torchsde._brownian.brownian_interval._randn = torchsde_randn
-
-
-class KDiffusionSampler:
-    def __init__(self, funcname, sd_model):
-        denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser
-
-        self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
-        self.funcname = funcname
-        self.func = getattr(k_diffusion.sampling, self.funcname)
-        self.extra_params = sampler_extra_params.get(funcname, [])
-        self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
-        self.sampler_noises = None
-        self.stop_at = None
-        self.eta = None
-        self.default_eta = 1.0
-        self.config = None
-        self.last_latent = None
-
-        self.conditioning_key = sd_model.model.conditioning_key
-
-    def callback_state(self, d):
-        step = d['i']
-        latent = d["denoised"]
-        if opts.live_preview_content == "Combined":
-            store_latent(latent)
-        self.last_latent = latent
-
-        if self.stop_at is not None and step > self.stop_at:
-            raise InterruptedException
-
-        state.sampling_step = step
-        shared.total_tqdm.update()
-
-    def launch_sampling(self, steps, func):
-        state.sampling_steps = steps
-        state.sampling_step = 0
-
-        try:
-            return func()
-        except InterruptedException:
-            return self.last_latent
-
-    def number_of_needed_noises(self, p):
-        return p.steps
-
-    def initialize(self, p):
-        self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
-        self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
-        self.model_wrap_cfg.step = 0
-        self.eta = p.eta or opts.eta_ancestral
-
-        k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
-
-        extra_params_kwargs = {}
-        for param_name in self.extra_params:
-            if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
-                extra_params_kwargs[param_name] = getattr(p, param_name)
-
-        if 'eta' in inspect.signature(self.func).parameters:
-            extra_params_kwargs['eta'] = self.eta
-
-        return extra_params_kwargs
-
-    def get_sigmas(self, p, steps):
-        discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
-        if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma:
-            discard_next_to_last_sigma = True
-            p.extra_generation_params["Discard penultimate sigma"] = True
-
-        steps += 1 if discard_next_to_last_sigma else 0
-
-        if p.sampler_noise_scheduler_override:
-            sigmas = p.sampler_noise_scheduler_override(steps)
-        elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
-            sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
-
-            sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device)
-        else:
-            sigmas = self.model_wrap.get_sigmas(steps)
-
-        if discard_next_to_last_sigma:
-            sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
-
-        return sigmas
-
-    def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
-        steps, t_enc = setup_img2img_steps(p, steps)
-
-        sigmas = self.get_sigmas(p, steps)
-
-        sigma_sched = sigmas[steps - t_enc - 1:]
-        xi = x + noise * sigma_sched[0]
-        
-        extra_params_kwargs = self.initialize(p)
-        if 'sigma_min' in inspect.signature(self.func).parameters:
-            ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last
-            extra_params_kwargs['sigma_min'] = sigma_sched[-2]
-        if 'sigma_max' in inspect.signature(self.func).parameters:
-            extra_params_kwargs['sigma_max'] = sigma_sched[0]
-        if 'n' in inspect.signature(self.func).parameters:
-            extra_params_kwargs['n'] = len(sigma_sched) - 1
-        if 'sigma_sched' in inspect.signature(self.func).parameters:
-            extra_params_kwargs['sigma_sched'] = sigma_sched
-        if 'sigmas' in inspect.signature(self.func).parameters:
-            extra_params_kwargs['sigmas'] = sigma_sched
-
-        self.model_wrap_cfg.init_latent = x
-        self.last_latent = x
-
-        samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args={
-            'cond': conditioning, 
-            'image_cond': image_conditioning, 
-            'uncond': unconditional_conditioning, 
-            'cond_scale': p.cfg_scale
-        }, disable=False, callback=self.callback_state, **extra_params_kwargs))
-
-        return samples
-
-    def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning = None):
-        steps = steps or p.steps
-
-        sigmas = self.get_sigmas(p, steps)
-
-        x = x * sigmas[0]
-
-        extra_params_kwargs = self.initialize(p)
-        if 'sigma_min' in inspect.signature(self.func).parameters:
-            extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
-            extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
-            if 'n' in inspect.signature(self.func).parameters:
-                extra_params_kwargs['n'] = steps
-        else:
-            extra_params_kwargs['sigmas'] = sigmas
-
-        self.last_latent = x
-        samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
-            'cond': conditioning, 
-            'image_cond': image_conditioning, 
-            'uncond': unconditional_conditioning, 
-            'cond_scale': p.cfg_scale
-        }, disable=False, callback=self.callback_state, **extra_params_kwargs))
-
-        return samples
-
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
new file mode 100644
index 000000000..3c03d442e
--- /dev/null
+++ b/modules/sd_samplers_common.py
@@ -0,0 +1,78 @@
+from collections import namedtuple
+import numpy as np
+import torch
+from PIL import Image
+import torchsde._brownian.brownian_interval
+from modules import devices, processing, images, sd_vae_approx
+
+from modules.shared import opts, state
+import modules.shared as shared
+
+SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
+
+
+def setup_img2img_steps(p, steps=None):
+    if opts.img2img_fix_steps or steps is not None:
+        requested_steps = (steps or p.steps)
+        steps = int(requested_steps / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
+        t_enc = requested_steps - 1
+    else:
+        steps = p.steps
+        t_enc = int(min(p.denoising_strength, 0.999) * steps)
+
+    return steps, t_enc
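
An aside on the step arithmetic above, since it is easy to misread: with "img2img fix steps" enabled, every requested step is spent on the denoised portion of the schedule, so the full schedule is stretched by 1 / denoising_strength. A standalone sketch with made-up numbers (illustrative, not part of the patch):

    def demo_setup_img2img_steps(requested_steps=20, denoising_strength=0.5, fix_steps=True):
        # mirrors the branch logic in setup_img2img_steps above
        if fix_steps:
            steps = int(requested_steps / min(denoising_strength, 0.999))  # 40 total
            t_enc = requested_steps - 1                                    # 19: all 20 requested steps run
        else:
            steps = requested_steps                                        # 20 total
            t_enc = int(min(denoising_strength, 0.999) * steps)            # 10: only half of them run
        return steps, t_enc

    assert demo_setup_img2img_steps() == (40, 19)
    assert demo_setup_img2img_steps(fix_steps=False) == (20, 10)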
+
+
+approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2}
+
+
+def single_sample_to_image(sample, approximation=None):
+    if approximation is None:
+        approximation = approximation_indexes.get(opts.show_progress_type, 0)
+
+    if approximation == 2:
+        x_sample = sd_vae_approx.cheap_approximation(sample)
+    elif approximation == 1:
+        x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
+    else:
+        x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
+
+    x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
+    x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
+    x_sample = x_sample.astype(np.uint8)
+    return Image.fromarray(x_sample)
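
For reference, the tensor-to-image conversion inside single_sample_to_image, isolated as a self-contained sketch (it assumes a decoded 3xHxW tensor in [-1, 1]; the helper name is ours, not the module's):

    import numpy as np
    import torch
    from PIL import Image

    def tensor_to_pil(x_sample: torch.Tensor) -> Image.Image:
        x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)  # [-1, 1] -> [0, 1]
        x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)       # CHW -> HWC, scale to [0, 255]
        return Image.fromarray(x_sample.astype(np.uint8))

    img = tensor_to_pil(torch.zeros(3, 8, 8))  # a mid-gray 8x8 image
    assert img.size == (8, 8)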
+
+
+def sample_to_image(samples, index=0, approximation=None):
+    return single_sample_to_image(samples[index], approximation)
+
+
+def samples_to_image_grid(samples, approximation=None):
+    return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
+
+
+def store_latent(decoded):
+    state.current_latent = decoded
+
+    if opts.live_previews_enable and opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
+        if not shared.parallel_processing_allowed:
+            shared.state.assign_current_image(sample_to_image(decoded))
+
+
+class InterruptedException(BaseException):
+    pass
+
+
+# MPS fix for randn in torchsde
+# XXX move this to separate file for MPS
+def torchsde_randn(size, dtype, device, seed):
+    if device.type == 'mps':
+        generator = torch.Generator(devices.cpu).manual_seed(int(seed))
+        return torch.randn(size, dtype=dtype, device=devices.cpu, generator=generator).to(device)
+    else:
+        generator = torch.Generator(device).manual_seed(int(seed))
+        return torch.randn(size, dtype=dtype, device=device, generator=generator)
+
+
+torchsde._brownian.brownian_interval._randn = torchsde_randn
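
A note on the torchsde monkeypatch above: presumably because seeded randn on MPS was unreliable at the time, noise is produced on the CPU with a seeded torch.Generator and moved to the target device afterwards, which keeps results reproducible. A CPU-only sketch of the pattern (device names are illustrative):

    import torch

    def seeded_randn(size, dtype, device, seed):
        # generate on CPU for determinism, then move to the target device
        generator = torch.Generator("cpu").manual_seed(int(seed))
        return torch.randn(size, dtype=dtype, device="cpu", generator=generator).to(device)

    a = seeded_randn((2, 3), torch.float32, "cpu", 1234)
    b = seeded_randn((2, 3), torch.float32, "cpu", 1234)
    assert torch.equal(a, b)  # same seed, same noise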
+
diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py
new file mode 100644
index 000000000..d03131cd4
--- /dev/null
+++ b/modules/sd_samplers_compvis.py
@@ -0,0 +1,160 @@
+import math
+import ldm.models.diffusion.ddim
+import ldm.models.diffusion.plms
+
+import numpy as np
+import torch
+
+from modules.shared import state
+from modules import sd_samplers_common, prompt_parser, shared
+
+
+samplers_data_compvis = [
+    sd_samplers_common.SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {}),
+    sd_samplers_common.SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {}),
+]
+
+
+class VanillaStableDiffusionSampler:
+    def __init__(self, constructor, sd_model):
+        self.sampler = constructor(sd_model)
+        self.is_plms = hasattr(self.sampler, 'p_sample_plms')
+        self.orig_p_sample_ddim = self.sampler.p_sample_plms if self.is_plms else self.sampler.p_sample_ddim
+        self.mask = None
+        self.nmask = None
+        self.init_latent = None
+        self.sampler_noises = None
+        self.step = 0
+        self.stop_at = None
+        self.eta = None
+        self.config = None
+        self.last_latent = None
+
+        self.conditioning_key = sd_model.model.conditioning_key
+
+    def number_of_needed_noises(self, p):
+        return 0
+
+    def launch_sampling(self, steps, func):
+        state.sampling_steps = steps
+        state.sampling_step = 0
+
+        try:
+            return func()
+        except sd_samplers_common.InterruptedException:
+            return self.last_latent
+
+    def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
+        if state.interrupted or state.skipped:
+            raise sd_samplers_common.InterruptedException
+
+        if self.stop_at is not None and self.step > self.stop_at:
+            raise sd_samplers_common.InterruptedException
+
+        # Have to unwrap the inpainting conditioning here to perform pre-processing
+        image_conditioning = None
+        if isinstance(cond, dict):
+            image_conditioning = cond["c_concat"][0]
+            cond = cond["c_crossattn"][0]
+            unconditional_conditioning = unconditional_conditioning["c_crossattn"][0]
+
+        conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
+        unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
+
+        assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers'
+        cond = tensor
+
+        # for DDIM, shapes must match, we can't just process cond and uncond independently;
+        # filling unconditional_conditioning with repeats of the last vector to match length is
+        # not 100% correct but should work well enough
+        if unconditional_conditioning.shape[1] < cond.shape[1]:
+            last_vector = unconditional_conditioning[:, -1:]
+            last_vector_repeated = last_vector.repeat([1, cond.shape[1] - unconditional_conditioning.shape[1], 1])
+            unconditional_conditioning = torch.hstack([unconditional_conditioning, last_vector_repeated])
+        elif unconditional_conditioning.shape[1] > cond.shape[1]:
+            unconditional_conditioning = unconditional_conditioning[:, :cond.shape[1]]
+
+        if self.mask is not None:
+            img_orig = self.sampler.model.q_sample(self.init_latent, ts)
+            x_dec = img_orig * self.mask + self.nmask * x_dec
+
+        # Wrap the image conditioning back up since the DDIM code can accept the dict directly.
+        # Note that they need to be lists because it just concatenates them later.
+        if image_conditioning is not None:
+            cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]}
+            unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
+
+        res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs)
+
+        if self.mask is not None:
+            self.last_latent = self.init_latent * self.mask + self.nmask * res[1]
+        else:
+            self.last_latent = res[1]
+
+        sd_samplers_common.store_latent(self.last_latent)
+
+        self.step += 1
+        state.sampling_step = self.step
+        shared.total_tqdm.update()
+
+        return res
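
The shape-matching pad in p_sample_ddim_hook is worth seeing in isolation: DDIM concatenates cond and uncond, so when a long prompt spills into extra 77-token chunks the shorter negative prompt is padded by repeating its final embedding vector. A standalone sketch with made-up sizes:

    import torch

    cond = torch.randn(1, 154, 768)    # prompt that spilled into two 77-token chunks
    uncond = torch.randn(1, 77, 768)   # negative prompt fits in one chunk

    if uncond.shape[1] < cond.shape[1]:
        last_vector = uncond[:, -1:]
        repeated = last_vector.repeat([1, cond.shape[1] - uncond.shape[1], 1])
        uncond = torch.hstack([uncond, repeated])
    elif uncond.shape[1] > cond.shape[1]:
        uncond = uncond[:, :cond.shape[1]]

    assert uncond.shape == cond.shape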
+
+    def initialize(self, p):
+        self.eta = p.eta if p.eta is not None else shared.opts.eta_ddim
+        if self.eta != 0.0:
+            p.extra_generation_params["Eta DDIM"] = self.eta
+
+        for fieldname in ['p_sample_ddim', 'p_sample_plms']:
+            if hasattr(self.sampler, fieldname):
+                setattr(self.sampler, fieldname, self.p_sample_ddim_hook)
+
+        self.mask = p.mask if hasattr(p, 'mask') else None
+        self.nmask = p.nmask if hasattr(p, 'nmask') else None
+
+    def adjust_steps_if_invalid(self, p, num_steps):
+        if (self.config.name == 'DDIM' and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS'):
+            valid_step = 999 / (1000 // num_steps)
+            if valid_step == math.floor(valid_step):
+                return int(valid_step) + 1
+        
+        return num_steps
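
A worked example for adjust_steps_if_invalid: with uniform discretization, DDIM builds its timesteps as range(0, 1000, 1000 // num_steps), and for some step counts the quantity 999 / (1000 // num_steps) comes out integral, a case the schedule cannot handle, so one extra step is requested. Reproduced standalone:

    import math

    def demo_adjust(num_steps):
        valid_step = 999 / (1000 // num_steps)
        if valid_step == math.floor(valid_step):
            return int(valid_step) + 1
        return num_steps

    assert demo_adjust(20) == 20    # 999 / 50 = 19.98, nothing to fix
    assert demo_adjust(333) == 334  # 999 / 3 = 333.0, an exact hit, so bump by one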
+
+    def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+        steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)
+        steps = self.adjust_steps_if_invalid(p, steps)
+        self.initialize(p)
+
+        self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
+        x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
+
+        self.init_latent = x
+        self.last_latent = x
+        self.step = 0
+
+        # Wrap the conditioning models with additional image conditioning for inpainting model
+        if image_conditioning is not None:
+            conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
+            unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
+
+        samples = self.launch_sampling(t_enc + 1, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))
+
+        return samples
+
+    def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+        self.initialize(p)
+
+        self.init_latent = None
+        self.last_latent = x
+        self.step = 0
+
+        steps = self.adjust_steps_if_invalid(p, steps or p.steps)
+
+        # Wrap the conditioning models with additional image conditioning for inpainting model
+        # dummy_for_plms is needed because PLMS code checks the first item in the dict to have the right shape
+        if image_conditioning is not None:
+            conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_concat": [image_conditioning]}
+            unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_concat": [image_conditioning]}
+
+        samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
+
+        return samples_ddim
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
new file mode 100644
index 000000000..aa7f106b3
--- /dev/null
+++ b/modules/sd_samplers_kdiffusion.py
@@ -0,0 +1,298 @@
+from collections import deque
+import torch
+import inspect
+import k_diffusion.sampling
+from modules import prompt_parser, devices, sd_samplers_common
+
+from modules.shared import opts, state
+import modules.shared as shared
+from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
+
+samplers_k_diffusion = [
+    ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {}),
+    ('Euler', 'sample_euler', ['k_euler'], {}),
+    ('LMS', 'sample_lms', ['k_lms'], {}),
+    ('Heun', 'sample_heun', ['k_heun'], {}),
+    ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}),
+    ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True}),
+    ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
+    ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
+    ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {}),
+    ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
+    ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
+    ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
+    ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
+    ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
+    ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
+    ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
+    ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras'}),
+]
+
+samplers_data_k_diffusion = [
+    sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
+    for label, funcname, aliases, options in samplers_k_diffusion
+    if hasattr(k_diffusion.sampling, funcname)
+]
+
+sampler_extra_params = {
+    'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
+    'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
+    'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
+}
+
+
+class CFGDenoiser(torch.nn.Module):
+    """
+    Classifier-free guidance denoiser. A wrapper for the stable diffusion model (specifically its unet)
+    that takes a noisy picture and produces a noise-free picture using two guidances (prompts)
+    instead of one. Originally the second prompt would just be an empty string, but we use a non-empty
+    negative prompt.
+    """
+
+    def __init__(self, model):
+        super().__init__()
+        self.inner_model = model
+        self.mask = None
+        self.nmask = None
+        self.init_latent = None
+        self.step = 0
+
+    def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
+        denoised_uncond = x_out[-uncond.shape[0]:]
+        denoised = torch.clone(denoised_uncond)
+
+        for i, conds in enumerate(conds_list):
+            for cond_index, weight in conds:
+                denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
+
+        return denoised
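
combine_denoised is the classifier-free-guidance combination, generalized to several weighted prompts per image (the AND syntax). For a single prompt with weight 1.0 it reduces to the familiar uncond + cond_scale * (cond - uncond); a quick check with toy tensors:

    import torch

    def demo_combine(x_out, conds_list, uncond, cond_scale):
        # same arithmetic as combine_denoised above
        denoised_uncond = x_out[-uncond.shape[0]:]
        denoised = torch.clone(denoised_uncond)
        for i, conds in enumerate(conds_list):
            for cond_index, weight in conds:
                denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
        return denoised

    cond, uncond = torch.randn(1, 4, 8, 8), torch.randn(1, 4, 8, 8)
    out = demo_combine(torch.cat([cond, uncond]), [[(0, 1.0)]], uncond, 7.0)
    assert torch.allclose(out, uncond + 7.0 * (cond - uncond))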
+
+    def forward(self, x, sigma, uncond, cond, cond_scale, image_cond):
+        if state.interrupted or state.skipped:
+            raise sd_samplers_common.InterruptedException
+
+        conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
+        uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
+
+        batch_size = len(conds_list)
+        repeats = [len(conds_list[i]) for i in range(batch_size)]
+
+        x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
+        image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
+        sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
+
+        denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps)
+        cfg_denoiser_callback(denoiser_params)
+        x_in = denoiser_params.x
+        image_cond_in = denoiser_params.image_cond
+        sigma_in = denoiser_params.sigma
+
+        if tensor.shape[1] == uncond.shape[1]:
+            cond_in = torch.cat([tensor, uncond])
+
+            if shared.batch_cond_uncond:
+                x_out = self.inner_model(x_in, sigma_in, cond={"c_crossattn": [cond_in], "c_concat": [image_cond_in]})
+            else:
+                x_out = torch.zeros_like(x_in)
+                for batch_offset in range(0, x_out.shape[0], batch_size):
+                    a = batch_offset
+                    b = a + batch_size
+                    x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [cond_in[a:b]], "c_concat": [image_cond_in[a:b]]})
+        else:
+            x_out = torch.zeros_like(x_in)
+            batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
+            for batch_offset in range(0, tensor.shape[0], batch_size):
+                a = batch_offset
+                b = min(a + batch_size, tensor.shape[0])
+                x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [tensor[a:b]], "c_concat": [image_cond_in[a:b]]})
+
+            x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]})
+
+        devices.test_for_nans(x_out, "unet")
+
+        if opts.live_preview_content == "Prompt":
+            sd_samplers_common.store_latent(x_out[0:uncond.shape[0]])
+        elif opts.live_preview_content == "Negative prompt":
+            sd_samplers_common.store_latent(x_out[-uncond.shape[0]:])
+
+        denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
+
+        if self.mask is not None:
+            denoised = self.init_latent * self.mask + self.nmask * denoised
+
+        self.step += 1
+
+        return denoised
+
+
+class TorchHijack:
+    def __init__(self, sampler_noises):
+        # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based
+        # implementation.
+        self.sampler_noises = deque(sampler_noises)
+
+    def __getattr__(self, item):
+        if item == 'randn_like':
+            return self.randn_like
+
+        if hasattr(torch, item):
+            return getattr(torch, item)
+
+        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
+
+    def randn_like(self, x):
+        if self.sampler_noises:
+            noise = self.sampler_noises.popleft()
+            if noise.shape == x.shape:
+                return noise
+
+        if x.device.type == 'mps':
+            return torch.randn_like(x, device=devices.cpu).to(x.device)
+        else:
+            return torch.randn_like(x)
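
TorchHijack works by rebinding the module-level torch name inside k_diffusion.sampling (see initialize below), so calls to torch.randn_like made by the sampling functions transparently consume pre-generated noises, while every other attribute forwards to the real torch. A minimal stand-in demonstrates the pattern (names and the shape handling are simplified):

    import types
    import torch

    class Hijack:
        def __init__(self, noises):
            self.noises = list(noises)

        def __getattr__(self, item):
            # simplified: the real class also verifies that queued noise shapes match
            if item == "randn_like":
                return lambda x: self.noises.pop(0) if self.noises else torch.randn_like(x)
            return getattr(torch, item)

    fake_sampling = types.SimpleNamespace()  # stands in for k_diffusion.sampling
    queued = torch.ones(2, 2)
    fake_sampling.torch = Hijack([queued])   # the rebinding done in initialize()

    assert fake_sampling.torch.randn_like(torch.zeros(2, 2)) is queued  # queued noise served first
    assert fake_sampling.torch.zeros(3).shape == (3,)                   # everything else is plain torch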
+
+
+class KDiffusionSampler:
+    def __init__(self, funcname, sd_model):
+        denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser
+
+        self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
+        self.funcname = funcname
+        self.func = getattr(k_diffusion.sampling, self.funcname)
+        self.extra_params = sampler_extra_params.get(funcname, [])
+        self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
+        self.sampler_noises = None
+        self.stop_at = None
+        self.eta = None
+        self.config = None
+        self.last_latent = None
+
+        self.conditioning_key = sd_model.model.conditioning_key
+
+    def callback_state(self, d):
+        step = d['i']
+        latent = d["denoised"]
+        if opts.live_preview_content == "Combined":
+            sd_samplers_common.store_latent(latent)
+        self.last_latent = latent
+
+        if self.stop_at is not None and step > self.stop_at:
+            raise sd_samplers_common.InterruptedException
+
+        state.sampling_step = step
+        shared.total_tqdm.update()
+
+    def launch_sampling(self, steps, func):
+        state.sampling_steps = steps
+        state.sampling_step = 0
+
+        try:
+            return func()
+        except sd_samplers_common.InterruptedException:
+            return self.last_latent
+
+    def number_of_needed_noises(self, p):
+        return p.steps
+
+    def initialize(self, p):
+        self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
+        self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
+        self.model_wrap_cfg.step = 0
+        self.eta = p.eta if p.eta is not None else opts.eta_ancestral
+
+        k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
+
+        extra_params_kwargs = {}
+        for param_name in self.extra_params:
+            if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
+                extra_params_kwargs[param_name] = getattr(p, param_name)
+
+        if 'eta' in inspect.signature(self.func).parameters:
+            if self.eta != 1.0:
+                p.extra_generation_params["Eta"] = self.eta
+
+            extra_params_kwargs['eta'] = self.eta
+
+        return extra_params_kwargs
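
The parameter filtering in initialize relies on inspect.signature: a sampler-specific extra parameter is passed through only if it is both set on the processing object and accepted by the sampling function. A compact standalone version (the demo signature and fields are hypothetical):

    import inspect

    def sample_euler(x, s_churn=0.0, s_noise=1.0):  # hypothetical sampler signature
        return x

    class P:  # stands in for the processing object
        s_churn = 0.5
        s_tmax = 999.0  # set on p, but sample_euler does not accept it

    extra_params = ['s_churn', 's_tmin', 's_tmax', 's_noise']
    accepted = inspect.signature(sample_euler).parameters
    kwargs = {name: getattr(P, name) for name in extra_params
              if hasattr(P, name) and name in accepted}
    assert kwargs == {'s_churn': 0.5}  # only params that are both set and accepted survive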
+
+    def get_sigmas(self, p, steps):
+        discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
+        if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma:
+            discard_next_to_last_sigma = True
+            p.extra_generation_params["Discard penultimate sigma"] = True
+
+        steps += 1 if discard_next_to_last_sigma else 0
+
+        if p.sampler_noise_scheduler_override:
+            sigmas = p.sampler_noise_scheduler_override(steps)
+        elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
+            sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
+
+            sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device)
+        else:
+            sigmas = self.model_wrap.get_sigmas(steps)
+
+        if discard_next_to_last_sigma:
+            sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
+
+        return sigmas
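
What "discard penultimate sigma" actually does to the schedule: get_sigmas asks for one extra step and then drops the next-to-last value while keeping the trailing zero. On toy numbers:

    import torch

    sigmas = torch.tensor([14.6, 7.0, 3.1, 1.2, 0.4, 0.0])  # made-up schedule
    trimmed = torch.cat([sigmas[:-2], sigmas[-1:]])          # drop the 0.4, keep the 0.0
    assert torch.equal(trimmed, torch.tensor([14.6, 7.0, 3.1, 1.2, 0.0]))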
+
+    def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+        steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)
+
+        sigmas = self.get_sigmas(p, steps)
+
+        sigma_sched = sigmas[steps - t_enc - 1:]
+        xi = x + noise * sigma_sched[0]
+        
+        extra_params_kwargs = self.initialize(p)
+        if 'sigma_min' in inspect.signature(self.func).parameters:
+            # the last sigma is zero, which DPM Fast & Adaptive do not allow, so take the value before last
+            extra_params_kwargs['sigma_min'] = sigma_sched[-2]
+        if 'sigma_max' in inspect.signature(self.func).parameters:
+            extra_params_kwargs['sigma_max'] = sigma_sched[0]
+        if 'n' in inspect.signature(self.func).parameters:
+            extra_params_kwargs['n'] = len(sigma_sched) - 1
+        if 'sigma_sched' in inspect.signature(self.func).parameters:
+            extra_params_kwargs['sigma_sched'] = sigma_sched
+        if 'sigmas' in inspect.signature(self.func).parameters:
+            extra_params_kwargs['sigmas'] = sigma_sched
+
+        self.model_wrap_cfg.init_latent = x
+        self.last_latent = x
+
+        samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args={
+            'cond': conditioning, 
+            'image_cond': image_conditioning, 
+            'uncond': unconditional_conditioning, 
+            'cond_scale': p.cfg_scale
+        }, disable=False, callback=self.callback_state, **extra_params_kwargs))
+
+        return samples
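
To make the img2img slice above concrete: get_sigmas returns steps + 1 values ending in zero, and sample_img2img enters that schedule part-way down, at index steps - t_enc - 1, after noising the init latent to the first sigma it will actually use. Toy numbers (the linspace schedule is a stand-in):

    import torch

    steps, t_enc = 6, 3
    sigmas = torch.linspace(14.6, 0.0, steps + 1)  # stand-in for get_sigmas(p, steps)
    sigma_sched = sigmas[steps - t_enc - 1:]       # enter the schedule at index 2
    assert len(sigma_sched) == t_enc + 2

    x = torch.zeros(1, 4, 8, 8)                    # init latent
    noise = torch.randn_like(x)
    xi = x + noise * sigma_sched[0]                # noised up to the entry sigma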
+
+    def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning = None):
+        steps = steps or p.steps
+
+        sigmas = self.get_sigmas(p, steps)
+
+        x = x * sigmas[0]
+
+        extra_params_kwargs = self.initialize(p)
+        if 'sigma_min' in inspect.signature(self.func).parameters:
+            extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
+            extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
+            if 'n' in inspect.signature(self.func).parameters:
+                extra_params_kwargs['n'] = steps
+        else:
+            extra_params_kwargs['sigmas'] = sigmas
+
+        self.last_latent = x
+        samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
+            'cond': conditioning, 
+            'image_cond': image_conditioning, 
+            'uncond': unconditional_conditioning, 
+            'cond_scale': p.cfg_scale
+        }, disable=False, callback=self.callback_state, **extra_params_kwargs))
+
+        return samples
+
diff --git a/modules/shared.py b/modules/shared.py
index 474fcc429..96a2572fe 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -127,12 +127,13 @@ restricted_opts = {
 ui_reorder_categories = [
     "inpaint",
     "sampler",
+    "checkboxes",
+    "hires_fix",
     "dimensions",
     "cfg",
     "seed",
-    "checkboxes",
-    "hires_fix",
     "batch",
+    "override_settings",
     "scripts",
 ]
 
@@ -346,10 +347,10 @@ options_templates.update(options_section(('saving-paths', "Paths for saving"), {
 }))
 
 options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
-    "save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
-    "grid_save_to_dirs": OptionInfo(False, "Save grids to a subdirectory"),
+    "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"),
+    "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"),
     "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
-    "directories_filename_pattern": OptionInfo("", "Directory name pattern", component_args=hide_dirs),
+    "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs),
     "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
 }))
 
@@ -405,7 +406,6 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
     "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
     "comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }),
     "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}),
-    "extra_networks_default_multiplier": OptionInfo(1.0, "Multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
     "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
 }))
 
@@ -431,7 +431,9 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"),
 }))
 
 options_templates.update(options_section(('extra_networks', "Extra Networks"), {
-    "extra_networks_default_view": OptionInfo("cards", "Default view for Extra Networks", gr.Dropdown, { "choices": ["cards", "thumbs"] }),
+    "extra_networks_default_view": OptionInfo("cards", "Default view for Extra Networks", gr.Dropdown, {"choices": ["cards", "thumbs"]}),
+    "extra_networks_default_multiplier": OptionInfo(1.0, "Multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
+    "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": [""] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
 }))
 
 options_templates.update(options_section(('ui', "User interface"), {
@@ -439,7 +441,7 @@ options_templates.update(options_section(('ui', "User interface"), {
     "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
     "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
     "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
-    "disable_weights_auto_swap": OptionInfo(False, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."),
+    "disable_weights_auto_swap": OptionInfo(True, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."),
     "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
     "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"),
     "font": OptionInfo("", "Font for image grids that have text"),
@@ -604,11 +606,37 @@ class Options:
 
         self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])}
 
+    def cast_value(self, key, value):
+        """casts an arbitrary to the same type as this setting's value with key
+        Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str)
+        """
+
+        if value is None:
+            return None
+
+        default_value = self.data_labels[key].default
+        if default_value is None:
+            default_value = getattr(self, key, None)
+        if default_value is None:
+            return None
+
+        expected_type = type(default_value)
+        if expected_type == bool and value == "False":
+            value = False
+        else:
+            value = expected_type(value)
+
+        return value
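
One subtlety in cast_value: bool("False") is truthy in Python, so the string "False" must be special-cased before falling back to calling the expected type. A short reproduction:

    assert bool("False") is True  # why the special case exists

    def demo_cast(value, expected_type):
        # same branch structure as cast_value above
        if expected_type == bool and value == "False":
            return False
        return expected_type(value)

    assert demo_cast("12", int) == 12
    assert demo_cast("False", bool) is False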
+
 
 opts = Options()
 if os.path.exists(config_filename):
     opts.load(config_filename)
 
+settings_components = None
+"""assinged from ui.py, a mapping on setting anmes to gradio components repsponsible for those settings"""
+
 latent_upscale_default_mode = "Latent"
 latent_upscale_modes = {
     "Latent": {"mode": "bilinear", "antialias": False},
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 6cf00e65d..a1a406c22 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -112,6 +112,7 @@ class EmbeddingDatabase:
         self.skipped_embeddings = {}
         self.expected_shape = -1
         self.embedding_dirs = {}
+        self.previously_displayed_embeddings = ()
 
     def add_embedding_dir(self, path):
         self.embedding_dirs[path] = DirWithTextualInversionEmbeddings(path)
@@ -228,9 +229,12 @@ class EmbeddingDatabase:
             self.load_from_dir(embdir)
             embdir.update()
 
-        print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}")
-        if len(self.skipped_embeddings) > 0:
-            print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings.keys())}")
+        displayed_embeddings = (tuple(self.word_embeddings.keys()), tuple(self.skipped_embeddings.keys()))
+        if self.previously_displayed_embeddings != displayed_embeddings:
+            self.previously_displayed_embeddings = displayed_embeddings
+            print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}")
+            if len(self.skipped_embeddings) > 0:
+                print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings.keys())}")
 
     def find_embedding_at_position(self, tokens, offset):
         token = tokens[offset]
diff --git a/modules/txt2img.py b/modules/txt2img.py
index e945fd698..16841d0f2 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -1,5 +1,6 @@
 import modules.scripts
 from modules import sd_samplers
+from modules.generation_parameters_copypaste import create_override_settings_dict
 from modules.processing import StableDiffusionProcessing, Processed, StableDiffusionProcessingTxt2Img, \
     StableDiffusionProcessingImg2Img, process_images
 from modules.shared import opts, cmd_opts
@@ -8,7 +9,9 @@ import modules.processing as processing
 from modules.ui import plaintext_to_html
 
 
-def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, *args):
+def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, override_settings_texts, *args):
+    override_settings = create_override_settings_dict(override_settings_texts)
+
     p = StableDiffusionProcessingTxt2Img(
         sd_model=shared.sd_model,
         outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
@@ -38,6 +41,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
         hr_second_pass_steps=hr_second_pass_steps,
         hr_resize_x=hr_resize_x,
         hr_resize_y=hr_resize_y,
+        override_settings=override_settings,
     )
 
     p.scripts = modules.scripts.scripts_txt2img
diff --git a/modules/ui.py b/modules/ui.py
index 9f4cfda1a..f910c5823 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -380,6 +380,7 @@ def apply_setting(key, value):
     opts.save(shared.config_filename)
     return getattr(opts, key)
 
+
 def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id):
     def refresh():
         refresh_method()
@@ -433,6 +434,18 @@ def get_value_for_setting(key):
     return gr.update(value=value, **args)
 
 
+def create_override_settings_dropdown(tabname, row):
+    dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True)
+
+    dropdown.change(
+        fn=lambda x: gr.Dropdown.update(visible=len(x) > 0),
+        inputs=[dropdown],
+        outputs=[dropdown],
+    )
+
+    return dropdown
+
+
 def create_ui():
     import modules.img2img
     import modules.txt2img
@@ -503,6 +516,10 @@ def create_ui():
                                 batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
                                 batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
 
+                    elif category == "override_settings":
+                        with FormRow(elem_id="txt2img_override_settings_row") as row:
+                            override_settings = create_override_settings_dropdown('txt2img', row)
+
                     elif category == "scripts":
                         with FormGroup(elem_id="txt2img_script_container"):
                             custom_inputs = modules.scripts.scripts_txt2img.setup_ui()
@@ -524,7 +541,6 @@ def create_ui():
                 )
 
             txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples)
-            parameters_copypaste.bind_buttons({"txt2img": txt2img_paste}, None, txt2img_prompt)
 
             connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
             connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
@@ -555,6 +571,7 @@ def create_ui():
                     hr_second_pass_steps,
                     hr_resize_x,
                     hr_resize_y,
+                    override_settings,
                 ] + custom_inputs,
 
                 outputs=[
@@ -615,6 +632,9 @@ def create_ui():
                 *modules.scripts.scripts_txt2img.infotext_fields
             ]
             parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields)
+            parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
+                paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None, override_settings_component=override_settings,
+            ))
 
             txt2img_preview_params = [
                 txt2img_prompt,
@@ -762,6 +782,10 @@ def create_ui():
                                 batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
                                 batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
 
+                    elif category == "override_settings":
+                        with FormRow(elem_id="img2img_override_settings_row") as row:
+                            override_settings = create_override_settings_dropdown('img2img', row)
+
                     elif category == "scripts":
                         with FormGroup(elem_id="img2img_script_container"):
                             custom_inputs = modules.scripts.scripts_img2img.setup_ui()
@@ -796,7 +820,6 @@ def create_ui():
                                 )
 
             img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
-            parameters_copypaste.bind_buttons({"img2img": img2img_paste}, None, img2img_prompt)
 
             connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
             connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
@@ -849,7 +872,8 @@ def create_ui():
                     inpainting_mask_invert,
                     img2img_batch_input_dir,
                     img2img_batch_output_dir,
-                    img2img_batch_inpaint_mask_dir
+                    img2img_batch_inpaint_mask_dir,
+                    override_settings,
                 ] + custom_inputs,
                 outputs=[
                     img2img_gallery,
@@ -937,6 +961,9 @@ def create_ui():
             ]
             parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields)
             parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields)
+            parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
+                paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None, override_settings_component=override_settings,
+            ))
 
     modules.scripts.scripts_current = None
 
@@ -954,7 +981,11 @@ def create_ui():
                 html2 = gr.HTML()
                 with gr.Row():
                     buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"])
-                parameters_copypaste.bind_buttons(buttons, image, generation_info)
+
+                for tabname, button in buttons.items():
+                    parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
+                        paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image,
+                    ))
 
         image.change(
             fn=wrap_gradio_call(modules.extras.run_pnginfo),
@@ -1363,6 +1394,7 @@ def create_ui():
 
     components = []
     component_dict = {}
+    shared.settings_components = component_dict
 
     script_callbacks.ui_settings_callback()
     opts.reorder()
@@ -1529,8 +1561,7 @@ def create_ui():
                 component = create_setting_component(k, is_quicksettings=True)
                 component_dict[k] = component
 
-        parameters_copypaste.integrate_settings_paste_fields(component_dict)
-        parameters_copypaste.run_bind()
+        parameters_copypaste.connect_paste_params_buttons()
 
         with gr.Tabs(elem_id="tabs") as tabs:
             for interface, label, ifid in interfaces:
@@ -1560,6 +1591,14 @@ def create_ui():
                 outputs=[component, text_settings],
             )
 
+        button_set_checkpoint = gr.Button('Change checkpoint', elem_id='change_checkpoint', visible=False)
+        button_set_checkpoint.click(
+            fn=lambda value, _: run_settings_single(value, key='sd_model_checkpoint'),
+            _js="function(v){ var res = desiredCheckpointName; desiredCheckpointName = ''; return [res || v, null]; }",
+            inputs=[component_dict['sd_model_checkpoint'], dummy_component],
+            outputs=[component_dict['sd_model_checkpoint'], text_settings],
+        )
+
         component_keys = [k for k in opts.data_labels.keys() if k in component_dict]
 
         def get_settings_values():
@@ -1692,14 +1731,14 @@ def create_ui():
 
 
 def reload_javascript():
-    head = f'<script type="text/javascript" src="file={os.path.abspath("script.js")}"></script>\n'
+    head = f'<script type="text/javascript" src="file={os.path.abspath("script.js")}?{os.path.getmtime("script.js")}"></script>\n'
 
     inline = f"{localization.localization_js(shared.opts.localization)};"
     if cmd_opts.theme is not None:
         inline += f"set_theme('{cmd_opts.theme}');"
 
     for script in modules.scripts.list_scripts("javascript", ".js"):
-        head += f'<script type="text/javascript" src="file={script.path}"></script>\n'
+        head += f'<script type="text/javascript" src="file={script.path}?{os.path.getmtime(script.path)}"></script>\n'
 
     head += f'<script type="text/javascript">{inline}</script>\n'
 
diff --git a/modules/ui_common.py b/modules/ui_common.py
index 9405ac1f6..fd047f318 100644
--- a/modules/ui_common.py
+++ b/modules/ui_common.py
@@ -198,5 +198,9 @@ Requested path was: {f}
                 html_info = gr.HTML(elem_id=f'html_info_{tabname}')
                 html_log = gr.HTML(elem_id=f'html_log_{tabname}')
 
-            parameters_copypaste.bind_buttons(buttons, result_gallery, "txt2img" if tabname == "txt2img" else None)
+            for paste_tabname, paste_button in buttons.items():
+                parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
+                    paste_button=paste_button, tabname=paste_tabname, source_tabname="txt2img" if tabname == "txt2img" else None, source_image_component=result_gallery
+                ))
+
             return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info, html_log
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index 66a418651..37d30e1f2 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -13,7 +13,7 @@ import shutil
 import errno
 
 from modules import extensions, shared, paths
-
+from modules.call_queue import wrap_gradio_gpu_call
 
 available_extensions = {"extensions": []}
 
@@ -50,12 +50,17 @@ def apply_and_restart(disable_list, update_list):
     shared.state.need_restart = True
 
 
-def check_updates():
+def check_updates(id_task, disable_list):
     check_access()
 
-    for ext in extensions.extensions:
-        if ext.remote is None:
-            continue
+    disabled = json.loads(disable_list)
+    assert type(disabled) == list, f"wrong disable_list data for check_updates: {disable_list}"
+
+    exts = [ext for ext in extensions.extensions if ext.remote is not None and ext.name not in disabled]
+    shared.state.job_count = len(exts)
+
+    for ext in exts:
+        shared.state.textinfo = ext.name
 
         try:
             ext.check_updates()
@@ -63,7 +68,9 @@ def check_updates():
             print(f"Error checking updates for {ext.name}:", file=sys.stderr)
             print(traceback.format_exc(), file=sys.stderr)
 
-    return extension_table()
+        shared.state.nextjob()
+
+    return extension_table(), ""
 
 
 def extension_table():
@@ -273,12 +280,13 @@ def create_ui():
         with gr.Tabs(elem_id="tabs_extensions") as tabs:
             with gr.TabItem("Installed"):
 
-                with gr.Row():
+                with gr.Row(elem_id="extensions_installed_top"):
                     apply = gr.Button(value="Apply and restart UI", variant="primary")
                     check = gr.Button(value="Check for updates")
                     extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False).style(container=False)
                     extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False).style(container=False)
 
+                info = gr.HTML()
                 extensions_table = gr.HTML(lambda: extension_table())
 
                 apply.click(
@@ -289,10 +297,10 @@ def create_ui():
                 )
 
                 check.click(
-                    fn=check_updates,
+                    fn=wrap_gradio_gpu_call(check_updates, extra_outputs=[gr.update()]),
                     _js="extensions_check",
-                    inputs=[],
-                    outputs=[extensions_table],
+                    inputs=[info, extensions_disabled_list],
+                    outputs=[extensions_table, info],
                 )
 
             with gr.TabItem("Available"):
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index c6ff889a8..833679680 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -1,4 +1,7 @@
+import glob
 import os.path
+import urllib.parse
+from pathlib import Path
 
 from modules import shared
 import gradio as gr
@@ -8,12 +11,31 @@ import html
 from modules.generation_parameters_copypaste import image_from_url_text
 
 extra_pages = []
+allowed_dirs = set()
 
 
 def register_page(page):
     """registers extra networks page for the UI; recommend doing it in on_before_ui() callback for extensions"""
 
     extra_pages.append(page)
+    allowed_dirs.clear()
+    allowed_dirs.update(set(sum([x.allowed_directories_for_previews() for x in extra_pages], [])))
+
+
+def add_pages_to_demo(app):
+    def fetch_file(filename: str = ""):
+        from starlette.responses import FileResponse
+
+        if not any([Path(x).resolve() in Path(filename).resolve().parents for x in allowed_dirs]):
+            raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.")
+
+        if os.path.splitext(filename)[1].lower() != ".png":
+            raise ValueError(f"File cannot be fetched: {filename}. Only png.")
+
+        # could be improved by returning 304 Not Modified for unchanged files
+        return FileResponse(filename, headers={"Accept-Ranges": "bytes"})
+
+    app.add_api_route("/sd_extra_networks/thumb", fetch_file, methods=["GET"])
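
The guard in fetch_file deserves a closer look: a file is served only when one of the registered preview directories appears among the resolved parents of the requested path, which also neutralizes ".." traversal. A standalone check on hypothetical POSIX paths:

    from pathlib import Path

    allowed = Path("/data/models/Lora").resolve()
    ok = Path("/data/models/Lora/style/foo.png").resolve()
    evil = Path("/data/models/Lora/../../../etc/passwd").resolve()  # -> /etc/passwd

    assert allowed in ok.parents        # would be served
    assert allowed not in evil.parents  # rejected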
 
 
 class ExtraNetworksPage:
@@ -26,10 +48,44 @@ class ExtraNetworksPage:
     def refresh(self):
         pass
 
+    def link_preview(self, filename):
+        return "./sd_extra_networks/thumb?filename=" + urllib.parse.quote(filename.replace('\\', '/')) + "&mtime=" + str(os.path.getmtime(filename))
+
+    def search_terms_from_path(self, filename, possible_directories=None):
+        abspath = os.path.abspath(filename)
+
+        for parentdir in (possible_directories if possible_directories is not None else self.allowed_directories_for_previews()):
+            parentdir = os.path.abspath(parentdir)
+            if abspath.startswith(parentdir):
+                return abspath[len(parentdir):].replace('\\', '/')
+
+        return ""
+
     def create_html(self, tabname):
         view = shared.opts.extra_networks_default_view
         items_html = ''
 
+        subdirs = {}
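+        # walk every preview root and collect its subdirectories to offer as filter buttons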
+        for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]:
+            for x in glob.glob(os.path.join(parentdir, '**/*'), recursive=True):
+                if not os.path.isdir(x):
+                    continue
+
+                subdir = os.path.abspath(x)[len(parentdir):].replace("\\", "/")
+                while subdir.startswith("/"):
+                    subdir = subdir[1:]
+
+                subdirs[subdir] = 1
+
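+        # the empty key renders as the "all" button that clears the search filter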
+        if subdirs:
+            subdirs = {"": 1, **subdirs}
+
+        subdirs_html = "".join([f"""
+<button class='gr-button gr-button-lg gr-button-secondary{" search-all" if subdir=="" else ""}' onclick='extraNetworksSearchButton("{tabname}_extra_tabs", event)'>
+{html.escape(subdir if subdir!="" else "all")}
+</button>
+""" for subdir in subdirs])
+
         for item in self.list_items():
             items_html += self.create_html_for_item(item, tabname)
 
@@ -38,6 +94,9 @@ class ExtraNetworksPage:
             items_html = shared.html("extra-networks-no-cards.html").format(dirs=dirs)
 
         res = f"""
+<div id='{tabname}_{self.name}_subdirs' class='extra-network-subdirs extra-network-subdirs-{view}'>
+{subdirs_html}
+</div>
 <div id='{tabname}_{self.name}_cards' class='extra-network-{view}'>
 {items_html}
 </div>
@@ -54,14 +113,19 @@ class ExtraNetworksPage:
     def create_html_for_item(self, item, tabname):
         preview = item.get("preview", None)
 
+        onclick = item.get("onclick", None)
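+        # items without a custom handler fall back to the default prompt-insertion click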
+        if onclick is None:
+            onclick = '"' + html.escape(f"""return cardClicked({json.dumps(tabname)}, {item["prompt"]}, {"true" if self.allow_negative_prompt else "false"})""") + '"'
+
         args = {
             "preview_html": "style='background-image: url(\"" + html.escape(preview) + "\")'" if preview else '',
-            "prompt": item["prompt"],
+            "prompt": item.get("prompt", None),
             "tabname": json.dumps(tabname),
             "local_preview": json.dumps(item["local_preview"]),
             "name": item["name"],
-            "card_clicked": '"' + html.escape(f"""return cardClicked({json.dumps(tabname)}, {item["prompt"]}, {"true" if self.allow_negative_prompt else "false"})""") + '"',
+            "card_clicked": onclick,
             "save_card_preview": '"' + html.escape(f"""return saveCardPreview(event, {json.dumps(tabname)}, {json.dumps(item["local_preview"])})""") + '"',
+            "search_term": item.get("search_term", ""),
         }
 
         return self.card_page.format(**args)
@@ -143,7 +207,7 @@ def path_is_parent(parent_path, child_path):
     parent_path = os.path.abspath(parent_path)
     child_path = os.path.abspath(child_path)
 
-    return os.path.commonpath([parent_path]) == os.path.commonpath([parent_path, child_path])
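+    # plain prefix comparison; both paths were normalized with os.path.abspath above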
+    return child_path.startswith(parent_path)
 
 
 def setup_ui(ui, gallery):
@@ -173,7 +237,8 @@ def setup_ui(ui, gallery):
 
     ui.button_save_preview.click(
         fn=save_preview,
-        _js="function(x, y, z){console.log(x, y, z); return [selected_gallery_index(), y, z]}",
+        _js="function(x, y, z){return [selected_gallery_index(), y, z]}",
         inputs=[ui.preview_target_filename, gallery, ui.preview_target_filename],
         outputs=[*ui.pages]
     )
diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py
new file mode 100644
index 000000000..04097a794
--- /dev/null
+++ b/modules/ui_extra_networks_checkpoints.py
@@ -0,0 +1,39 @@
+import html
+import json
+import os
+
+from modules import shared, ui_extra_networks, sd_models
+
+
+class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
+    def __init__(self):
+        super().__init__('Checkpoints')
+
+    def refresh(self):
+        shared.refresh_checkpoints()
+
+    def list_items(self):
+        checkpoint: sd_models.CheckpointInfo
+        for name, checkpoint in sd_models.checkpoints_list.items():
+            path, ext = os.path.splitext(checkpoint.filename)
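+            # look for a preview image saved next to the checkpoint file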
+            previews = [path + ".png", path + ".preview.png"]
+
+            preview = None
+            for file in previews:
+                if os.path.isfile(file):
+                    preview = self.link_preview(file)
+                    break
+
+            yield {
+                "name": checkpoint.name_for_extra,
+                "filename": path,
+                "preview": preview,
+                "search_term": self.search_terms_from_path(checkpoint.filename) + " " + (checkpoint.sha256 or ""),
+                "onclick": '"' + html.escape(f"""return selectCheckpoint({json.dumps(name)})""") + '"',
+                "local_preview": path + ".png",
+            }
+
+    def allowed_directories_for_previews(self):
+        return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None]
diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py
index 65d000cf5..578510887 100644
--- a/modules/ui_extra_networks_hypernets.py
+++ b/modules/ui_extra_networks_hypernets.py
@@ -19,13 +19,14 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
             preview = None
             for file in previews:
                 if os.path.isfile(file):
-                    preview = "./file=" + file.replace('\\', '/') + "?mtime=" + str(os.path.getmtime(file))
+                    preview = self.link_preview(file)
                     break
 
             yield {
                 "name": name,
                 "filename": path,
                 "preview": preview,
+                "search_term": self.search_terms_from_path(path),
                 "prompt": json.dumps(f"<hypernet:{name}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
                 "local_preview": path + ".png",
             }
diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py
index dbd23d2df..bb64eb81e 100644
--- a/modules/ui_extra_networks_textual_inversion.py
+++ b/modules/ui_extra_networks_textual_inversion.py
@@ -19,12 +19,13 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage):
 
             preview = None
             if os.path.isfile(preview_file):
-                preview = "./file=" + preview_file.replace('\\', '/') + "?mtime=" + str(os.path.getmtime(preview_file))
+                preview = self.link_preview(preview_file)
 
             yield {
                 "name": embedding.name,
                 "filename": embedding.filename,
                 "preview": preview,
+                "search_term": self.search_terms_from_path(embedding.filename),
                 "prompt": json.dumps(embedding.name),
                 "local_preview": path + ".preview.png",
             }
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index f01160559..3df404834 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -383,6 +383,15 @@ class Script(scripts.Script):
         y_type.change(fn=select_axis, inputs=[y_type], outputs=[fill_y_button])
         z_type.change(fn=select_axis, inputs=[z_type], outputs=[fill_z_button])
 
+        self.infotext_fields = (
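+            # (UI component, infotext label) pairs used when pasting generation parameters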
+            (x_type, "X Type"),
+            (x_values, "X Values"),
+            (y_type, "Y Type"),
+            (y_values, "Y Values"),
+            (z_type, "Z Type"),
+            (z_values, "Z Values"),
+        )
+
         return [x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds]
 
     def run(self, p, x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds):
@@ -542,6 +551,7 @@ class Script(scripts.Script):
 
             if grid_infotext[0] is None:
                 pc.extra_generation_params = copy(pc.extra_generation_params)
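+                # record the script name so the grid settings survive in the image's infotext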
+                pc.extra_generation_params['Script'] = self.title()
 
                 if x_opt.label != 'Nothing':
                     pc.extra_generation_params["X Type"] = x_opt.label
diff --git a/style.css b/style.css
index dd9141040..05572f662 100644
--- a/style.css
+++ b/style.css
@@ -74,7 +74,12 @@
 #txt2img_gallery img, #img2img_gallery img{
     object-fit: scale-down;
 }
-
+#txt2img_actions_column, #img2img_actions_column {
+    margin: 0.35rem 0.75rem 0.35rem 0;
+}
+#script_list {
+    padding: .625rem .75rem 0 .625rem;
+}
 .justify-center.overflow-x-scroll {
     justify-content: left;
 }
@@ -126,6 +131,7 @@
 
 #txt2img_actions_column, #img2img_actions_column{
     gap: 0;
+    margin-right: .75rem;
 }
 
 #txt2img_tools, #img2img_tools{
@@ -150,6 +156,7 @@
 
 #txt2img_styles_row, #img2img_styles_row{
     gap: 0.25em;
+    margin-top: 0.3em;
 }
 
 #txt2img_styles_row > button, #img2img_styles_row > button{
@@ -311,11 +318,11 @@ input[type="range"]{
 .min-h-\[6rem\] { min-height: unset !important; }
 
 .progressDiv{
-    position: absolute;
+    position: relative;
     height: 20px;
-    top: -20px;
     background: #b4c0cc;
     border-radius: 3px !important;
+    margin-bottom: -3px;
 }
 
 .dark .progressDiv{
@@ -535,7 +542,7 @@ input[type="range"]{
 }
 
 #quicksettings {
-    gap: 0.4em;
+    width: fit-content;
 }
 
 #quicksettings > div, #quicksettings > fieldset{
@@ -545,6 +552,7 @@ input[type="range"]{
     border: none;
     box-shadow: none;
     background: none;
+    margin-right: 10px;
 }
 
 #quicksettings > div > div > div > label > span {
@@ -567,7 +575,7 @@ canvas[key="mask"] {
     right: 0.5em;
     top: -0.6em;
     z-index: 400;
-    width: 8em;
+    width: 6em;
 }
 #quicksettings .gr-box > div > div > input.gr-text-input {
   top: -1.12em;
@@ -665,11 +673,27 @@ canvas[key="mask"] {
 
 #quicksettings .gr-button-tool{
     margin: 0;
+    border-color: unset;
+    background-color: unset;
 }
 
-
+#modelmerger_interp_description > p {
+    margin: 0 !important;
+    text-align: center;
+}
+#modelmerger_interp_description {
+    margin: 0.35rem 0.75rem 1.23rem;
+}
 #img2img_settings > div.gr-form, #txt2img_settings > div.gr-form {
     padding-top: 0.9em;
+    padding-bottom: 0.9em;
+}
+#txt2img_settings {
+    padding-top: 1.16em;
+    padding-bottom: 0.9em;
+}
+#img2img_settings {
+    padding-bottom: 0.9em;
 }
 
 #img2img_settings div.gr-form .gr-form, #txt2img_settings div.gr-form .gr-form, #train_tabs div.gr-form .gr-form{
@@ -741,6 +765,7 @@ footer {
 
 .dark .gr-compact{
     background-color: rgb(31 41 55 / var(--tw-bg-opacity));
+    margin-left: 0;
 }
 
 .gr-compact{
@@ -782,7 +807,13 @@ footer {
     margin: 0.3em;
 }
 
+.extra-network-subdirs{
+    padding: 0.2em 0.35em;
+}
 
+.extra-network-subdirs button{
+    margin: 0 0.15em;
+}
 
 #txt2img_extra_networks .search, #img2img_extra_networks .search{
     display: inline-block;
@@ -925,3 +956,6 @@ footer {
     color: red;
 }
 
+[id*='_prompt_container'] > div {
+    margin: 0 !important;
+}
diff --git a/webui-macos-env.sh b/webui-macos-env.sh
index fa187dd10..37cac4fb0 100644
--- a/webui-macos-env.sh
+++ b/webui-macos-env.sh
@@ -10,7 +10,7 @@ then
 fi
 
 export install_dir="$HOME"
-export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --use-cpu interrogate"
+export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate"
 export TORCH_COMMAND="pip install torch==1.12.1 torchvision==0.13.1"
 export K_DIFFUSION_REPO="https://github.com/brkirch/k-diffusion.git"
 export K_DIFFUSION_COMMIT_HASH="51c9778f269cedb55a4d88c79c0246d35bdadb71"
diff --git a/webui.py b/webui.py
index 41f32f5ca..0d0b83649 100644
--- a/webui.py
+++ b/webui.py
@@ -12,7 +12,7 @@ from packaging import version
 import logging
 logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
 
-from modules import import_hook, errors, extra_networks
+from modules import import_hook, errors, extra_networks, ui_extra_networks_checkpoints
 from modules import extra_networks_hypernet, ui_extra_networks_hypernets, ui_extra_networks_textual_inversion
 from modules.call_queue import wrap_queued_call, queue_lock, wrap_gradio_gpu_call
 
@@ -119,6 +119,7 @@ def initialize():
     ui_extra_networks.intialize()
     ui_extra_networks.register_page(ui_extra_networks_textual_inversion.ExtraNetworksPageTextualInversion())
     ui_extra_networks.register_page(ui_extra_networks_hypernets.ExtraNetworksPageHypernetworks())
+    ui_extra_networks.register_page(ui_extra_networks_checkpoints.ExtraNetworksPageCheckpoints())
 
     extra_networks.initialize()
     extra_networks.register_extra_network(extra_networks_hypernet.ExtraNetworkHypernet())
@@ -227,6 +228,8 @@ def webui():
         if launch_api:
             create_api(app)
 
+        # register the /sd_extra_networks/thumb route on the FastAPI app
+        ui_extra_networks.add_pages_to_demo(app)
+
         modules.script_callbacks.app_started_callback(shared.demo, app)
 
         wait_on_server(shared.demo)
@@ -254,6 +257,7 @@ def webui():
         ui_extra_networks.intialize()
         ui_extra_networks.register_page(ui_extra_networks_textual_inversion.ExtraNetworksPageTextualInversion())
         ui_extra_networks.register_page(ui_extra_networks_hypernets.ExtraNetworksPageHypernetworks())
+        ui_extra_networks.register_page(ui_extra_networks_checkpoints.ExtraNetworksPageCheckpoints())
 
         extra_networks.initialize()
         extra_networks.register_extra_network(extra_networks_hypernet.ExtraNetworkHypernet())