diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index fa3614ff0..b6ad68307 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -137,6 +137,9 @@ def apply_refiner(sampler):
     if completed_ratio <= shared.opts.sd_refiner_switch_at:
         return False
 
+    if shared.opts.sd_refiner_checkpoint == "None":
+        return False
+
     if shared.sd_model.sd_checkpoint_info.title == shared.opts.sd_refiner_checkpoint:
         return False
 
diff --git a/modules/shared.py b/modules/shared.py
index 2fd299048..9935d2a7c 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -462,7 +462,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
     "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
     "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
     "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"),
-    "sd_refiner_checkpoint": OptionInfo(None, "Refiner checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints).info("switch to another model in the middle of generation"),
+    "sd_refiner_checkpoint": OptionInfo("None", "Refiner checkpoint", gr.Dropdown, lambda: {"choices": ["None"] + list_checkpoint_tiles()}, refresh=refresh_checkpoints).info("switch to another model in the middle of generation"),
     "sd_refiner_switch_at": OptionInfo(1.0, "Refiner switch at", gr.Slider, {"minimum": 0.01, "maximum": 1.0, "step": 0.01}).info("fraction of sampling steps when the swtch to refiner model should happen; 1=never, 0.5=switch in the middle of generation"),
 }))
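
For context: the patch makes the "Refiner checkpoint" dropdown store the literal string "None" (prepended to the checkpoint list) instead of Python's None, so apply_refiner needs an explicit guard for that sentinel value. Below is a minimal standalone sketch of the resulting decision logic; the helper should_switch_to_refiner, its flattened parameters, and the checkpoint names in the asserts are illustrative assumptions, not code from the repository.

# Illustrative sketch only -- condenses the patched apply_refiner() checks
# into a pure function; not part of the webui codebase.
def should_switch_to_refiner(completed_ratio, refiner_checkpoint,
                             current_checkpoint_title, switch_at):
    if completed_ratio <= switch_at:
        return False  # not far enough into sampling yet
    if refiner_checkpoint == "None":
        return False  # new guard: the "None" sentinel means no refiner selected
    if current_checkpoint_title == refiner_checkpoint:
        return False  # already running the refiner checkpoint
    return True

# With the option left at its new default string "None", no switch happens:
assert should_switch_to_refiner(0.9, "None", "base.safetensors", 0.8) is False
# Selecting a real refiner checkpoint past the switch point does switch:
assert should_switch_to_refiner(0.9, "refiner.safetensors", "base.safetensors", 0.8) is True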