Merge branch 'master' into test_resolve_conflicts

Authored by MalumaDev on 2022-10-18 17:27:30 +02:00; committed by GitHub
commit c2765c9bcd
15 changed files with 173 additions and 91 deletions

View File

@@ -523,7 +523,6 @@ Affandi,0.7170285,nudity
 Diane Arbus,0.655138,digipa-high-impact
 Joseph Ducreux,0.65247905,digipa-high-impact
 Berthe Morisot,0.7165984,fineart
-Hilma AF Klint,0.71643853,scribbles
 Hilma af Klint,0.71643853,scribbles
 Filippino Lippi,0.7163017,fineart
 Leonid Afremov,0.7163005,fineart
@@ -738,14 +737,12 @@ Abraham Mignon,0.60605425,fineart
 Albert Bloch,0.69573116,nudity
 Charles Dana Gibson,0.67155975,fineart
 Alexandre-Évariste Fragonard,0.6507174,fineart
-Alexandre-Évariste Fragonard,0.6507174,fineart
 Ernst Fuchs,0.6953538,nudity
 Alfredo Jaar,0.6952965,digipa-high-impact
 Judy Chicago,0.6952246,weird
 Frans van Mieris the Younger,0.6951849,fineart
 Aertgen van Leyden,0.6951305,fineart
 Emily Carr,0.69512105,fineart
-Frances Macdonald,0.6950408,scribbles
 Frances MacDonald,0.6950408,scribbles
 Hannah Höch,0.69495845,scribbles
 Gillis Rombouts,0.58770025,fineart
@@ -895,7 +892,6 @@ Richard McGuire,0.6820089,scribbles
 Anni Albers,0.65708244,digipa-high-impact
 Aleksey Savrasov,0.65207493,fineart
 Wayne Barlowe,0.6537874,fineart
-Giorgio De Chirico,0.6815907,fineart
 Giorgio de Chirico,0.6815907,fineart
 Ernest Procter,0.6815795,fineart
 Adriaen Brouwer,0.6815058,fineart
@@ -1241,7 +1237,6 @@ Betty Churcher,0.65387225,fineart
 Claes Corneliszoon Moeyaert,0.65386075,fineart
 David Bomberg,0.6537477,fineart
 Abraham Bosschaert,0.6535562,fineart
-Giuseppe De Nittis,0.65354455,fineart
 Giuseppe de Nittis,0.65354455,fineart
 John La Farge,0.65342575,fineart
 Frits Thaulow,0.65341854,fineart
@@ -1522,7 +1517,6 @@ Gertrude Harvey,0.5903887,fineart
 Grant Wood,0.6266253,fineart
 Fyodor Vasilyev,0.5234919,digipa-med-impact
 Cagnaccio di San Pietro,0.6261671,fineart
-Cagnaccio Di San Pietro,0.6261671,fineart
 Doris Boulton-Maude,0.62593174,fineart
 Adolf Hirémy-Hirschl,0.5946784,fineart
 Harold von Schmidt,0.6256755,fineart
@@ -2411,7 +2405,6 @@ Hermann Feierabend,0.5346168,digipa-high-impact
 Antonio Donghi,0.4610982,digipa-low-impact
 Adonna Khare,0.4858036,digipa-med-impact
 James Stokoe,0.5015107,digipa-med-impact
-Art & Language,0.5341332,digipa-high-impact
 Agustín Fernández,0.53403986,fineart
 Germán Londoño,0.5338712,fineart
 Emmanuelle Moureaux,0.5335641,digipa-high-impact


View File

@@ -9,9 +9,38 @@ addEventListener('keydown', (event) => {
    let minus = "ArrowDown"
    if (event.key != plus && event.key != minus) return;

-   selectionStart = target.selectionStart;
-   selectionEnd = target.selectionEnd;
-   if(selectionStart == selectionEnd) return;
+   let selectionStart = target.selectionStart;
+   let selectionEnd = target.selectionEnd;
+
+   // If the user hasn't selected anything, let's select their current parenthesis block
+   if (selectionStart === selectionEnd) {
+       // Find opening parenthesis around current cursor
+       const before = target.value.substring(0, selectionStart);
+       let beforeParen = before.lastIndexOf("(");
+       if (beforeParen == -1) return;
+       let beforeParenClose = before.lastIndexOf(")");
+       while (beforeParenClose !== -1 && beforeParenClose > beforeParen) {
+           beforeParen = before.lastIndexOf("(", beforeParen - 1);
+           beforeParenClose = before.lastIndexOf(")", beforeParenClose - 1);
+       }
+
+       // Find closing parenthesis around current cursor
+       const after = target.value.substring(selectionStart);
+       let afterParen = after.indexOf(")");
+       if (afterParen == -1) return;
+       let afterParenOpen = after.indexOf("(");
+       while (afterParenOpen !== -1 && afterParen > afterParenOpen) {
+           afterParen = after.indexOf(")", afterParen + 1);
+           afterParenOpen = after.indexOf("(", afterParenOpen + 1);
+       }
+       if (beforeParen === -1 || afterParen === -1) return;
+
+       // Set the selection to the text between the parenthesis
+       const parenContent = target.value.substring(beforeParen + 1, selectionStart + afterParen);
+       const lastColon = parenContent.lastIndexOf(":");
+       selectionStart = beforeParen + 1;
+       selectionEnd = selectionStart + lastColon;
+       target.setSelectionRange(selectionStart, selectionEnd);
+   }
+
    event.preventDefault();
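
Note: the added block selects the enclosing "(...)" group when nothing is highlighted, so the arrow keys can adjust that group's attention weight. Roughly the same scan in Python, purely as an illustration (the function name and structure below are mine, not part of the change):

    def select_paren_block(text: str, cursor: int):
        # Walk left for the nearest unmatched "(", skipping fully closed "(...)" groups.
        before = text[:cursor]
        open_pos = before.rfind("(")
        if open_pos == -1:
            return None
        close_pos = before.rfind(")")
        while close_pos != -1 and close_pos > open_pos:
            open_pos = before.rfind("(", 0, open_pos)
            close_pos = before.rfind(")", 0, close_pos)

        # Walk right for the matching ")" the same way.
        after = text[cursor:]
        close_off = after.find(")")
        if close_off == -1:
            return None
        open_off = after.find("(")
        while open_off != -1 and close_off > open_off:
            close_off = after.find(")", close_off + 1)
            open_off = after.find("(", open_off + 1)
        if open_pos == -1 or close_off == -1:
            return None

        # Select up to the ":weight" suffix, assuming "(text:1.2)" syntax.
        content = text[open_pos + 1:cursor + close_off]
        return open_pos + 1, open_pos + 1 + content.rfind(":")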

View File

@@ -1,5 +1,12 @@
 // various functions for interation with ui.py not large enough to warrant putting them in separate files

+function set_theme(theme){
+    gradioURL = window.location.href
+    if (!gradioURL.includes('?__theme=')) {
+        window.location.replace(gradioURL + '?__theme=' + theme);
+    }
+}
+
 function selected_gallery_index(){
     var buttons = gradioApp().querySelectorAll('[style="display: block;"].tabitem .gallery-item')
     var button = gradioApp().querySelector('[style="display: block;"].tabitem .gallery-item.\\!ring-2')

View File

@@ -87,6 +87,23 @@ def git_clone(url, dir, name, commithash=None):
         run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")


+def version_check(commit):
+    try:
+        import requests
+        commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json()
+        if commit != "<none>" and commits['commit']['sha'] != commit:
+            print("--------------------------------------------------------")
+            print("| You are not up to date with the most recent release. |")
+            print("| Consider running `git pull` to update.               |")
+            print("--------------------------------------------------------")
+        elif commits['commit']['sha'] == commit:
+            print("You are up to date with the most recent release.")
+        else:
+            print("Not a git clone, can't perform version check.")
+    except Exception as e:
+        print("versipm check failed", e)
+
+
 def prepare_enviroment():
     torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
     requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
@@ -110,13 +127,14 @@ def prepare_enviroment():
     codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
     blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")

-    args = shlex.split(commandline_args)
+    sys.argv += shlex.split(commandline_args)

-    args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')
-    args, reinstall_xformers = extract_arg(args, '--reinstall-xformers')
-    xformers = '--xformers' in args
-    deepdanbooru = '--deepdanbooru' in args
-    ngrok = '--ngrok' in args
+    sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test')
+    sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers')
+    sys.argv, update_check = extract_arg(sys.argv, '--update-check')
+    xformers = '--xformers' in sys.argv
+    deepdanbooru = '--deepdanbooru' in sys.argv
+    ngrok = '--ngrok' in sys.argv

     try:
         commit = run(f"{git} rev-parse HEAD").strip()
@@ -163,9 +181,10 @@ def prepare_enviroment():
     run_pip(f"install -r {requirements_file}", "requirements for Web UI")

-    sys.argv += args
+    if update_check:
+        version_check(commit)

-    if "--exit" in args:
+    if "--exit" in sys.argv:
         print("Exiting because of --exit argument")
         exit(0)
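
Note: extract_arg is called above but defined outside this hunk; a minimal definition consistent with how it is used (strip a flag from the argument list and report whether it was present) might look like the sketch below. This is an assumption for illustration, not the file's actual code:

    import sys

    def extract_arg(args, name):
        # Drop every occurrence of the flag; report whether it appeared at all.
        return [x for x in args if x != name], name in args

    # e.g. with sys.argv == ['launch.py', '--update-check', '--xformers']:
    sys.argv, update_check = extract_arg(sys.argv, '--update-check')
    # sys.argv is now ['launch.py', '--xformers'] and update_check is True,
    # which is what gates the version_check(commit) call added above.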

View File

@@ -157,8 +157,7 @@ def get_deepbooru_tags_from_model(model, tags, pil_image, threshold, deepbooru_o
     # sort by reverse by likelihood and normal for alpha, and format tag text as requested
     unsorted_tags_in_theshold.sort(key=lambda y: y[sort_ndx], reverse=(not alpha_sort))
     for weight, tag in unsorted_tags_in_theshold:
-        # note: tag_outformat will still have a colon if include_ranks is True
-        tag_outformat = tag.replace(':', ' ')
+        tag_outformat = tag
         if use_spaces:
             tag_outformat = tag_outformat.replace('_', ' ')
         if use_escape:

View File

@@ -216,8 +216,11 @@ def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_nam
     if theta_func1:
         for key in tqdm.tqdm(theta_1.keys()):
             if 'model' in key:
-                t2 = theta_2.get(key, torch.zeros_like(theta_1[key]))
-                theta_1[key] = theta_func1(theta_1[key], t2)
+                if key in theta_2:
+                    t2 = theta_2.get(key, torch.zeros_like(theta_1[key]))
+                    theta_1[key] = theta_func1(theta_1[key], t2)
+                else:
+                    theta_1[key] = torch.zeros_like(theta_1[key])

         del theta_2, teritary_model

     for key in tqdm.tqdm(theta_0.keys()):
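
Note: the new guard handles checkpoints whose key sets differ: keys absent from the tertiary model are now zeroed outright instead of being merged against a zero tensor. A small sketch of that behaviour with a stand-in merge function (theta_func1's real definition lives elsewhere in extras.py and is only assumed here):

    import torch

    def theta_func1(a, b):          # stand-in for the selected merge function
        return a - b

    theta_1 = {"model.w1": torch.ones(2), "model.w2": torch.ones(2)}
    theta_2 = {"model.w1": torch.full((2,), 0.25)}   # "model.w2" is missing

    for key in theta_1.keys():
        if 'model' in key:
            if key in theta_2:
                t2 = theta_2.get(key, torch.zeros_like(theta_1[key]))
                theta_1[key] = theta_func1(theta_1[key], t2)
            else:
                # New in this diff: missing keys are zeroed rather than merged with zeros.
                theta_1[key] = torch.zeros_like(theta_1[key])

    print(theta_1["model.w1"])   # tensor([0.7500, 0.7500])
    print(theta_1["model.w2"])   # tensor([0., 0.])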

View File

@@ -419,11 +419,6 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
             samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds,
                                     subseed_strength=p.subseed_strength)

-            if state.interrupted or state.skipped:
-                # if we are interrupted, sample returns just noise
-                # use the image collected previously in sampler loop
-                samples_ddim = shared.state.current_latent
-
             samples_ddim = samples_ddim.to(devices.dtype_vae)
             x_samples_ddim = decode_first_stage(p.sd_model, samples_ddim)
             x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)

View File

@@ -96,6 +96,7 @@ def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
 class ScriptRunner:
     def __init__(self):
         self.scripts = []
+        self.titles = []

     def setup_ui(self, is_img2img):
         for script_class, path in scripts_data:
@@ -107,9 +108,10 @@ class ScriptRunner:
             self.scripts.append(script)

-        titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.scripts]
+        self.titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.scripts]

-        dropdown = gr.Dropdown(label="Script", choices=["None"] + titles, value="None", type="index")
+        dropdown = gr.Dropdown(label="Script", choices=["None"] + self.titles, value="None", type="index")
+        dropdown.save_to_config = True
         inputs = [dropdown]

         for script in self.scripts:
@@ -139,6 +141,15 @@ class ScriptRunner:
             return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))]

+        def init_field(title):
+            if title == 'None':
+                return
+            script_index = self.titles.index(title)
+            script = self.scripts[script_index]
+            for i in range(script.args_from, script.args_to):
+                inputs[i].visible = True
+
+        dropdown.init_field = init_field
+
         dropdown.change(
             fn=select_script,
             inputs=[dropdown],
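
Note: save_to_config plus init_field is the handshake that lets the UI-state loader in ui.py restore a saved Script selection and re-show that script's argument widgets. A minimal, Gradio-free illustration of the pattern (every name below is a stand-in):

    class FakeWidget:
        def __init__(self):
            self.visible = False

    class FakeDropdown:
        def __init__(self, choices):
            self.choices = choices
            self.value = "None"
            self.save_to_config = True   # opt in to being persisted
            self.init_field = None       # called when a saved value is restored

    inputs = [FakeDropdown(["None", "My script"]), FakeWidget(), FakeWidget()]

    def init_field(title):
        if title == 'None':
            return
        for i in range(1, 3):            # this script's args_from..args_to
            inputs[i].visible = True

    inputs[0].init_field = init_field

    # What loadsave/apply_field effectively does when it finds a saved value:
    saved_value = "My script"
    if saved_value in inputs[0].choices:
        inputs[0].value = saved_value
        if inputs[0].init_field is not None:
            inputs[0].init_field(saved_value)

    print([w.visible for w in inputs[1:]])   # [True, True]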

View File

@@ -296,10 +296,16 @@ def xformers_attnblock_forward(self, x):
     try:
         h_ = x
         h_ = self.norm(h_)
-        q1 = self.q(h_).contiguous()
-        k1 = self.k(h_).contiguous()
-        v = self.v(h_).contiguous()
-        out = xformers.ops.memory_efficient_attention(q1, k1, v)
+        q = self.q(h_)
+        k = self.k(h_)
+        v = self.v(h_)
+        b, c, h, w = q.shape
+        q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
+        q = q.contiguous()
+        k = k.contiguous()
+        v = v.contiguous()
+        out = xformers.ops.memory_efficient_attention(q, k, v)
+        out = rearrange(out, 'b (h w) c -> b c h w', h=h)
         out = self.proj_out(out)
         return x + out
     except NotImplementedError:
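
Note: the rewrite flattens the NCHW feature map to (batch, tokens, channels) before the memory-efficient attention call and restores the shape afterwards, which is the layout that call works on here. The reshape round-trip in isolation, using einops only so the shapes are easy to follow:

    import torch
    from einops import rearrange

    b, c, h, w = 1, 8, 4, 4
    q = torch.randn(b, c, h, w)

    tokens = rearrange(q, 'b c h w -> b (h w) c').contiguous()
    print(tokens.shape)                                   # torch.Size([1, 16, 8])

    restored = rearrange(tokens, 'b (h w) c -> b c h w', h=h)
    print(torch.equal(restored, q))                       # True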

View File

@@ -98,25 +98,8 @@ def store_latent(decoded):
         shared.state.current_image = sample_to_image(decoded)


-def extended_tdqm(sequence, *args, desc=None, **kwargs):
-    state.sampling_steps = len(sequence)
-    state.sampling_step = 0
-
-    seq = sequence if cmd_opts.disable_console_progressbars else tqdm.tqdm(sequence, *args, desc=state.job, file=shared.progress_print_out, **kwargs)
-
-    for x in seq:
-        if state.interrupted or state.skipped:
-            break
-
-        yield x
-        state.sampling_step += 1
-        shared.total_tqdm.update()
-
-
-ldm.models.diffusion.ddim.tqdm = lambda *args, desc=None, **kwargs: extended_tdqm(*args, desc=desc, **kwargs)
-ldm.models.diffusion.plms.tqdm = lambda *args, desc=None, **kwargs: extended_tdqm(*args, desc=desc, **kwargs)
+class InterruptedException(BaseException):
+    pass


 class VanillaStableDiffusionSampler:
@@ -128,14 +111,32 @@ class VanillaStableDiffusionSampler:
         self.init_latent = None
         self.sampler_noises = None
         self.step = 0
+        self.stop_at = None
         self.eta = None
         self.default_eta = 0.0
         self.config = None
+        self.last_latent = None

     def number_of_needed_noises(self, p):
         return 0

+    def launch_sampling(self, steps, func):
+        state.sampling_steps = steps
+        state.sampling_step = 0
+
+        try:
+            return func()
+        except InterruptedException:
+            return self.last_latent
+
     def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
+        if state.interrupted or state.skipped:
+            raise InterruptedException
+
+        if self.stop_at is not None and self.step > self.stop_at:
+            raise InterruptedException
+
         conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
         unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
@@ -159,11 +160,16 @@ class VanillaStableDiffusionSampler:
         res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs)

         if self.mask is not None:
-            store_latent(self.init_latent * self.mask + self.nmask * res[1])
+            self.last_latent = self.init_latent * self.mask + self.nmask * res[1]
         else:
-            store_latent(res[1])
+            self.last_latent = res[1]
+
+        store_latent(self.last_latent)

         self.step += 1
+        state.sampling_step = self.step
+        shared.total_tqdm.update()
+
         return res

     def initialize(self, p):
@@ -192,7 +198,7 @@ class VanillaStableDiffusionSampler:
         self.init_latent = x
         self.step = 0

-        samples = self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning)
+        samples = self.launch_sampling(steps, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))

         return samples

@@ -206,9 +212,9 @@ class VanillaStableDiffusionSampler:
         # existing code fails with certain step counts, like 9
         try:
-            samples_ddim, _ = self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)
+            samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
         except Exception:
-            samples_ddim, _ = self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)
+            samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])

         return samples_ddim

@@ -223,6 +229,9 @@ class CFGDenoiser(torch.nn.Module):
         self.step = 0

     def forward(self, x, sigma, uncond, cond, cond_scale):
+        if state.interrupted or state.skipped:
+            raise InterruptedException
+
         conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
         uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
@@ -268,25 +277,6 @@ class CFGDenoiser(torch.nn.Module):

         return denoised


-def extended_trange(sampler, count, *args, **kwargs):
-    state.sampling_steps = count
-    state.sampling_step = 0
-
-    seq = range(count) if cmd_opts.disable_console_progressbars else tqdm.trange(count, *args, desc=state.job, file=shared.progress_print_out, **kwargs)
-
-    for x in seq:
-        if state.interrupted or state.skipped:
-            break
-
-        if sampler.stop_at is not None and x > sampler.stop_at:
-            break
-
-        yield x
-        state.sampling_step += 1
-        shared.total_tqdm.update()
-
-
 class TorchHijack:
     def __init__(self, kdiff_sampler):
         self.kdiff_sampler = kdiff_sampler
@@ -314,9 +304,28 @@ class KDiffusionSampler:
         self.eta = None
         self.default_eta = 1.0
         self.config = None
+        self.last_latent = None

     def callback_state(self, d):
-        store_latent(d["denoised"])
+        step = d['i']
+        latent = d["denoised"]
+        store_latent(latent)
+        self.last_latent = latent
+
+        if self.stop_at is not None and step > self.stop_at:
+            raise InterruptedException
+
+        state.sampling_step = step
+        shared.total_tqdm.update()
+
+    def launch_sampling(self, steps, func):
+        state.sampling_steps = steps
+        state.sampling_step = 0
+
+        try:
+            return func()
+        except InterruptedException:
+            return self.last_latent

     def number_of_needed_noises(self, p):
         return p.steps
@@ -339,9 +348,6 @@ class KDiffusionSampler:
         self.sampler_noise_index = 0
         self.eta = p.eta or opts.eta_ancestral

-        if hasattr(k_diffusion.sampling, 'trange'):
-            k_diffusion.sampling.trange = lambda *args, **kwargs: extended_trange(self, *args, **kwargs)
-
         if self.sampler_noises is not None:
             k_diffusion.sampling.torch = TorchHijack(self)

@@ -383,8 +389,9 @@ class KDiffusionSampler:
             self.model_wrap_cfg.init_latent = x

-        return self.func(self.model_wrap_cfg, xi, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)
+        samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, xi, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs))
+
+        return samples

     def sample(self, p, x, conditioning, unconditional_conditioning, steps=None):
         steps = steps or p.steps
@@ -406,6 +413,8 @@ class KDiffusionSampler:
             extra_params_kwargs['n'] = steps
         else:
             extra_params_kwargs['sigmas'] = sigmas
-        samples = self.func(self.model_wrap_cfg, x, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)
+
+        samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs))

         return samples
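
Note: this refactor replaces the patched tqdm/trange loops with an exception-based interrupt: step callbacks raise InterruptedException, and launch_sampling catches it and returns the last stored latent. The control flow boiled down to a toy example (stand-in names, no real sampling):

    class InterruptedException(BaseException):
        pass

    class DemoSampler:
        def __init__(self, stop_at=None):
            self.stop_at = stop_at
            self.last_latent = None

        def callback_state(self, step, latent):
            self.last_latent = latent
            if self.stop_at is not None and step > self.stop_at:
                raise InterruptedException

        def launch_sampling(self, steps, func):
            try:
                return func()
            except InterruptedException:
                # Interrupted: hand back the most recent intermediate result.
                return self.last_latent

        def sample(self, steps=10):
            def body():
                for step in range(steps):
                    latent = f"latent@{step}"        # stand-in for a tensor
                    self.callback_state(step, latent)
                return "final latent"
            return self.launch_sampling(steps, body)

    print(DemoSampler().sample())            # final latent
    print(DemoSampler(stop_at=3).sample())   # latent@4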

View File

@@ -73,13 +73,13 @@ parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image upload
 parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
 parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
 parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
+parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
 parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
 parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
 parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
 parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencoders model', default=None)
 parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)

 cmd_opts = parser.parse_args()

 restricted_opts = [
     "samples_filename_pattern",
@@ -308,6 +308,7 @@ options_templates.update(options_section(('ui', "User interface"), {
     "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
     "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
     "add_model_name_to_info": OptionInfo(False, "Add model name to generation information"),
+    "disable_weights_auto_swap": OptionInfo(False, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."),
     "font": OptionInfo("", "Font for image grids that have text"),
     "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
     "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),

View File

@@ -45,7 +45,7 @@ class StyleDatabase:
         if not os.path.exists(path):
             return

-        with open(path, "r", encoding="utf8", newline='') as file:
+        with open(path, "r", encoding="utf-8-sig", newline='') as file:
             reader = csv.DictReader(file)
             for row in reader:
                 # Support loading old CSV format with "name, text"-columns
@@ -79,7 +79,7 @@ class StyleDatabase:
     def save_styles(self, path: str) -> None:
         # Write to temporary file first, so we don't nuke the file if something goes wrong
         fd, temp_path = tempfile.mkstemp(".csv")

-        with os.fdopen(fd, "w", encoding="utf8", newline='') as file:
+        with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file:
             # _fields is actually part of the public API: typing.NamedTuple is a replacement for collections.NamedTuple,
             # and collections.NamedTuple has explicit documentation for accessing _fields. Same goes for _asdict()
             writer = csv.DictWriter(file, fieldnames=PromptStyle._fields)
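
Note: utf-8-sig matters here because CSV files edited in Excel on Windows usually start with a UTF-8 byte order mark; utf-8-sig strips it on read (and writes it on save), so the first header cell is not read back as '\ufeffname'. A quick check:

    data = b"\xef\xbb\xbfname,prompt\r\nmy style,masterpiece\r\n"

    print(data.decode("utf8").split(",")[0])        # '\ufeffname'
    print(data.decode("utf-8-sig").split(",")[0])   # 'name'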

View File

@@ -547,6 +547,10 @@ def apply_setting(key, value):
     if value is None:
         return gr.update()

+    # dont allow model to be swapped when model hash exists in prompt
+    if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap:
+        return gr.update()
+
     if key == "sd_model_checkpoint":
         ckpt_info = sd_models.get_closet_checkpoint_match(value)

@@ -1809,7 +1813,7 @@ Requested path was: {f}
         print(traceback.format_exc(), file=sys.stderr)

     def loadsave(path, x):
-        def apply_field(obj, field, condition=None):
+        def apply_field(obj, field, condition=None, init_field=None):
             key = path + "/" + field

             if getattr(obj,'custom_script_source',None) is not None:
@@ -1825,6 +1829,8 @@ Requested path was: {f}
                 print(f'Warning: Bad ui setting value: {key}: {saved_value}; Default value "{getattr(obj, field)}" will be used instead.')
             else:
                 setattr(obj, field, saved_value)
+                if init_field is not None:
+                    init_field(saved_value)

         if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
             apply_field(x, 'visible')
@@ -1850,7 +1856,8 @@ Requested path was: {f}
         # Since there are many dropdowns that shouldn't be saved,
         # we only mark dropdowns that should be saved.
         if type(x) == gr.Dropdown and getattr(x, 'save_to_config', False):
-            apply_field(x, 'value', lambda val: val in x.choices)
+            apply_field(x, 'value', lambda val: val in x.choices, getattr(x, 'init_field', None))
+            apply_field(x, 'visible')

     visit(txt2img_interface, loadsave, "txt2img")
     visit(img2img_interface, loadsave, "img2img")
@@ -1872,6 +1879,9 @@ for filename in sorted(os.listdir(jsdir)):
     with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile:
         javascript += f"\n<script>{jsfile.read()}</script>"

+if cmd_opts.theme is not None:
+    javascript += f"\n<script>set_theme('{cmd_opts.theme}');</script>\n"
+
 javascript += f"\n<script>{localization.localization_js(shared.opts.localization)}</script>"

 if 'gradio_routes_templates_response' not in globals():

View File

@@ -33,7 +33,7 @@ goto :launch
 :skip_venv

 :launch
-%PYTHON% launch.py
+%PYTHON% launch.py %*
 pause
 exit /b

View File

@@ -138,4 +138,4 @@ fi
 printf "\n%s\n" "${delimiter}"
 printf "Launching launch.py..."
 printf "\n%s\n" "${delimiter}"
-"${python_cmd}" "${LAUNCH_SCRIPT}"
+"${python_cmd}" "${LAUNCH_SCRIPT}" "$@"