From 956e1d8d90ed369986c03eded4fb583fa00960e8 Mon Sep 17 00:00:00 2001
From: XDOneDude <106700244+XDOneDude@users.noreply.github.com>
Date: Fri, 18 Aug 2023 21:25:59 -0400
Subject: [PATCH 1/2] xformers update

---
 modules/errors.py       | 2 +-
 modules/launch_utils.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/errors.py b/modules/errors.py
index 192cd8ffd..d4238e632 100644
--- a/modules/errors.py
+++ b/modules/errors.py
@@ -94,7 +94,7 @@ def check_versions():
     import gradio
 
     expected_torch_version = "2.0.0"
-    expected_xformers_version = "0.0.20"
+    expected_xformers_version = "0.0.21"
     expected_gradio_version = "3.39.0"
 
     if version.parse(torch.__version__) < version.parse(expected_torch_version):
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 7e4d5a613..c54e199fe 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -310,7 +310,7 @@ def prepare_environment():
     torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}")
     requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
 
-    xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
+    xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.21')
     clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip")
     openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip")
 

From 61c1261e4e50385aab68b84c0f886911466044bb Mon Sep 17 00:00:00 2001
From: XDOneDude <106700244+XDOneDude@users.noreply.github.com>
Date: Fri, 18 Aug 2023 21:56:15 -0400
Subject: [PATCH 2/2] more grammar fixes

---
 modules/processing.py    | 24 ++++++++++++------------
 modules/prompt_parser.py |  2 +-
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index e62db62fd..0315e1fdb 100755
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -386,14 +386,14 @@ class StableDiffusionProcessing:
         return self.token_merging_ratio or opts.token_merging_ratio
 
     def setup_prompts(self):
-        if type(self.prompt) == list:
+        if isinstance(self.prompt, list):
             self.all_prompts = self.prompt
-        elif type(self.negative_prompt) == list:
+        elif isinstance(self.negative_prompt, list):
             self.all_prompts = [self.prompt] * len(self.negative_prompt)
         else:
             self.all_prompts = self.batch_size * self.n_iter * [self.prompt]
 
-        if type(self.negative_prompt) == list:
+        if isinstance(self.negative_prompt, list):
             self.all_negative_prompts = self.negative_prompt
         else:
             self.all_negative_prompts = [self.negative_prompt] * len(self.all_prompts)
@@ -512,10 +512,10 @@ class Processed:
         self.s_noise = p.s_noise
         self.s_min_uncond = p.s_min_uncond
         self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
-        self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
-        self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
-        self.seed = int(self.seed if type(self.seed) != list else self.seed[0]) if self.seed is not None else -1
-        self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1
+        self.prompt = self.prompt if not isinstance(self.prompt, list) else self.prompt[0]
+        self.negative_prompt = self.negative_prompt if not isinstance(self.negative_prompt, list) else self.negative_prompt[0]
+        self.seed = int(self.seed if not isinstance(self.seed, list) else self.seed[0]) if self.seed is not None else -1
+        self.subseed = int(self.subseed if not isinstance(self.subseed, list) else self.subseed[0]) if self.subseed is not None else -1
         self.is_using_inpainting_conditioning = p.is_using_inpainting_conditioning
 
         self.all_prompts = all_prompts or p.all_prompts or [self.prompt]
@@ -741,7 +741,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
 def process_images_inner(p: StableDiffusionProcessing) -> Processed:
     """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
 
-    if type(p.prompt) == list:
+    if isinstance(p.prompt, list):
         assert(len(p.prompt) > 0)
     else:
         assert p.prompt is not None
@@ -772,12 +772,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
 
     p.setup_prompts()
 
-    if type(seed) == list:
+    if isinstance(seed, list):
         p.all_seeds = seed
     else:
         p.all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(p.all_prompts))]
 
-    if type(subseed) == list:
+    if isinstance(subseed, list):
         p.all_subseeds = subseed
     else:
         p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]
@@ -1268,12 +1268,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         if self.hr_negative_prompt == '':
             self.hr_negative_prompt = self.negative_prompt
 
-        if type(self.hr_prompt) == list:
+        if isinstance(self.hr_prompt, list):
             self.all_hr_prompts = self.hr_prompt
         else:
             self.all_hr_prompts = self.batch_size * self.n_iter * [self.hr_prompt]
 
-        if type(self.hr_negative_prompt) == list:
+        if isinstance(self.hr_negative_prompt, list):
             self.all_hr_negative_prompts = self.hr_negative_prompt
         else:
             self.all_hr_negative_prompts = self.batch_size * self.n_iter * [self.hr_negative_prompt]
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index 32d214e3a..e811ae99d 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -86,7 +86,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
                 yield args[(step - 1) % len(args)]
             def start(self, args):
                 def flatten(x):
-                    if type(x) == str:
+                    if isinstance(x, str):
                         yield x
                     else:
                         for gen in x: