Compare commits


No commits in common. "master" and "v1.8.0" have entirely different histories.

157 changed files with 1333 additions and 5586 deletions


@@ -78,8 +78,6 @@ module.exports = {
 //extraNetworks.js
 requestGet: "readonly",
 popup: "readonly",
-// profilerVisualization.js
-createVisualizationTable: "readonly",
 // from python
 localization: "readonly",
 // progrssbar.js


@@ -91,7 +91,7 @@ body:
 id: logs
 attributes:
 label: Console logs
-description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after the bug occurred. If it's very long, provide a link to pastebin or similar service.
+description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after the bug occured. If it's very long, provide a link to pastebin or similar service.
 render: Shell
 validations:
 required: true


@@ -11,8 +11,8 @@ jobs:
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
steps: steps:
- name: Checkout Code - name: Checkout Code
uses: actions/checkout@v4 uses: actions/checkout@v3
- uses: actions/setup-python@v5 - uses: actions/setup-python@v4
with: with:
python-version: 3.11 python-version: 3.11
# NB: there's no cache: pip here since we're not installing anything # NB: there's no cache: pip here since we're not installing anything
@@ -20,7 +20,7 @@ jobs:
# not to have GHA download an (at the time of writing) 4 GB cache # not to have GHA download an (at the time of writing) 4 GB cache
# of PyTorch and other dependencies. # of PyTorch and other dependencies.
- name: Install Ruff - name: Install Ruff
run: pip install ruff==0.3.3 run: pip install ruff==0.1.6
- name: Run Ruff - name: Run Ruff
run: ruff . run: ruff .
lint-js: lint-js:
@@ -29,9 +29,9 @@ jobs:
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
steps: steps:
- name: Checkout Code - name: Checkout Code
uses: actions/checkout@v4 uses: actions/checkout@v3
- name: Install Node.js - name: Install Node.js
uses: actions/setup-node@v4 uses: actions/setup-node@v3
with: with:
node-version: 18 node-version: 18
- run: npm i --ci - run: npm i --ci


@@ -11,9 +11,9 @@ jobs:
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
steps: steps:
- name: Checkout Code - name: Checkout Code
uses: actions/checkout@v4 uses: actions/checkout@v3
- name: Set up Python 3.10 - name: Set up Python 3.10
uses: actions/setup-python@v5 uses: actions/setup-python@v4
with: with:
python-version: 3.10.6 python-version: 3.10.6
cache: pip cache: pip
@@ -22,7 +22,7 @@ jobs:
launch.py launch.py
- name: Cache models - name: Cache models
id: cache-models id: cache-models
uses: actions/cache@v4 uses: actions/cache@v3
with: with:
path: models path: models
key: "2023-12-30" key: "2023-12-30"
@@ -68,13 +68,13 @@ jobs:
python -m coverage report -i python -m coverage report -i
python -m coverage html -i python -m coverage html -i
- name: Upload main app output - name: Upload main app output
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v3
if: always() if: always()
with: with:
name: output name: output
path: output.txt path: output.txt
- name: Upload coverage HTML - name: Upload coverage HTML
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v3
if: always() if: always()
with: with:
name: htmlcov name: htmlcov

.gitignore

@@ -2,7 +2,6 @@ __pycache__
 *.ckpt
 *.safetensors
 *.pth
-.DS_Store
 /ESRGAN/*
 /SwinIR/*
 /repositories
@@ -39,6 +38,3 @@ notification.mp3
 /package-lock.json
 /.coverage*
 /test/test_outputs
-/cache
-trace.json
-/sysinfo-????-??-??-??-??.json


@@ -1,283 +1,4 @@
## 1.10.1 ## 1.8.0-RC
### Bug Fixes:
* fix image upscale on cpu ([#16275](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16275))
## 1.10.0
### Features:
* A lot of performance improvements (see below in Performance section)
* Stable Diffusion 3 support ([#16030](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16030), [#16164](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16164), [#16212](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16212))
* Recommended Euler sampler; DDIM and other timestamp samplers currently not supported
* T5 text model is disabled by default, enable it in settings
* New schedulers:
* Align Your Steps ([#15751](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15751))
* KL Optimal ([#15608](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15608))
* Normal ([#16149](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16149))
* DDIM ([#16149](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16149))
* Simple ([#16142](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16142))
* Beta ([#16235](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16235))
* New sampler: DDIM CFG++ ([#16035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16035))
### Minor:
* Option to skip CFG on early steps ([#15607](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15607))
* Add --models-dir option ([#15742](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15742))
* Allow mobile users to open context menu by using two fingers press ([#15682](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15682))
* Infotext: add Lora name as TI hashes for bundled Textual Inversion ([#15679](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15679))
* Check model's hash after downloading it to prevent corrupted downloads ([#15602](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15602))
* More extension tag filtering options ([#15627](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15627))
* When saving AVIF, use JPEG's quality setting ([#15610](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15610))
* Add filename pattern: `[basename]` ([#15978](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15978))
* Add option to enable clip skip for clip L on SDXL ([#15992](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15992))
* Option to prevent screen sleep during generation ([#16001](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16001))
* ToggleLivePreview button in image viewer ([#16065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16065))
* Remove ui flashing on reloading and fast scrolling ([#16153](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16153))
* option to disable save button log.csv ([#16242](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16242))
### Extensions and API:
* Add process_before_every_sampling hook ([#15984](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15984))
* Return HTTP 400 instead of 404 on invalid sampler error ([#16140](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16140))
### Performance:
* [Performance 1/6] use_checkpoint = False ([#15803](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15803))
* [Performance 2/6] Replace einops.rearrange with torch native ops ([#15804](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15804))
* [Performance 4/6] Precompute is_sdxl_inpaint flag ([#15806](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15806))
* [Performance 5/6] Prevent unnecessary extra networks bias backup ([#15816](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15816))
* [Performance 6/6] Add --precision half option to avoid casting during inference ([#15820](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15820))
* [Performance] LDM optimization patches ([#15824](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15824))
* [Performance] Keep sigmas on CPU ([#15823](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15823))
* Check for nans in unet only once, after all steps have been completed
* Added option to run torch profiler for image generation
### Bug Fixes:
* Fix for grids without comprehensive infotexts ([#15958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15958))
* feat: lora partial update precede full update ([#15943](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15943))
* Fix bug where file extension had an extra '.' under some circumstances ([#15893](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15893))
* Fix corrupt model initial load loop ([#15600](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15600))
* Allow old sampler names in API ([#15656](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15656))
* more old sampler scheduler compatibility ([#15681](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15681))
* Fix Hypertile xyz ([#15831](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15831))
* XYZ CSV skipinitialspace ([#15832](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15832))
* fix soft inpainting on mps and xpu, torch_utils.float64 ([#15815](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15815))
* fix extension update when not on main branch ([#15797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15797))
* update pickle safe filenames
* use relative path for webui-assets css ([#15757](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15757))
* When creating a virtual environment, upgrade pip in webui.bat/webui.sh ([#15750](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15750))
* Fix AttributeError ([#15738](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15738))
* use script_path for webui root in launch_utils ([#15705](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15705))
* fix extra batch mode P Transparency ([#15664](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15664))
* use gradio theme colors in css ([#15680](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15680))
* Fix dragging text within prompt input ([#15657](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15657))
* Add correct mimetype for .mjs files ([#15654](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15654))
* QOL Items - handle metadata issues more cleanly for SD models, Loras and embeddings ([#15632](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15632))
* replace wsl-open with wslpath and explorer.exe ([#15968](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15968))
* Fix SDXL Inpaint ([#15976](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15976))
* multi size grid ([#15988](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15988))
* fix Replace preview ([#16118](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16118))
* Possible fix of wrong scale in weight decomposition ([#16151](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16151))
* Ensure use of python from venv on Mac and Linux ([#16116](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16116))
* Prioritize python3.10 over python3 if both are available on Linux and Mac (with fallback) ([#16092](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16092))
* stopping generation extras ([#16085](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16085))
* Fix SD2 loading ([#16078](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16078), [#16079](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16079))
* fix infotext Lora hashes for hires fix different lora ([#16062](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16062))
* Fix sampler scheduler autocorrection warning ([#16054](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16054))
* fix ui flashing on reloading and fast scrolling ([#16153](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16153))
* fix upscale logic ([#16239](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16239))
* [bug] do not break progressbar on non-job actions (add wrap_gradio_call_no_job) ([#16202](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16202))
* fix OSError: cannot write mode P as JPEG ([#16194](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16194))
### Other:
* fix changelog #15883 -> #15882 ([#15907](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15907))
* ReloadUI backgroundColor --background-fill-primary ([#15864](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15864))
* Use different torch versions for Intel and ARM Macs ([#15851](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15851))
* XYZ override rework ([#15836](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15836))
* scroll extensions table on overflow ([#15830](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15830))
* img2img batch upload method ([#15817](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15817))
* chore: sync v1.8.0 packages according to changelog ([#15783](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15783))
* Add AVIF MIME type support to mimetype definitions ([#15739](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15739))
* Update imageviewer.js ([#15730](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15730))
* no-referrer ([#15641](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15641))
* .gitignore trace.json ([#15980](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15980))
* Bump spandrel to 0.3.4 ([#16144](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16144))
* Defunct --max-batch-count ([#16119](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16119))
* docs: update bug_report.yml ([#16102](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16102))
* Maintaining Project Compatibility for Python 3.9 Users Without Upgrade Requirements. ([#16088](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16088), [#16169](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16169), [#16192](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16192))
* Update torch for ARM Macs to 2.3.1 ([#16059](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16059))
* remove deprecated setting dont_fix_second_order_samplers_schedule ([#16061](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16061))
* chore: fix typos ([#16060](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16060))
* shlex.join launch args in console log ([#16170](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16170))
* activate venv .bat ([#16231](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16231))
* add ids to the resize tabs in img2img ([#16218](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16218))
* update installation guide linux ([#16178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16178))
* Robust sysinfo ([#16173](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16173))
* do not send image size on paste inpaint ([#16180](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16180))
* Fix noisy DS_Store files for MacOS ([#16166](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16166))
## 1.9.4
### Bug Fixes:
* pin setuptools version to fix the startup error ([#15882](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15882))
## 1.9.3
### Bug Fixes:
* fix get_crop_region_v2 ([#15594](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15594))
## 1.9.2
### Extensions and API:
* restore 1.8.0-style naming of scripts
## 1.9.1
### Minor:
* Add avif support ([#15582](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15582))
* Add filename patterns: `[sampler_scheduler]` and `[scheduler]` ([#15581](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15581))
### Extensions and API:
* undo adding scripts to sys.modules
* Add schedulers API endpoint ([#15577](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15577))
* Remove API upscaling factor limits ([#15560](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15560))
### Bug Fixes:
* Fix images do not match / Coordinate 'right' is less than 'left' ([#15534](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15534))
* fix: remove_callbacks_for_function should also remove from the ordered map ([#15533](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15533))
* fix x1 upscalers ([#15555](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15555))
* Fix cls.__module__ value in extension script ([#15532](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15532))
* fix typo in function call (eror -> error) ([#15531](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15531))
### Other:
* Hide 'No Image data blocks found.' message ([#15567](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15567))
* Allow webui.sh to be runnable from arbitrary directories containing a .git file ([#15561](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15561))
* Compatibility with Debian 11, Fedora 34+ and openSUSE 15.4+ ([#15544](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15544))
* numpy DeprecationWarning product -> prod ([#15547](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15547))
* get_crop_region_v2 ([#15583](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15583), [#15587](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15587))
## 1.9.0
### Features:
* Make refiner switchover based on model timesteps instead of sampling steps ([#14978](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14978))
* add an option to have old-style directory view instead of tree view; stylistic changes for extra network sorting/search controls
* add UI for reordering callbacks, support for specifying callback order in extension metadata ([#15205](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15205))
* Sgm uniform scheduler for SDXL-Lightning models ([#15325](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15325))
* Scheduler selection in main UI ([#15333](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15333), [#15361](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15361), [#15394](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15394))
### Minor:
* "open images directory" button now opens the actual dir ([#14947](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14947))
* Support inference with LyCORIS BOFT networks ([#14871](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14871), [#14973](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14973))
* make extra network card description plaintext by default, with an option to re-enable HTML as it was
* resize handle for extra networks ([#15041](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15041))
* cmd args: `--unix-filenames-sanitization` and `--filenames-max-length` ([#15031](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15031))
* show extra networks parameters in HTML table rather than raw JSON ([#15131](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15131))
* Add DoRA (weight-decompose) support for LoRA/LoHa/LoKr ([#15160](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15160), [#15283](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15283))
* Add '--no-prompt-history' cmd args for disable last generation prompt history ([#15189](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15189))
* update preview on Replace Preview ([#15201](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15201))
* only fetch updates for extensions' active git branches ([#15233](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15233))
* put upscale postprocessing UI into an accordion ([#15223](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15223))
* Support dragdrop for URLs to read infotext ([#15262](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15262))
* use diskcache library for caching ([#15287](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15287), [#15299](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15299))
* Allow PNG-RGBA for Extras Tab ([#15334](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15334))
* Support cover images embedded in safetensors metadata ([#15319](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15319))
* faster interrupt when using NN upscale ([#15380](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15380))
* Extras upscaler: an input field to limit maximum side length for the output image ([#15293](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15293), [#15415](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15415), [#15417](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15417), [#15425](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15425))
* add an option to hide postprocessing options in Extras tab
### Extensions and API:
* ResizeHandleRow - allow overridden column scale parameter ([#15004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15004))
* call script_callbacks.ui_settings_callback earlier; fix extra-options-section built-in extension killing the ui if using a setting that doesn't exist
* make it possible to use zoom.js outside webui context ([#15286](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15286), [#15288](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15288))
* allow variants for extension name in metadata.ini ([#15290](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15290))
* make reloading UI scripts optional when doing Reload UI, and off by default
* put request: gr.Request at start of img2img function similar to txt2img
* open_folder as util ([#15442](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15442))
* make it possible to import extensions' script files as `import scripts.<filename>` ([#15423](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15423))
### Performance:
* performance optimization for extra networks HTML pages
* optimization for extra networks filtering
* optimization for extra networks sorting
### Bug Fixes:
* prevent escape button causing an interrupt when no generation has been made yet
* [bug] avoid double upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966))
* possible fix for reload button not appearing in some cases for extra networks.
* fix: the `split_threshold` parameter does not work when running Split oversized images ([#15006](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15006))
* Fix resize-handle visibility for vertical layout (mobile) ([#15010](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15010))
* register_tmp_file also for mtime ([#15012](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15012))
* Protect alphas_cumprod during refiner switchover ([#14979](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14979))
* Fix EXIF orientation in API image loading ([#15062](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15062))
* Only override emphasis if actually used in prompt ([#15141](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15141))
* Fix emphasis infotext missing from `params.txt` ([#15142](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15142))
* fix extract_style_text_from_prompt #15132 ([#15135](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15135))
* Fix Soft Inpaint for AnimateDiff ([#15148](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15148))
* edit-attention: deselect surrounding whitespace ([#15178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15178))
* chore: fix font not loaded ([#15183](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15183))
* use natural sort in extra networks when ordering by path
* Fix built-in lora system bugs caused by torch.nn.MultiheadAttention ([#15190](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15190))
* Avoid error from None in get_learned_conditioning ([#15191](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15191))
* Add entry to MassFileLister after writing metadata ([#15199](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15199))
* fix issue with Styles when Hires prompt is used ([#15269](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15269), [#15276](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15276))
* Strip comments from hires fix prompt ([#15263](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15263))
* Make imageviewer event listeners browser consistent ([#15261](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15261))
* Fix AttributeError in OFT when trying to get MultiheadAttention weight ([#15260](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15260))
* Add missing .mean() back ([#15239](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15239))
* fix "Restore progress" button ([#15221](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15221))
* fix ui-config for InputAccordion [custom_script_source] ([#15231](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15231))
* handle 0 wheel deltaY ([#15268](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15268))
* prevent alt menu for firefox ([#15267](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15267))
* fix: fix syntax errors ([#15179](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15179))
* restore outputs path ([#15307](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15307))
* Escape btn_copy_path filename ([#15316](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15316))
* Fix extra networks buttons when filename contains an apostrophe ([#15331](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15331))
* escape brackets in lora random prompt generator ([#15343](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15343))
* fix: Python version check for PyTorch installation compatibility ([#15390](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15390))
* fix typo in call_queue.py ([#15386](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15386))
* fix: when find already_loaded model, remove loaded by array index ([#15382](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15382))
* minor bug fix of sd model memory management ([#15350](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15350))
* Fix CodeFormer weight ([#15414](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15414))
* Fix: Remove script callbacks in ordered_callbacks_map ([#15428](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15428))
* fix limited file write (thanks, Sylwia)
* Fix extra-single-image API not doing upscale failed ([#15465](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15465))
* error handling paste_field callables ([#15470](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15470))
### Hardware:
* Add training support and change lspci for Ascend NPU ([#14981](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14981))
* Update to ROCm5.7 and PyTorch ([#14820](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14820))
* Better workaround for Navi1, removing --pre for Navi3 ([#15224](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15224))
* Ascend NPU wiki page ([#15228](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15228))
### Other:
* Update comment for Pad prompt/negative prompt v0 to add a warning about truncation, make it override the v1 implementation
* support resizable columns for touch (tablets) ([#15002](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15002))
* Fix #14591 using translated content to do categories mapping ([#14995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14995))
* Use `absolute` path for normalized filepath ([#15035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15035))
* resizeHandle handle double tap ([#15065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15065))
* --dat-models-path cmd flag ([#15039](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15039))
* Add a direct link to the binary release ([#15059](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15059))
* upscaler_utils: Reduce logging ([#15084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15084))
* Fix various typos with crate-ci/typos ([#15116](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15116))
* fix_jpeg_live_preview ([#15102](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15102))
* [alternative fix] can't load webui if selected wrong extra option in ui ([#15121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15121))
* Error handling for unsupported transparency ([#14958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14958))
* Add model description to searched terms ([#15198](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15198))
* bump action version ([#15272](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15272))
* PEP 604 annotations ([#15259](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15259))
* Automatically Set the Scale by value when user selects an Upscale Model ([#15244](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15244))
* move postprocessing-for-training into builtin extensions ([#15222](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15222))
* type hinting in shared.py ([#15211](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15211))
* update ruff to 0.3.3
* Update pytorch lightning utilities ([#15310](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15310))
* Add Size as an XYZ Grid option ([#15354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15354))
* Use HF_ENDPOINT variable for HuggingFace domain with default ([#15443](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15443))
* re-add update_file_entry ([#15446](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15446))
* create_infotext allow index and callable, re-work Hires prompt infotext ([#15460](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15460))
* update restricted_opts to include more options for --hide-ui-dir-config ([#15492](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15492))
## 1.8.0
### Features: ### Features:
* Update torch to version 2.1.2 * Update torch to version 2.1.2
@@ -293,7 +14,7 @@
* Add support for DAT upscaler models ([#14690](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14690), [#15039](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15039)) * Add support for DAT upscaler models ([#14690](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14690), [#15039](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15039))
* Extra Networks Tree View ([#14588](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14588), [#14900](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14900)) * Extra Networks Tree View ([#14588](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14588), [#14900](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14900))
* NPU Support ([#14801](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14801)) * NPU Support ([#14801](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14801))
* Prompt comments support * Propmpt comments support
### Minor: ### Minor:
* Allow pasting in WIDTHxHEIGHT strings into the width/height fields ([#14296](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14296)) * Allow pasting in WIDTHxHEIGHT strings into the width/height fields ([#14296](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14296))
@@ -338,9 +59,9 @@
* modules/api/api.py: add api endpoint to refresh embeddings list ([#14715](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14715)) * modules/api/api.py: add api endpoint to refresh embeddings list ([#14715](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14715))
* set_named_arg ([#14773](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14773)) * set_named_arg ([#14773](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14773))
* add before_token_counter callback and use it for prompt comments * add before_token_counter callback and use it for prompt comments
* ResizeHandleRow - allow overridden column scale parameter ([#15004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15004)) * ResizeHandleRow - allow overriden column scale parameter ([#15004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15004))
### Performance: ### Performance
* Massive performance improvement for extra networks directories with a huge number of files in them in an attempt to tackle #14507 ([#14528](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14528)) * Massive performance improvement for extra networks directories with a huge number of files in them in an attempt to tackle #14507 ([#14528](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14528))
* Reduce unnecessary re-indexing extra networks directory ([#14512](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14512)) * Reduce unnecessary re-indexing extra networks directory ([#14512](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14512))
* Avoid unnecessary `isfile`/`exists` calls ([#14527](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14527)) * Avoid unnecessary `isfile`/`exists` calls ([#14527](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14527))
@@ -380,7 +101,7 @@
* Gracefully handle mtime read exception from cache ([#14933](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14933)) * Gracefully handle mtime read exception from cache ([#14933](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14933))
* Only trigger interrupt on `Esc` when interrupt button visible ([#14932](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14932)) * Only trigger interrupt on `Esc` when interrupt button visible ([#14932](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14932))
* Disable prompt token counters option actually disables token counting rather than just hiding results. * Disable prompt token counters option actually disables token counting rather than just hiding results.
* avoid double upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966)) * avoid doble upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966))
* Fix #14591 using translated content to do categories mapping ([#14995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14995)) * Fix #14591 using translated content to do categories mapping ([#14995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14995))
* fix: the `split_threshold` parameter does not work when running Split oversized images ([#15006](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15006)) * fix: the `split_threshold` parameter does not work when running Split oversized images ([#15006](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15006))
* Fix resize-handle for mobile ([#15010](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15010), [#15065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15065)) * Fix resize-handle for mobile ([#15010](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15010), [#15065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15065))
@@ -450,7 +171,7 @@
* infotext updates: add option to disregard certain infotext fields, add option to not include VAE in infotext, add explanation to infotext settings page, move some options to infotext settings page * infotext updates: add option to disregard certain infotext fields, add option to not include VAE in infotext, add explanation to infotext settings page, move some options to infotext settings page
* add FP32 fallback support on sd_vae_approx ([#14046](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046)) * add FP32 fallback support on sd_vae_approx ([#14046](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046))
* support XYZ scripts / split hires path from unet ([#14126](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14126)) * support XYZ scripts / split hires path from unet ([#14126](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14126))
* allow use of multiple styles csv files ([#14125](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14125)) * allow use of mutiple styles csv files ([#14125](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14125))
* make extra network card description plaintext by default, with an option (Treat card description as HTML) to re-enable HTML as it was (originally by [#13241](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13241)) * make extra network card description plaintext by default, with an option (Treat card description as HTML) to re-enable HTML as it was (originally by [#13241](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13241))
### Extensions and API: ### Extensions and API:
@@ -587,7 +308,7 @@
* new samplers: Restart, DPM++ 2M SDE Exponential, DPM++ 2M SDE Heun, DPM++ 2M SDE Heun Karras, DPM++ 2M SDE Heun Exponential, DPM++ 3M SDE, DPM++ 3M SDE Karras, DPM++ 3M SDE Exponential ([#12300](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12300), [#12519](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12519), [#12542](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12542)) * new samplers: Restart, DPM++ 2M SDE Exponential, DPM++ 2M SDE Heun, DPM++ 2M SDE Heun Karras, DPM++ 2M SDE Heun Exponential, DPM++ 3M SDE, DPM++ 3M SDE Karras, DPM++ 3M SDE Exponential ([#12300](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12300), [#12519](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12519), [#12542](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12542))
* rework DDIM, PLMS, UniPC to use CFG denoiser same as in k-diffusion samplers: * rework DDIM, PLMS, UniPC to use CFG denoiser same as in k-diffusion samplers:
* makes all of them work with img2img * makes all of them work with img2img
* makes prompt composition possible (AND) * makes prompt composition posssible (AND)
* makes them available for SDXL * makes them available for SDXL
* always show extra networks tabs in the UI ([#11808](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11808)) * always show extra networks tabs in the UI ([#11808](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11808))
* use less RAM when creating models ([#11958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11958), [#12599](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12599)) * use less RAM when creating models ([#11958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11958), [#12599](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12599))
@@ -763,7 +484,7 @@
* user metadata system for custom networks * user metadata system for custom networks
* extended Lora metadata editor: set activation text, default weight, view tags, training info * extended Lora metadata editor: set activation text, default weight, view tags, training info
* Lora extension rework to include other types of networks (all that were previously handled by LyCORIS extension) * Lora extension rework to include other types of networks (all that were previously handled by LyCORIS extension)
* show github stars for extensions * show github stars for extenstions
* img2img batch mode can read extra stuff from png info * img2img batch mode can read extra stuff from png info
* img2img batch works with subdirectories * img2img batch works with subdirectories
* hotkeys to move prompt elements: alt+left/right * hotkeys to move prompt elements: alt+left/right
@@ -982,7 +703,7 @@
* do not wait for Stable Diffusion model to load at startup * do not wait for Stable Diffusion model to load at startup
* add filename patterns: `[denoising]` * add filename patterns: `[denoising]`
* directory hiding for extra networks: dirs starting with `.` will hide their cards on extra network tabs unless specifically searched for * directory hiding for extra networks: dirs starting with `.` will hide their cards on extra network tabs unless specifically searched for
* LoRA: for the `<...>` text in prompt, use name of LoRA that is in the metadata of the file, if present, instead of filename (both can be used to activate LoRA) * LoRA: for the `<...>` text in prompt, use name of LoRA that is in the metdata of the file, if present, instead of filename (both can be used to activate LoRA)
* LoRA: read infotext params from kohya-ss's extension parameters if they are present and if his extension is not active * LoRA: read infotext params from kohya-ss's extension parameters if they are present and if his extension is not active
* LoRA: fix some LoRAs not working (ones that have 3x3 convolution layer) * LoRA: fix some LoRAs not working (ones that have 3x3 convolution layer)
* LoRA: add an option to use old method of applying LoRAs (producing same results as with kohya-ss) * LoRA: add an option to use old method of applying LoRAs (producing same results as with kohya-ss)
@@ -1012,7 +733,7 @@
* fix gamepad navigation * fix gamepad navigation
* make the lightbox fullscreen image function properly * make the lightbox fullscreen image function properly
* fix squished thumbnails in extras tab * fix squished thumbnails in extras tab
* keep "search" filter for extra networks when user refreshes the tab (previously it showed everything after you refreshed) * keep "search" filter for extra networks when user refreshes the tab (previously it showed everthing after you refreshed)
* fix webui showing the same image if you configure the generation to always save results into same file * fix webui showing the same image if you configure the generation to always save results into same file
* fix bug with upscalers not working properly * fix bug with upscalers not working properly
* fix MPS on PyTorch 2.0.1, Intel Macs * fix MPS on PyTorch 2.0.1, Intel Macs
@@ -1030,7 +751,7 @@
* switch to PyTorch 2.0.0 (except for AMD GPUs) * switch to PyTorch 2.0.0 (except for AMD GPUs)
* visual improvements to custom code scripts * visual improvements to custom code scripts
* add filename patterns: `[clip_skip]`, `[hasprompt<>]`, `[batch_number]`, `[generation_number]` * add filename patterns: `[clip_skip]`, `[hasprompt<>]`, `[batch_number]`, `[generation_number]`
* add support for saving init images in img2img, and record their hashes in infotext for reproducibility * add support for saving init images in img2img, and record their hashes in infotext for reproducability
* automatically select current word when adjusting weight with ctrl+up/down * automatically select current word when adjusting weight with ctrl+up/down
* add dropdowns for X/Y/Z plot * add dropdowns for X/Y/Z plot
* add setting: Stable Diffusion/Random number generator source: makes it possible to make images generated from a given manual seed consistent across different GPUs * add setting: Stable Diffusion/Random number generator source: makes it possible to make images generated from a given manual seed consistent across different GPUs


@@ -78,7 +78,7 @@ A web interface for Stable Diffusion, implemented using Gradio library.
- Clip skip - Clip skip
- Hypernetworks - Hypernetworks
- Loras (same as Hypernetworks but more pretty) - Loras (same as Hypernetworks but more pretty)
- A separate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt - A separate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt
- Can select to load a different VAE from settings screen - Can select to load a different VAE from settings screen
- Estimated completion time in progress bar - Estimated completion time in progress bar
- API - API
@@ -98,7 +98,6 @@ Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-di
- [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) - [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended)
- [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs. - [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
- [Intel CPUs, Intel GPUs (both integrated and discrete)](https://github.com/openvinotoolkit/stable-diffusion-webui/wiki/Installation-on-Intel-Silicon) (external wiki page) - [Intel CPUs, Intel GPUs (both integrated and discrete)](https://github.com/openvinotoolkit/stable-diffusion-webui/wiki/Installation-on-Intel-Silicon) (external wiki page)
- [Ascend NPUs](https://github.com/wangshuai09/stable-diffusion-webui/wiki/Install-and-run-on-Ascend-NPUs) (external wiki page)
Alternatively, use online services (like Google Colab): Alternatively, use online services (like Google Colab):
@@ -122,38 +121,16 @@ Alternatively, use online services (like Google Colab):
# Debian-based: # Debian-based:
sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0 sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0
# Red Hat-based: # Red Hat-based:
sudo dnf install wget git python3 gperftools-libs libglvnd-glx sudo dnf install wget git python3 gperftools-libs libglvnd-glx
# openSUSE-based: # openSUSE-based:
sudo zypper install wget git python3 libtcmalloc4 libglvnd sudo zypper install wget git python3 libtcmalloc4 libglvnd
# Arch-based: # Arch-based:
sudo pacman -S wget git python3 sudo pacman -S wget git python3
``` ```
If your system is very new, you need to install python3.11 or python3.10:
```bash
# Ubuntu 24.04
sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt update
sudo apt install python3.11
# Manjaro/Arch
sudo pacman -S yay
yay -S python311 # do not confuse with python3.11 package
# Only for 3.11
# Then set up env variable in launch script
export python_cmd="python3.11"
# or in webui-user.sh
python_cmd="python3.11"
```
2. Navigate to the directory you would like the webui to be installed and execute the following command: 2. Navigate to the directory you would like the webui to be installed and execute the following command:
```bash ```bash
wget -q https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh wget -q https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh
``` ```
Or just clone the repo wherever you want:
```bash
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui
```
3. Run `webui.sh`. 3. Run `webui.sh`.
4. Check `webui-user.sh` for options. 4. Check `webui-user.sh` for options.
### Installation on Apple Silicon ### Installation on Apple Silicon
@@ -172,7 +149,7 @@ For the purposes of getting Google and other search engines to crawl the wiki, h
## Credits ## Credits
Licenses for borrowed code can be found in `Settings -> Licenses` screen, and also in `html/licenses.html` file. Licenses for borrowed code can be found in `Settings -> Licenses` screen, and also in `html/licenses.html` file.
- Stable Diffusion - https://github.com/Stability-AI/stablediffusion, https://github.com/CompVis/taming-transformers, https://github.com/mcmonkey4eva/sd3-ref - Stable Diffusion - https://github.com/Stability-AI/stablediffusion, https://github.com/CompVis/taming-transformers
- k-diffusion - https://github.com/crowsonkb/k-diffusion.git - k-diffusion - https://github.com/crowsonkb/k-diffusion.git
- Spandrel - https://github.com/chaiNNer-org/spandrel implementing - Spandrel - https://github.com/chaiNNer-org/spandrel implementing
- GFPGAN - https://github.com/TencentARC/GFPGAN.git - GFPGAN - https://github.com/TencentARC/GFPGAN.git


@@ -1,5 +0,0 @@
[default.extend-words]
# Part of "RGBa" (Pillow's pre-multiplied alpha RGB mode)
Ba = "Ba"
# HSA is something AMD uses for their GPUs
HSA = "HSA"


@@ -40,7 +40,7 @@ model:
 use_spatial_transformer: True
 transformer_depth: 1
 context_dim: 768
-use_checkpoint: False
+use_checkpoint: True
 legacy: False
 first_stage_config:
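The `use_checkpoint` flag flipped in this and the following config hunks controls activation (gradient) checkpointing inside the diffusion UNet: the master side turns it off, presumably because the webui only runs inference and checkpointing just adds recompute overhead there, while the v1.8.0 configs keep it on. A minimal PyTorch sketch of what such a flag typically toggles (illustrative module, not repository code):

```python
import torch
from torch.utils.checkpoint import checkpoint

class Block(torch.nn.Module):
    """Toy block showing the trade-off behind a use_checkpoint-style flag."""
    def __init__(self, use_checkpoint: bool = False):
        super().__init__()
        self.use_checkpoint = use_checkpoint
        self.inner = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.GELU())

    def forward(self, x):
        if self.use_checkpoint and self.training and x.requires_grad:
            # training path: discard intermediate activations and recompute
            # them during backward, trading extra compute for lower memory use
            return checkpoint(self.inner, x, use_reentrant=False)
        # inference path: plain forward pass with no checkpointing overhead
        return self.inner(x)
```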


@@ -41,7 +41,7 @@ model:
 use_linear_in_transformer: True
 transformer_depth: 1
 context_dim: 1024
-use_checkpoint: False
+use_checkpoint: True
 legacy: False
 first_stage_config:


@@ -45,7 +45,7 @@ model:
 use_spatial_transformer: True
 transformer_depth: 1
 context_dim: 768
-use_checkpoint: False
+use_checkpoint: True
 legacy: False
 first_stage_config:


@@ -1,5 +0,0 @@
model:
target: modules.models.sd3.sd3_model.SD3Inferencer
params:
shift: 3
state_dict: null


@@ -21,7 +21,7 @@ model:
 params:
 adm_in_channels: 2816
 num_classes: sequential
-use_checkpoint: False
+use_checkpoint: True
 in_channels: 9
 out_channels: 4
 model_channels: 320


@@ -40,7 +40,7 @@ model:
 use_spatial_transformer: True
 transformer_depth: 1
 context_dim: 768
-use_checkpoint: False
+use_checkpoint: True
 legacy: False
 first_stage_config:


@@ -40,7 +40,7 @@ model:
 use_spatial_transformer: True
 transformer_depth: 1
 context_dim: 768
-use_checkpoint: False
+use_checkpoint: True
 legacy: False
 first_stage_config:


@@ -301,7 +301,7 @@ class DDPMV1(pl.LightningModule):
elif self.parameterization == "x0": elif self.parameterization == "x0":
target = x_start target = x_start
else: else:
raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
@@ -572,7 +572,7 @@ class LatentDiffusionV1(DDPMV1):
:param h: height :param h: height
:param w: width :param w: width
:return: normalized distance to image border, :return: normalized distance to image border,
with min distance = 0 at border and max dist = 0.5 at image center wtith min distance = 0 at border and max dist = 0.5 at image center
""" """
lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
arr = self.meshgrid(h, w) / lower_right_corner arr = self.meshgrid(h, w) / lower_right_corner
@@ -880,7 +880,7 @@ class LatentDiffusionV1(DDPMV1):
def apply_model(self, x_noisy, t, cond, return_ids=False): def apply_model(self, x_noisy, t, cond, return_ids=False):
if isinstance(cond, dict): if isinstance(cond, dict):
# hybrid case, cond is expected to be a dict # hybrid case, cond is exptected to be a dict
pass pass
else: else:
if not isinstance(cond, list): if not isinstance(cond, list):
@@ -916,7 +916,7 @@ class LatentDiffusionV1(DDPMV1):
cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
elif self.cond_stage_key == 'coordinates_bbox': elif self.cond_stage_key == 'coordinates_bbox':
assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size' assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size'
# assuming padding of unfold is always 0 and its dilation is always 1 # assuming padding of unfold is always 0 and its dilation is always 1
n_patches_per_row = int((w - ks[0]) / stride[0] + 1) n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
@@ -926,7 +926,7 @@ class LatentDiffusionV1(DDPMV1):
num_downs = self.first_stage_model.encoder.num_resolutions - 1 num_downs = self.first_stage_model.encoder.num_resolutions - 1
rescale_latent = 2 ** (num_downs) rescale_latent = 2 ** (num_downs)
# get top left positions of patches as conforming for the bbbox tokenizer, therefore we # get top left postions of patches as conforming for the bbbox tokenizer, therefore we
# need to rescale the tl patch coordinates to be in between (0,1) # need to rescale the tl patch coordinates to be in between (0,1)
tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
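For readers following the context lines above: `tl_patch_coordinates` normalizes the top-left corner of each unfolded patch into the (0, 1) range of the full image. A small worked example with made-up numbers (values chosen only for illustration):

```python
# illustrative values, not taken from the repository
rescale_latent = 8            # 2 ** num_downs for an encoder with 3 downsampling stages
stride = (32, 32)             # unfold stride in latent space
n_patches_per_row = 4
full_img_w, full_img_h = 1024, 1024

patch_nr = 5                  # zero-based index: second row, second column
tl_x = rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w   # 8 * 32 * 1 / 1024 = 0.25
tl_y = rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h  # 8 * 32 * 1 / 1024 = 0.25
print((tl_x, tl_y))           # (0.25, 0.25): the top-left corner, normalized to (0, 1)
```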


@@ -9,8 +9,6 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
self.errors = {} self.errors = {}
"""mapping of network names to the number of errors the network had during operation""" """mapping of network names to the number of errors the network had during operation"""
remove_symbols = str.maketrans('', '', ":,")
def activate(self, p, params_list): def activate(self, p, params_list):
additional = shared.opts.sd_lora additional = shared.opts.sd_lora
@@ -45,15 +43,22 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims) networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims)
if shared.opts.lora_add_hashes_to_infotext: if shared.opts.lora_add_hashes_to_infotext:
if not getattr(p, "is_hr_pass", False) or not hasattr(p, "lora_hashes"): network_hashes = []
p.lora_hashes = {}
for item in networks.loaded_networks: for item in networks.loaded_networks:
if item.network_on_disk.shorthash and item.mentioned_name: shorthash = item.network_on_disk.shorthash
p.lora_hashes[item.mentioned_name.translate(self.remove_symbols)] = item.network_on_disk.shorthash if not shorthash:
continue
if p.lora_hashes: alias = item.mentioned_name
p.extra_generation_params["Lora hashes"] = ', '.join(f'{k}: {v}' for k, v in p.lora_hashes.items()) if not alias:
continue
alias = alias.replace(":", "").replace(",", "")
network_hashes.append(f"{alias}: {shorthash}")
if network_hashes:
p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes)
def deactivate(self, p): def deactivate(self, p):
if self.errors: if self.errors:
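The hunk above is easier to read once the two flattened columns are separated: the master side collects `{alias: shorthash}` pairs into `p.lora_hashes` (stripping `:` and `,` from the alias with the `str.maketrans` table shown earlier), while the v1.8.0 side appends `f"{alias}: {shorthash}"` strings to a `network_hashes` list; both end by writing the joined result into the `Lora hashes` infotext entry. A minimal sketch of that shared formatting step (hypothetical helper name, not repository code):

```python
def format_lora_hashes(loaded: dict[str, str]) -> str:
    """Format {alias: shorthash} pairs into the 'Lora hashes' infotext value."""
    remove_symbols = str.maketrans('', '', ':,')  # same idea as the master-side translation table
    return ', '.join(
        f'{alias.translate(remove_symbols)}: {shorthash}'
        for alias, shorthash in loaded.items()
        if alias and shorthash  # skip networks without an alias or a known short hash
    )

# e.g. format_lora_hashes({"my:lora": "abc1234"}) == 'mylora: abc1234'
```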


@ -30,7 +30,7 @@ def factorization(dimension: int, factor:int=-1) -> tuple[int, int]:
In LoRA with Kroneckor Product, first value is a value for weight scale. In LoRA with Kroneckor Product, first value is a value for weight scale.
secon value is a value for weight. secon value is a value for weight.
Because of non-commutative property, A⊗B ≠ B⊗A. Meaning of two matrices is slightly different. Becuase of non-commutative property, A⊗B ≠ B⊗A. Meaning of two matrices is slightly different.
examples) examples)
factor factor
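The docstring above describes splitting a dimension into two Kronecker factors. This is not the exact lyco_helpers implementation, only a rough sketch of the idea (pick a divisor pair, preferring one near the square root unless a factor is forced):

def factorization_sketch(dimension: int, factor: int = -1) -> tuple[int, int]:
    # forced factor: use it if it divides the dimension
    if factor > 0 and dimension % factor == 0:
        return factor, dimension // factor
    # otherwise walk down from sqrt(dimension) to the nearest divisor
    a = int(dimension ** 0.5)
    while a > 1 and dimension % a != 0:
        a -= 1
    return a, dimension // a

# factorization_sketch(128) -> (8, 16); factorization_sketch(64, 4) -> (4, 16)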

View File

@ -7,7 +7,6 @@ import torch.nn as nn
import torch.nn.functional as F import torch.nn.functional as F
from modules import sd_models, cache, errors, hashes, shared from modules import sd_models, cache, errors, hashes, shared
import modules.models.sd3.mmdit
NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd_module']) NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd_module'])
@ -30,6 +29,7 @@ class NetworkOnDisk:
def read_metadata(): def read_metadata():
metadata = sd_models.read_metadata_from_safetensors(filename) metadata = sd_models.read_metadata_from_safetensors(filename)
metadata.pop('ssmd_cover_images', None) # those are cover images, and they are too big to display in UI as text
return metadata return metadata
@ -115,17 +115,8 @@ class NetworkModule:
self.sd_key = weights.sd_key self.sd_key = weights.sd_key
self.sd_module = weights.sd_module self.sd_module = weights.sd_module
if isinstance(self.sd_module, modules.models.sd3.mmdit.QkvLinear): if hasattr(self.sd_module, 'weight'):
s = self.sd_module.weight.shape
self.shape = (s[0] // 3, s[1])
elif hasattr(self.sd_module, 'weight'):
self.shape = self.sd_module.weight.shape self.shape = self.sd_module.weight.shape
elif isinstance(self.sd_module, nn.MultiheadAttention):
# For now, only self-attn use Pytorch's MHA
# So assume all qkvo proj have same shape
self.shape = self.sd_module.out_proj.weight.shape
else:
self.shape = None
self.ops = None self.ops = None
self.extra_kwargs = {} self.extra_kwargs = {}
@ -155,9 +146,6 @@ class NetworkModule:
self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None
self.scale = weights.w["scale"].item() if "scale" in weights.w else None self.scale = weights.w["scale"].item() if "scale" in weights.w else None
self.dora_scale = weights.w.get("dora_scale", None)
self.dora_norm_dims = len(self.shape) - 1
def multiplier(self): def multiplier(self):
if 'transformer' in self.sd_key[:20]: if 'transformer' in self.sd_key[:20]:
return self.network.te_multiplier return self.network.te_multiplier
@ -172,27 +160,6 @@ class NetworkModule:
return 1.0 return 1.0
def apply_weight_decompose(self, updown, orig_weight):
# Match the device/dtype
orig_weight = orig_weight.to(updown.dtype)
dora_scale = self.dora_scale.to(device=orig_weight.device, dtype=updown.dtype)
updown = updown.to(orig_weight.device)
merged_scale1 = updown + orig_weight
merged_scale1_norm = (
merged_scale1.transpose(0, 1)
.reshape(merged_scale1.shape[1], -1)
.norm(dim=1, keepdim=True)
.reshape(merged_scale1.shape[1], *[1] * self.dora_norm_dims)
.transpose(0, 1)
)
dora_merged = (
merged_scale1 * (dora_scale / merged_scale1_norm)
)
final_updown = dora_merged - orig_weight
return final_updown
def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None): def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
if self.bias is not None: if self.bias is not None:
updown = updown.reshape(self.bias.shape) updown = updown.reshape(self.bias.shape)
@ -208,12 +175,7 @@ class NetworkModule:
if ex_bias is not None: if ex_bias is not None:
ex_bias = ex_bias * self.multiplier() ex_bias = ex_bias * self.multiplier()
updown = updown * self.calc_scale() return updown * self.calc_scale() * self.multiplier(), ex_bias
if self.dora_scale is not None:
updown = self.apply_weight_decompose(updown, orig_weight)
return updown * self.multiplier(), ex_bias
def calc_updown(self, target): def calc_updown(self, target):
raise NotImplementedError() raise NotImplementedError()
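The apply_weight_decompose method shown above is a DoRA-style decomposition: the merged weight W + ΔW is normalised column-wise and rescaled by the learned dora_scale, and only the difference to the original weight is returned. A simplified sketch for a 2-D Linear weight (the real code also handles conv shapes via dora_norm_dims):

import torch

def apply_weight_decompose_2d(updown, orig_weight, dora_scale):
    merged = updown + orig_weight              # W + delta_W
    norm = merged.norm(dim=0, keepdim=True)    # per-column L2 norm, shape (1, in_features)
    # rescale so the column norms follow dora_scale, then return only the delta
    return merged * (dora_scale / norm) - orig_weight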

View File

@ -1,7 +1,6 @@
import torch import torch
import lyco_helpers import lyco_helpers
import modules.models.sd3.mmdit
import network import network
from modules import devices from modules import devices
@ -11,13 +10,6 @@ class ModuleTypeLora(network.ModuleType):
if all(x in weights.w for x in ["lora_up.weight", "lora_down.weight"]): if all(x in weights.w for x in ["lora_up.weight", "lora_down.weight"]):
return NetworkModuleLora(net, weights) return NetworkModuleLora(net, weights)
if all(x in weights.w for x in ["lora_A.weight", "lora_B.weight"]):
w = weights.w.copy()
weights.w.clear()
weights.w.update({"lora_up.weight": w["lora_B.weight"], "lora_down.weight": w["lora_A.weight"]})
return NetworkModuleLora(net, weights)
return None return None
@ -37,7 +29,7 @@ class NetworkModuleLora(network.NetworkModule):
if weight is None and none_ok: if weight is None and none_ok:
return None return None
is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention, modules.models.sd3.mmdit.QkvLinear] is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention]
is_conv = type(self.sd_module) in [torch.nn.Conv2d] is_conv = type(self.sd_module) in [torch.nn.Conv2d]
if is_linear: if is_linear:
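The lora_A/lora_B branch shown above accepts weights stored in the PEFT naming scheme by renaming them to the webui scheme before the module is built. A minimal sketch of that remapping (the helper name is made up for illustration):

def remap_peft_lora_keys(w: dict) -> dict:
    # lora_A is the down projection, lora_B the up projection
    if "lora_A.weight" in w and "lora_B.weight" in w:
        w = dict(w)
        w["lora_down.weight"] = w.pop("lora_A.weight")
        w["lora_up.weight"] = w.pop("lora_B.weight")
    return w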

View File

@ -36,6 +36,13 @@ class NetworkModuleOFT(network.NetworkModule):
# self.alpha is unused # self.alpha is unused
self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size)
# LyCORIS BOFT
if self.oft_blocks.dim() == 4:
self.is_boft = True
self.rescale = weights.w.get('rescale', None)
if self.rescale is not None:
self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1))
is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear] is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear]
is_conv = type(self.sd_module) in [torch.nn.Conv2d] is_conv = type(self.sd_module) in [torch.nn.Conv2d]
is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] # unsupported is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] # unsupported
@ -47,13 +54,6 @@ class NetworkModuleOFT(network.NetworkModule):
elif is_other_linear: elif is_other_linear:
self.out_dim = self.sd_module.embed_dim self.out_dim = self.sd_module.embed_dim
# LyCORIS BOFT
if self.oft_blocks.dim() == 4:
self.is_boft = True
self.rescale = weights.w.get('rescale', None)
if self.rescale is not None and not is_other_linear:
self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1))
self.num_blocks = self.dim self.num_blocks = self.dim
self.block_size = self.out_dim // self.dim self.block_size = self.out_dim // self.dim
self.constraint = (0 if self.alpha is None else self.alpha) * self.out_dim self.constraint = (0 if self.alpha is None else self.alpha) * self.out_dim
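The relocated BOFT block shown above detects LyCORIS BOFT weights by their 4-D oft_blocks and reshapes the optional rescale vector so it broadcasts against the module weight. A small sketch of the reshape, assuming a per-output-channel rescale vector:

import torch

def reshape_rescale_for(weight: torch.Tensor, rescale: torch.Tensor) -> torch.Tensor:
    # (out,) -> (out, 1, 1, 1) for a Conv2d weight of shape (out, in, kh, kw),
    # or (out, 1) for a Linear weight of shape (out, in)
    return rescale.reshape(-1, *[1] * (weight.dim() - 1))

# reshape_rescale_for(torch.zeros(64, 32, 3, 3), torch.ones(64)).shape -> torch.Size([64, 1, 1, 1])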

View File

@ -1,4 +1,3 @@
from __future__ import annotations
import gradio as gr import gradio as gr
import logging import logging
import os import os
@ -20,7 +19,6 @@ from typing import Union
from modules import shared, devices, sd_models, errors, scripts, sd_hijack from modules import shared, devices, sd_models, errors, scripts, sd_hijack
import modules.textual_inversion.textual_inversion as textual_inversion import modules.textual_inversion.textual_inversion as textual_inversion
import modules.models.sd3.mmdit
from lora_logger import logger from lora_logger import logger
@ -132,9 +130,7 @@ def assign_network_names_to_compvis_modules(sd_model):
network_layer_mapping[network_name] = module network_layer_mapping[network_name] = module
module.network_layer_name = network_name module.network_layer_name = network_name
else: else:
cond_stage_model = getattr(shared.sd_model.cond_stage_model, 'wrapped', shared.sd_model.cond_stage_model) for name, module in shared.sd_model.cond_stage_model.wrapped.named_modules():
for name, module in cond_stage_model.named_modules():
network_name = name.replace(".", "_") network_name = name.replace(".", "_")
network_layer_mapping[network_name] = module network_layer_mapping[network_name] = module
module.network_layer_name = network_name module.network_layer_name = network_name
@ -147,14 +143,6 @@ def assign_network_names_to_compvis_modules(sd_model):
sd_model.network_layer_mapping = network_layer_mapping sd_model.network_layer_mapping = network_layer_mapping
class BundledTIHash(str):
def __init__(self, hash_str):
self.hash = hash_str
def __str__(self):
return self.hash if shared.opts.lora_bundled_ti_to_infotext else ''
def load_network(name, network_on_disk): def load_network(name, network_on_disk):
net = network.Network(name, network_on_disk) net = network.Network(name, network_on_disk)
net.mtime = os.path.getmtime(network_on_disk.filename) net.mtime = os.path.getmtime(network_on_disk.filename)
@ -167,26 +155,12 @@ def load_network(name, network_on_disk):
keys_failed_to_match = {} keys_failed_to_match = {}
is_sd2 = 'model_transformer_resblocks' in shared.sd_model.network_layer_mapping is_sd2 = 'model_transformer_resblocks' in shared.sd_model.network_layer_mapping
if hasattr(shared.sd_model, 'diffusers_weight_map'):
diffusers_weight_map = shared.sd_model.diffusers_weight_map
elif hasattr(shared.sd_model, 'diffusers_weight_mapping'):
diffusers_weight_map = {}
for k, v in shared.sd_model.diffusers_weight_mapping():
diffusers_weight_map[k] = v
shared.sd_model.diffusers_weight_map = diffusers_weight_map
else:
diffusers_weight_map = None
matched_networks = {} matched_networks = {}
bundle_embeddings = {} bundle_embeddings = {}
for key_network, weight in sd.items(): for key_network, weight in sd.items():
key_network_without_network_parts, _, network_part = key_network.partition(".")
if diffusers_weight_map:
key_network_without_network_parts, network_name, network_weight = key_network.rsplit(".", 2)
network_part = network_name + '.' + network_weight
else:
key_network_without_network_parts, _, network_part = key_network.partition(".")
if key_network_without_network_parts == "bundle_emb": if key_network_without_network_parts == "bundle_emb":
emb_name, vec_name = network_part.split(".", 1) emb_name, vec_name = network_part.split(".", 1)
@ -198,11 +172,7 @@ def load_network(name, network_on_disk):
emb_dict[vec_name] = weight emb_dict[vec_name] = weight
bundle_embeddings[emb_name] = emb_dict bundle_embeddings[emb_name] = emb_dict
if diffusers_weight_map: key = convert_diffusers_name_to_compvis(key_network_without_network_parts, is_sd2)
key = diffusers_weight_map.get(key_network_without_network_parts, key_network_without_network_parts)
else:
key = convert_diffusers_name_to_compvis(key_network_without_network_parts, is_sd2)
sd_module = shared.sd_model.network_layer_mapping.get(key, None) sd_module = shared.sd_model.network_layer_mapping.get(key, None)
if sd_module is None: if sd_module is None:
@ -259,7 +229,6 @@ def load_network(name, network_on_disk):
for emb_name, data in bundle_embeddings.items(): for emb_name, data in bundle_embeddings.items():
embedding = textual_inversion.create_embedding_from_data(data, emb_name, filename=network_on_disk.filename + "/" + emb_name) embedding = textual_inversion.create_embedding_from_data(data, emb_name, filename=network_on_disk.filename + "/" + emb_name)
embedding.loaded = None embedding.loaded = None
embedding.shorthash = BundledTIHash(name)
embeddings[emb_name] = embedding embeddings[emb_name] = embedding
net.bundle_embeddings = embeddings net.bundle_embeddings = embeddings
@ -291,16 +260,6 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
loaded_networks.clear() loaded_networks.clear()
unavailable_networks = []
for name in names:
if name.lower() in forbidden_network_aliases and available_networks.get(name) is None:
unavailable_networks.append(name)
elif available_network_aliases.get(name) is None:
unavailable_networks.append(name)
if unavailable_networks:
update_available_networks_by_names(unavailable_networks)
networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names] networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names]
if any(x is None for x in networks_on_disk): if any(x is None for x in networks_on_disk):
list_available_networks() list_available_networks()
@ -366,28 +325,6 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
purge_networks_from_memory() purge_networks_from_memory()
def allowed_layer_without_weight(layer):
if isinstance(layer, torch.nn.LayerNorm) and not layer.elementwise_affine:
return True
return False
def store_weights_backup(weight):
if weight is None:
return None
return weight.to(devices.cpu, copy=True)
def restore_weights_backup(obj, field, weight):
if weight is None:
setattr(obj, field, None)
return
getattr(obj, field).copy_(weight)
def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]): def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
weights_backup = getattr(self, "network_weights_backup", None) weights_backup = getattr(self, "network_weights_backup", None)
bias_backup = getattr(self, "network_bias_backup", None) bias_backup = getattr(self, "network_bias_backup", None)
@ -397,22 +334,28 @@ def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Li
if weights_backup is not None: if weights_backup is not None:
if isinstance(self, torch.nn.MultiheadAttention): if isinstance(self, torch.nn.MultiheadAttention):
restore_weights_backup(self, 'in_proj_weight', weights_backup[0]) self.in_proj_weight.copy_(weights_backup[0])
restore_weights_backup(self.out_proj, 'weight', weights_backup[1]) self.out_proj.weight.copy_(weights_backup[1])
else: else:
restore_weights_backup(self, 'weight', weights_backup) self.weight.copy_(weights_backup)
if isinstance(self, torch.nn.MultiheadAttention): if bias_backup is not None:
restore_weights_backup(self.out_proj, 'bias', bias_backup) if isinstance(self, torch.nn.MultiheadAttention):
self.out_proj.bias.copy_(bias_backup)
else:
self.bias.copy_(bias_backup)
else: else:
restore_weights_backup(self, 'bias', bias_backup) if isinstance(self, torch.nn.MultiheadAttention):
self.out_proj.bias = None
else:
self.bias = None
def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]): def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
""" """
Applies the currently selected set of networks to the weights of torch layer self. Applies the currently selected set of networks to the weights of torch layer self.
If weights already have this particular set of networks applied, does nothing. If weights already have this particular set of networks applied, does nothing.
If not, restores original weights from backup and alters weights according to networks. If not, restores orginal weights from backup and alters weights according to networks.
""" """
network_layer_name = getattr(self, 'network_layer_name', None) network_layer_name = getattr(self, 'network_layer_name', None)
@ -424,30 +367,24 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
weights_backup = getattr(self, "network_weights_backup", None) weights_backup = getattr(self, "network_weights_backup", None)
if weights_backup is None and wanted_names != (): if weights_backup is None and wanted_names != ():
if current_names != () and not allowed_layer_without_weight(self): if current_names != ():
raise RuntimeError(f"{network_layer_name} - no backup weights found and current weights are not unchanged") raise RuntimeError("no backup weights found and current weights are not unchanged")
if isinstance(self, torch.nn.MultiheadAttention): if isinstance(self, torch.nn.MultiheadAttention):
weights_backup = (store_weights_backup(self.in_proj_weight), store_weights_backup(self.out_proj.weight)) weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True))
else: else:
weights_backup = store_weights_backup(self.weight) weights_backup = self.weight.to(devices.cpu, copy=True)
self.network_weights_backup = weights_backup self.network_weights_backup = weights_backup
bias_backup = getattr(self, "network_bias_backup", None) bias_backup = getattr(self, "network_bias_backup", None)
if bias_backup is None and wanted_names != (): if bias_backup is None:
if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None: if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None:
bias_backup = store_weights_backup(self.out_proj.bias) bias_backup = self.out_proj.bias.to(devices.cpu, copy=True)
elif getattr(self, 'bias', None) is not None: elif getattr(self, 'bias', None) is not None:
bias_backup = store_weights_backup(self.bias) bias_backup = self.bias.to(devices.cpu, copy=True)
else: else:
bias_backup = None bias_backup = None
# Unlike weight which always has value, some modules don't have bias.
# Only report if bias is not None and current bias are not unchanged.
if bias_backup is not None and current_names != ():
raise RuntimeError("no backup bias found and current bias are not unchanged")
self.network_bias_backup = bias_backup self.network_bias_backup = bias_backup
if current_names != wanted_names: if current_names != wanted_names:
@ -455,7 +392,7 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
for net in loaded_networks: for net in loaded_networks:
module = net.modules.get(network_layer_name, None) module = net.modules.get(network_layer_name, None)
if module is not None and hasattr(self, 'weight') and not isinstance(module, modules.models.sd3.mmdit.QkvLinear): if module is not None and hasattr(self, 'weight'):
try: try:
with torch.no_grad(): with torch.no_grad():
if getattr(self, 'fp16_weight', None) is None: if getattr(self, 'fp16_weight', None) is None:
@ -492,12 +429,9 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out: if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
try: try:
with torch.no_grad(): with torch.no_grad():
# Send "real" orig_weight into MHA's lora module updown_q, _ = module_q.calc_updown(self.in_proj_weight)
qw, kw, vw = self.in_proj_weight.chunk(3, 0) updown_k, _ = module_k.calc_updown(self.in_proj_weight)
updown_q, _ = module_q.calc_updown(qw) updown_v, _ = module_v.calc_updown(self.in_proj_weight)
updown_k, _ = module_k.calc_updown(kw)
updown_v, _ = module_v.calc_updown(vw)
del qw, kw, vw
updown_qkv = torch.vstack([updown_q, updown_k, updown_v]) updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
updown_out, ex_bias = module_out.calc_updown(self.out_proj.weight) updown_out, ex_bias = module_out.calc_updown(self.out_proj.weight)
@ -515,24 +449,6 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
continue continue
if isinstance(self, modules.models.sd3.mmdit.QkvLinear) and module_q and module_k and module_v:
try:
with torch.no_grad():
# Send "real" orig_weight into MHA's lora module
qw, kw, vw = self.weight.chunk(3, 0)
updown_q, _ = module_q.calc_updown(qw)
updown_k, _ = module_k.calc_updown(kw)
updown_v, _ = module_v.calc_updown(vw)
del qw, kw, vw
updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
self.weight += updown_qkv
except RuntimeError as e:
logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
continue
if module is None: if module is None:
continue continue
@ -647,16 +563,22 @@ def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs) return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs)
def process_network_files(names: list[str] | None = None): def list_available_networks():
available_networks.clear()
available_network_aliases.clear()
forbidden_network_aliases.clear()
available_network_hash_lookup.clear()
forbidden_network_aliases.update({"none": 1, "Addams": 1})
os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"])) candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir_backcompat, allowed_extensions=[".pt", ".ckpt", ".safetensors"])) candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir_backcompat, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
for filename in candidates: for filename in candidates:
if os.path.isdir(filename): if os.path.isdir(filename):
continue continue
name = os.path.splitext(os.path.basename(filename))[0] name = os.path.splitext(os.path.basename(filename))[0]
# if names is provided, only load networks with names in the list
if names and name not in names:
continue
try: try:
entry = network.NetworkOnDisk(name, filename) entry = network.NetworkOnDisk(name, filename)
except OSError: # should catch FileNotFoundError and PermissionError etc. except OSError: # should catch FileNotFoundError and PermissionError etc.
@ -672,22 +594,6 @@ def process_network_files(names: list[str] | None = None):
available_network_aliases[entry.alias] = entry available_network_aliases[entry.alias] = entry
def update_available_networks_by_names(names: list[str]):
process_network_files(names)
def list_available_networks():
available_networks.clear()
available_network_aliases.clear()
forbidden_network_aliases.clear()
available_network_hash_lookup.clear()
forbidden_network_aliases.update({"none": 1, "Addams": 1})
os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
process_network_files()
re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)") re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")
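Among the master-side changes above, the MultiheadAttention path now chunks the fused in_proj weight so each LoRA module receives the projection it was trained against. Roughly, and using the method names from the diff as assumptions:

import torch

def fused_qkv_updown(in_proj_weight, module_q, module_k, module_v):
    qw, kw, vw = in_proj_weight.chunk(3, 0)   # split the fused weight along dim 0
    updown_q, _ = module_q.calc_updown(qw)
    updown_k, _ = module_k.calc_updown(kw)
    updown_v, _ = module_v.calc_updown(vw)
    return torch.vstack([updown_q, updown_k, updown_v])   # re-fuse the deltas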

View File

@ -36,7 +36,6 @@ shared.options_templates.update(shared.options_section(('extra_networks', "Extra
"sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks), "sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks),
"lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}), "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
"lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"), "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
"lora_bundled_ti_to_infotext": shared.OptionInfo(True, "Add Lora name as TI hashes for bundled Textual Inversion").info('"Add Textual Inversion hashes to infotext" needs to be enabled'),
"lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"), "lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"),
"lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}), "lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
"lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}), "lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}),

View File

@ -21,12 +21,10 @@ re_comma = re.compile(r" *, *")
def build_tags(metadata): def build_tags(metadata):
tags = {} tags = {}
ss_tag_frequency = metadata.get("ss_tag_frequency", {}) for _, tags_dict in metadata.get("ss_tag_frequency", {}).items():
if ss_tag_frequency is not None and hasattr(ss_tag_frequency, 'items'): for tag, tag_count in tags_dict.items():
for _, tags_dict in ss_tag_frequency.items(): tag = tag.strip()
for tag, tag_count in tags_dict.items(): tags[tag] = tags.get(tag, 0) + int(tag_count)
tag = tag.strip()
tags[tag] = tags.get(tag, 0) + int(tag_count)
if tags and is_non_comma_tagset(tags): if tags and is_non_comma_tagset(tags):
new_tags = {} new_tags = {}
@ -151,8 +149,6 @@ class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor)
v = random.random() * max_count v = random.random() * max_count
if count > v: if count > v:
for x in "({[]})":
tag = tag.replace(x, '\\' + x)
res.append(tag) res.append(tag)
return ", ".join(sorted(res)) return ", ".join(sorted(res))

View File

@ -31,7 +31,7 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
"name": name, "name": name,
"filename": lora_on_disk.filename, "filename": lora_on_disk.filename,
"shorthash": lora_on_disk.shorthash, "shorthash": lora_on_disk.shorthash,
"preview": self.find_preview(path) or self.find_embedded_preview(path, name, lora_on_disk.metadata), "preview": self.find_preview(path),
"description": self.find_description(path), "description": self.find_description(path),
"search_terms": search_terms, "search_terms": search_terms,
"local_preview": f"{path}.{shared.opts.samples_format}", "local_preview": f"{path}.{shared.opts.samples_format}",
@ -60,7 +60,7 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
else: else:
sd_version = lora_on_disk.sd_version sd_version = lora_on_disk.sd_version
if shared.opts.lora_show_all or not enable_filter or not shared.sd_model: if shared.opts.lora_show_all or not enable_filter:
pass pass
elif sd_version == network.SdVersion.Unknown: elif sd_version == network.SdVersion.Unknown:
model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1 model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1

View File

@ -29,7 +29,6 @@ onUiLoaded(async() => {
}); });
function getActiveTab(elements, all = false) { function getActiveTab(elements, all = false) {
if (!elements.img2imgTabs) return null;
const tabs = elements.img2imgTabs.querySelectorAll("button"); const tabs = elements.img2imgTabs.querySelectorAll("button");
if (all) return tabs; if (all) return tabs;
@ -44,7 +43,6 @@ onUiLoaded(async() => {
// Get tab ID // Get tab ID
function getTabId(elements) { function getTabId(elements) {
const activeTab = getActiveTab(elements); const activeTab = getActiveTab(elements);
if (!activeTab) return null;
return tabNameToElementId[activeTab.innerText]; return tabNameToElementId[activeTab.innerText];
} }
@ -254,7 +252,6 @@ onUiLoaded(async() => {
let isMoving = false; let isMoving = false;
let mouseX, mouseY; let mouseX, mouseY;
let activeElement; let activeElement;
let interactedWithAltKey = false;
const elements = Object.fromEntries( const elements = Object.fromEntries(
Object.keys(elementIDs).map(id => [ Object.keys(elementIDs).map(id => [
@ -280,7 +277,7 @@ onUiLoaded(async() => {
const targetElement = gradioApp().querySelector(elemId); const targetElement = gradioApp().querySelector(elemId);
if (!targetElement) { if (!targetElement) {
console.log("Element not found", elemId); console.log("Element not found");
return; return;
} }
@ -295,7 +292,7 @@ onUiLoaded(async() => {
// Create tooltip // Create tooltip
function createTooltip() { function createTooltip() {
const toolTipElement = const toolTipElemnt =
targetElement.querySelector(".image-container"); targetElement.querySelector(".image-container");
const tooltip = document.createElement("div"); const tooltip = document.createElement("div");
tooltip.className = "canvas-tooltip"; tooltip.className = "canvas-tooltip";
@ -358,7 +355,7 @@ onUiLoaded(async() => {
tooltip.appendChild(tooltipContent); tooltip.appendChild(tooltipContent);
// Add a hint element to the target element // Add a hint element to the target element
toolTipElement.appendChild(tooltip); toolTipElemnt.appendChild(tooltip);
} }
//Show tool tip if setting enable //Show tool tip if setting enable
@ -368,9 +365,9 @@ onUiLoaded(async() => {
// In the course of research, it was found that the tag img is very harmful when zooming and creates white canvases. This hack allows you to almost never think about this problem, it has no effect on webui. // In the course of research, it was found that the tag img is very harmful when zooming and creates white canvases. This hack allows you to almost never think about this problem, it has no effect on webui.
function fixCanvas() { function fixCanvas() {
const activeTab = getActiveTab(elements)?.textContent.trim(); const activeTab = getActiveTab(elements).textContent.trim();
if (activeTab && activeTab !== "img2img") { if (activeTab !== "img2img") {
const img = targetElement.querySelector(`${elemId} img`); const img = targetElement.querySelector(`${elemId} img`);
if (img && img.style.display !== "none") { if (img && img.style.display !== "none") {
@ -511,10 +508,6 @@ onUiLoaded(async() => {
if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) { if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) {
e.preventDefault(); e.preventDefault();
if (hotkeysConfig.canvas_hotkey_zoom === "Alt") {
interactedWithAltKey = true;
}
let zoomPosX, zoomPosY; let zoomPosX, zoomPosY;
let delta = 0.2; let delta = 0.2;
if (elemData[elemId].zoomLevel > 7) { if (elemData[elemId].zoomLevel > 7) {
@ -790,29 +783,23 @@ onUiLoaded(async() => {
targetElement.addEventListener("mouseleave", handleMouseLeave); targetElement.addEventListener("mouseleave", handleMouseLeave);
// Reset zoom when click on another tab // Reset zoom when click on another tab
if (elements.img2imgTabs) { elements.img2imgTabs.addEventListener("click", resetZoom);
elements.img2imgTabs.addEventListener("click", resetZoom); elements.img2imgTabs.addEventListener("click", () => {
elements.img2imgTabs.addEventListener("click", () => { // targetElement.style.width = "";
// targetElement.style.width = ""; if (parseInt(targetElement.style.width) > 865) {
if (parseInt(targetElement.style.width) > 865) { setTimeout(fitToElement, 0);
setTimeout(fitToElement, 0); }
} });
});
}
targetElement.addEventListener("wheel", e => { targetElement.addEventListener("wheel", e => {
// change zoom level // change zoom level
const operation = (e.deltaY || -e.wheelDelta) > 0 ? "-" : "+"; const operation = e.deltaY > 0 ? "-" : "+";
changeZoomLevel(operation, e); changeZoomLevel(operation, e);
// Handle brush size adjustment with ctrl key pressed // Handle brush size adjustment with ctrl key pressed
if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) { if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) {
e.preventDefault(); e.preventDefault();
if (hotkeysConfig.canvas_hotkey_adjust === "Alt") {
interactedWithAltKey = true;
}
// Increase or decrease brush size based on scroll direction // Increase or decrease brush size based on scroll direction
adjustBrushSize(elemId, e.deltaY); adjustBrushSize(elemId, e.deltaY);
} }
@ -852,20 +839,6 @@ onUiLoaded(async() => {
document.addEventListener("keydown", handleMoveKeyDown); document.addEventListener("keydown", handleMoveKeyDown);
document.addEventListener("keyup", handleMoveKeyUp); document.addEventListener("keyup", handleMoveKeyUp);
// Prevent firefox from opening main menu when alt is used as a hotkey for zoom or brush size
function handleAltKeyUp(e) {
if (e.key !== "Alt" || !interactedWithAltKey) {
return;
}
e.preventDefault();
interactedWithAltKey = false;
}
document.addEventListener("keyup", handleAltKeyUp);
// Detect zoom level and update the pan speed. // Detect zoom level and update the pan speed.
function updatePanPosition(movementX, movementY) { function updatePanPosition(movementX, movementY) {
let panSpeed = 2; let panSpeed = 2;

View File

@ -8,8 +8,8 @@ shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas
"canvas_hotkey_grow_brush": shared.OptionInfo("W", "Enlarge the brush size"), "canvas_hotkey_grow_brush": shared.OptionInfo("W", "Enlarge the brush size"),
"canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in firefox, turn off 'Automatically search the page text when typing' in the browser settings"), "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in firefox, turn off 'Automatically search the page text when typing' in the browser settings"),
"canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width "), "canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width "),
"canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"), "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"),
"canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"), "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, neededs for testing"),
"canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"), "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
"canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"), "canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
"canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"), "canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),

View File

@ -1,7 +1,7 @@
import math import math
import gradio as gr import gradio as gr
from modules import scripts, shared, ui_components, ui_settings, infotext_utils, errors from modules import scripts, shared, ui_components, ui_settings, infotext_utils
from modules.ui_components import FormColumn from modules.ui_components import FormColumn
@ -42,11 +42,7 @@ class ExtraOptionsSection(scripts.Script):
setting_name = extra_options[index] setting_name = extra_options[index]
with FormColumn(): with FormColumn():
try: comp = ui_settings.create_setting_component(setting_name)
comp = ui_settings.create_setting_component(setting_name)
except KeyError:
errors.report(f"Can't add extra options for {setting_name} in ui")
continue
self.comps.append(comp) self.comps.append(comp)
self.setting_names.append(setting_name) self.setting_names.append(setting_name)

View File

@ -1,5 +1,6 @@
import hypertile import hypertile
from modules import scripts, script_callbacks, shared from modules import scripts, script_callbacks, shared
from scripts.hypertile_xyz import add_axis_options
class ScriptHypertile(scripts.Script): class ScriptHypertile(scripts.Script):
@ -92,6 +93,7 @@ def on_ui_settings():
"hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile U-Net max depth").info("larger = more neural network layers affected; minor effect on performance"), "hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile U-Net max depth").info("larger = more neural network layers affected; minor effect on performance"),
"hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-Net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile U-Net max tile size").info("larger = worse performance"), "hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-Net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile U-Net max tile size").info("larger = worse performance"),
"hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-Net swap size", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, infotext="Hypertile U-Net swap size"), "hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-Net swap size", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, infotext="Hypertile U-Net swap size"),
"hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE", infotext="Hypertile VAE").info("minimal change in the generated picture"), "hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE", infotext="Hypertile VAE").info("minimal change in the generated picture"),
"hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile VAE max depth"), "hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile VAE max depth"),
"hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile VAE max tile size"), "hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile VAE max tile size"),
@ -103,20 +105,5 @@ def on_ui_settings():
shared.opts.add_option(name, opt) shared.opts.add_option(name, opt)
def add_axis_options():
xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module
xyz_grid.axis_options.extend([
xyz_grid.AxisOption("[Hypertile] Unet First pass Enabled", str, xyz_grid.apply_override('hypertile_enable_unet', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)),
xyz_grid.AxisOption("[Hypertile] Unet Second pass Enabled", str, xyz_grid.apply_override('hypertile_enable_unet_secondpass', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)),
xyz_grid.AxisOption("[Hypertile] Unet Max Depth", int, xyz_grid.apply_override("hypertile_max_depth_unet"), confirm=xyz_grid.confirm_range(0, 3, '[Hypertile] Unet Max Depth'), choices=lambda: [str(x) for x in range(4)]),
xyz_grid.AxisOption("[Hypertile] Unet Max Tile Size", int, xyz_grid.apply_override("hypertile_max_tile_unet"), confirm=xyz_grid.confirm_range(0, 512, '[Hypertile] Unet Max Tile Size')),
xyz_grid.AxisOption("[Hypertile] Unet Swap Size", int, xyz_grid.apply_override("hypertile_swap_size_unet"), confirm=xyz_grid.confirm_range(0, 64, '[Hypertile] Unet Swap Size')),
xyz_grid.AxisOption("[Hypertile] VAE Enabled", str, xyz_grid.apply_override('hypertile_enable_vae', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)),
xyz_grid.AxisOption("[Hypertile] VAE Max Depth", int, xyz_grid.apply_override("hypertile_max_depth_vae"), confirm=xyz_grid.confirm_range(0, 3, '[Hypertile] VAE Max Depth'), choices=lambda: [str(x) for x in range(4)]),
xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, xyz_grid.apply_override("hypertile_max_tile_vae"), confirm=xyz_grid.confirm_range(0, 512, '[Hypertile] VAE Max Tile Size')),
xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, xyz_grid.apply_override("hypertile_swap_size_vae"), confirm=xyz_grid.confirm_range(0, 64, '[Hypertile] VAE Swap Size')),
])
script_callbacks.on_ui_settings(on_ui_settings) script_callbacks.on_ui_settings(on_ui_settings)
script_callbacks.on_before_ui(add_axis_options) script_callbacks.on_before_ui(add_axis_options)

View File

@ -0,0 +1,51 @@
from modules import scripts
from modules.shared import opts
xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module
def int_applier(value_name:str, min_range:int = -1, max_range:int = -1):
"""
Returns a function that applies the given value to the given value_name in opts.data.
"""
def validate(value_name:str, value:str):
value = int(value)
# validate value
if not min_range == -1:
assert value >= min_range, f"Value {value} for {value_name} must be greater than or equal to {min_range}"
if not max_range == -1:
assert value <= max_range, f"Value {value} for {value_name} must be less than or equal to {max_range}"
def apply_int(p, x, xs):
validate(value_name, x)
opts.data[value_name] = int(x)
return apply_int
def bool_applier(value_name:str):
"""
Returns a function that applies the given value to the given value_name in opts.data.
"""
def validate(value_name:str, value:str):
assert value.lower() in ["true", "false"], f"Value {value} for {value_name} must be either true or false"
def apply_bool(p, x, xs):
validate(value_name, x)
value_boolean = x.lower() == "true"
opts.data[value_name] = value_boolean
return apply_bool
def add_axis_options():
extra_axis_options = [
xyz_grid.AxisOption("[Hypertile] Unet First pass Enabled", str, bool_applier("hypertile_enable_unet"), choices=xyz_grid.boolean_choice(reverse=True)),
xyz_grid.AxisOption("[Hypertile] Unet Second pass Enabled", str, bool_applier("hypertile_enable_unet_secondpass"), choices=xyz_grid.boolean_choice(reverse=True)),
xyz_grid.AxisOption("[Hypertile] Unet Max Depth", int, int_applier("hypertile_max_depth_unet", 0, 3), choices=lambda: [str(x) for x in range(4)]),
xyz_grid.AxisOption("[Hypertile] Unet Max Tile Size", int, int_applier("hypertile_max_tile_unet", 0, 512)),
xyz_grid.AxisOption("[Hypertile] Unet Swap Size", int, int_applier("hypertile_swap_size_unet", 0, 64)),
xyz_grid.AxisOption("[Hypertile] VAE Enabled", str, bool_applier("hypertile_enable_vae"), choices=xyz_grid.boolean_choice(reverse=True)),
xyz_grid.AxisOption("[Hypertile] VAE Max Depth", int, int_applier("hypertile_max_depth_vae", 0, 3), choices=lambda: [str(x) for x in range(4)]),
xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, int_applier("hypertile_max_tile_vae", 0, 512)),
xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, int_applier("hypertile_swap_size_vae", 0, 64)),
]
set_a = {opt.label for opt in xyz_grid.axis_options}
set_b = {opt.label for opt in extra_axis_options}
if set_a.intersection(set_b):
return
xyz_grid.axis_options.extend(extra_axis_options)
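The appliers defined above are plain callables with the (p, x, xs) signature that xyz_grid expects: p is the processing object (unused by these appliers), x the current cell value as a string, and xs the full list of values. An illustrative call, assuming the definitions above are in scope:

apply_depth = int_applier("hypertile_max_depth_unet", 0, 3)
apply_depth(None, "2", ["0", "1", "2", "3"])   # validates the range, then sets opts.data["hypertile_max_depth_unet"] = 2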

View File

@ -3,7 +3,6 @@ import gradio as gr
import math import math
from modules.ui_components import InputAccordion from modules.ui_components import InputAccordion
import modules.scripts as scripts import modules.scripts as scripts
from modules.torch_utils import float64
class SoftInpaintingSettings: class SoftInpaintingSettings:
@ -58,14 +57,10 @@ def latent_blend(settings, a, b, t):
# NOTE: We use inplace operations wherever possible. # NOTE: We use inplace operations wherever possible.
if len(t.shape) == 3: # [4][w][h] to [1][4][w][h]
# [4][w][h] to [1][4][w][h] t2 = t.unsqueeze(0)
t2 = t.unsqueeze(0) # [4][w][h] to [1][1][w][h] - the [4] seem redundant.
# [4][w][h] to [1][1][w][h] - the [4] seem redundant. t3 = t[0].unsqueeze(0).unsqueeze(0)
t3 = t[0].unsqueeze(0).unsqueeze(0)
else:
t2 = t
t3 = t[:, 0][:, None]
one_minus_t2 = 1 - t2 one_minus_t2 = 1 - t2
one_minus_t3 = 1 - t3 one_minus_t3 = 1 - t3
@ -80,11 +75,13 @@ def latent_blend(settings, a, b, t):
# Calculate the magnitude of the interpolated vectors. (We will remove this magnitude.) # Calculate the magnitude of the interpolated vectors. (We will remove this magnitude.)
# 64-bit operations are used here to allow large exponents. # 64-bit operations are used here to allow large exponents.
current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).to(float64(image_interp)).add_(0.00001) current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).to(torch.float64).add_(0.00001)
# Interpolate the powered magnitudes, then un-power them (bring them back to a power of 1). # Interpolate the powered magnitudes, then un-power them (bring them back to a power of 1).
a_magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(float64(a)).pow_(settings.inpaint_detail_preservation) * one_minus_t3 a_magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(torch.float64).pow_(
b_magnitude = torch.norm(b, p=2, dim=1, keepdim=True).to(float64(b)).pow_(settings.inpaint_detail_preservation) * t3 settings.inpaint_detail_preservation) * one_minus_t3
b_magnitude = torch.norm(b, p=2, dim=1, keepdim=True).to(torch.float64).pow_(
settings.inpaint_detail_preservation) * t3
desired_magnitude = a_magnitude desired_magnitude = a_magnitude
desired_magnitude.add_(b_magnitude).pow_(1 / settings.inpaint_detail_preservation) desired_magnitude.add_(b_magnitude).pow_(1 / settings.inpaint_detail_preservation)
del a_magnitude, b_magnitude, t3, one_minus_t3 del a_magnitude, b_magnitude, t3, one_minus_t3
@ -107,7 +104,7 @@ def latent_blend(settings, a, b, t):
def get_modified_nmask(settings, nmask, sigma): def get_modified_nmask(settings, nmask, sigma):
""" """
Converts a negative mask representing the transparency of the original latent vectors being overlaid Converts a negative mask representing the transparency of the original latent vectors being overlayed
to a mask that is scaled according to the denoising strength for this step. to a mask that is scaled according to the denoising strength for this step.
Where: Where:
@ -138,10 +135,7 @@ def apply_adaptive_masks(
from PIL import Image, ImageOps, ImageFilter from PIL import Image, ImageOps, ImageFilter
# TODO: Bias the blending according to the latent mask, add adjustable parameter for bias control. # TODO: Bias the blending according to the latent mask, add adjustable parameter for bias control.
if len(nmask.shape) == 3: latent_mask = nmask[0].float()
latent_mask = nmask[0].float()
else:
latent_mask = nmask[:, 0].float()
# convert the original mask into a form we use to scale distances for thresholding # convert the original mask into a form we use to scale distances for thresholding
mask_scalar = 1 - (torch.clamp(latent_mask, min=0, max=1) ** (settings.mask_blend_scale / 2)) mask_scalar = 1 - (torch.clamp(latent_mask, min=0, max=1) ** (settings.mask_blend_scale / 2))
mask_scalar = (0.5 * (1 - settings.composite_mask_influence) mask_scalar = (0.5 * (1 - settings.composite_mask_influence)
@ -163,14 +157,7 @@ def apply_adaptive_masks(
percentile_min=0.25, percentile_max=0.75, min_width=1) percentile_min=0.25, percentile_max=0.75, min_width=1)
# The distance at which opacity of original decreases to 50% # The distance at which opacity of original decreases to 50%
if len(mask_scalar.shape) == 3: half_weighted_distance = settings.composite_difference_threshold * mask_scalar
if mask_scalar.shape[0] > i:
half_weighted_distance = settings.composite_difference_threshold * mask_scalar[i]
else:
half_weighted_distance = settings.composite_difference_threshold * mask_scalar[0]
else:
half_weighted_distance = settings.composite_difference_threshold * mask_scalar
converted_mask = converted_mask / half_weighted_distance converted_mask = converted_mask / half_weighted_distance
converted_mask = 1 / (1 + converted_mask ** settings.composite_difference_contrast) converted_mask = 1 / (1 + converted_mask ** settings.composite_difference_contrast)
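The master-side branches above make soft inpainting tolerant of masks that arrive either as a single image [4][w][h] or already batched [b][4][w][h]. A minimal sketch of that shape handling, mirroring the branch added to latent_blend:

import torch

def split_blend_masks(t: torch.Tensor):
    if t.dim() == 3:                          # [4][w][h]: add a batch dimension
        t2 = t.unsqueeze(0)                   # [1][4][w][h]
        t3 = t[0].unsqueeze(0).unsqueeze(0)   # [1][1][w][h], the [4] is redundant here
    else:                                     # already [b][4][w][h]
        t2 = t
        t3 = t[:, 0][:, None]                 # [b][1][w][h]
    return t2, t3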

View File

@ -1,5 +1,5 @@
<div class="copy-path-button card-button" <div class="copy-path-button card-button"
title="Copy path to clipboard" title="Copy path to clipboard"
onclick="extraNetworksCopyCardPath(event)" onclick="extraNetworksCopyCardPath(event, '{filename}')"
data-clipboard-text="{filename}"> data-clipboard-text="{filename}">
</div> </div>

View File

@ -1,4 +1,4 @@
<div class="edit-button card-button" <div class="edit-button card-button"
title="Edit metadata" title="Edit metadata"
onclick="extraNetworksEditUserMetadata(event, '{tabname}', '{extra_networks_tabname}')"> onclick="extraNetworksEditUserMetadata(event, '{tabname}', '{extra_networks_tabname}', '{name}')">
</div> </div>

View File

@ -1,4 +1,4 @@
<div class="metadata-button card-button" <div class="metadata-button card-button"
title="Show internal metadata" title="Show internal metadata"
onclick="extraNetworksRequestMetadata(event, '{extra_networks_tabname}')"> onclick="extraNetworksRequestMetadata(event, '{extra_networks_tabname}', '{name}')">
</div> </div>

View File

@ -1,8 +0,0 @@
<div class="extra-network-pane-content-dirs">
<div id='{tabname}_{extra_networks_tabname}_dirs' class='extra-network-dirs'>
{dirs_html}
</div>
<div id='{tabname}_{extra_networks_tabname}_cards' class='extra-network-cards'>
{items_html}
</div>
</div>

View File

@ -1,8 +0,0 @@
<div class="extra-network-pane-content-tree resize-handle-row">
<div id='{tabname}_{extra_networks_tabname}_tree' class='extra-network-tree' style='flex-basis: {extra_networks_tree_view_default_width}px'>
{tree_html}
</div>
<div id='{tabname}_{extra_networks_tabname}_cards' class='extra-network-cards' style='flex-grow: 1;'>
{items_html}
</div>
</div>

View File

@ -1,53 +1,23 @@
<div id='{tabname}_{extra_networks_tabname}_pane' class='extra-network-pane {tree_view_div_default_display_class}'> <div id='{tabname}_{extra_networks_tabname}_pane' class='extra-network-pane'>
<div class="extra-network-control" id="{tabname}_{extra_networks_tabname}_controls" style="display:none" > <div class="extra-network-control" id="{tabname}_{extra_networks_tabname}_controls" style="display:none" >
<div class="extra-network-control--search"> <div class="extra-network-control--search">
<input <input
id="{tabname}_{extra_networks_tabname}_extra_search" id="{tabname}_{extra_networks_tabname}_extra_search"
class="extra-network-control--search-text" class="extra-network-control--search-text"
type="search" type="search"
placeholder="Search" placeholder="Filter files"
> >
</div> </div>
<small>Sort: </small>
<div <div
id="{tabname}_{extra_networks_tabname}_extra_sort_path" id="{tabname}_{extra_networks_tabname}_extra_sort"
class="extra-network-control--sort{sort_path_active}" class="extra-network-control--sort"
data-sortkey="default" data-sortmode="{data_sortmode}"
data-sortkey="{data_sortkey}"
title="Sort by path" title="Sort by path"
onclick="extraNetworksControlSortOnClick(event, '{tabname}', '{extra_networks_tabname}');" onclick="extraNetworksControlSortOnClick(event, '{tabname}', '{extra_networks_tabname}');"
> >
<i class="extra-network-control--icon extra-network-control--sort-icon"></i> <i class="extra-network-control--sort-icon"></i>
</div> </div>
<div
id="{tabname}_{extra_networks_tabname}_extra_sort_name"
class="extra-network-control--sort{sort_name_active}"
data-sortkey="name"
title="Sort by name"
onclick="extraNetworksControlSortOnClick(event, '{tabname}', '{extra_networks_tabname}');"
>
<i class="extra-network-control--icon extra-network-control--sort-icon"></i>
</div>
<div
id="{tabname}_{extra_networks_tabname}_extra_sort_date_created"
class="extra-network-control--sort{sort_date_created_active}"
data-sortkey="date_created"
title="Sort by date created"
onclick="extraNetworksControlSortOnClick(event, '{tabname}', '{extra_networks_tabname}');"
>
<i class="extra-network-control--icon extra-network-control--sort-icon"></i>
</div>
<div
id="{tabname}_{extra_networks_tabname}_extra_sort_date_modified"
class="extra-network-control--sort{sort_date_modified_active}"
data-sortkey="date_modified"
title="Sort by date modified"
onclick="extraNetworksControlSortOnClick(event, '{tabname}', '{extra_networks_tabname}');"
>
<i class="extra-network-control--icon extra-network-control--sort-icon"></i>
</div>
<small> </small>
<div <div
id="{tabname}_{extra_networks_tabname}_extra_sort_dir" id="{tabname}_{extra_networks_tabname}_extra_sort_dir"
class="extra-network-control--sort-dir" class="extra-network-control--sort-dir"
@ -55,18 +25,15 @@
title="Sort ascending" title="Sort ascending"
onclick="extraNetworksControlSortDirOnClick(event, '{tabname}', '{extra_networks_tabname}');" onclick="extraNetworksControlSortDirOnClick(event, '{tabname}', '{extra_networks_tabname}');"
> >
<i class="extra-network-control--icon extra-network-control--sort-dir-icon"></i> <i class="extra-network-control--sort-dir-icon"></i>
</div> </div>
<small> </small>
<div <div
id="{tabname}_{extra_networks_tabname}_extra_tree_view" id="{tabname}_{extra_networks_tabname}_extra_tree_view"
class="extra-network-control--tree-view {tree_view_btn_extra_class}" class="extra-network-control--tree-view {tree_view_btn_extra_class}"
title="Enable Tree View" title="Enable Tree View"
onclick="extraNetworksControlTreeViewOnClick(event, '{tabname}', '{extra_networks_tabname}');" onclick="extraNetworksControlTreeViewOnClick(event, '{tabname}', '{extra_networks_tabname}');"
> >
<i class="extra-network-control--icon extra-network-control--tree-view-icon"></i> <i class="extra-network-control--tree-view-icon"></i>
</div> </div>
<div <div
id="{tabname}_{extra_networks_tabname}_extra_refresh" id="{tabname}_{extra_networks_tabname}_extra_refresh"
@ -74,8 +41,15 @@
title="Refresh page" title="Refresh page"
onclick="extraNetworksControlRefreshOnClick(event, '{tabname}', '{extra_networks_tabname}');" onclick="extraNetworksControlRefreshOnClick(event, '{tabname}', '{extra_networks_tabname}');"
> >
<i class="extra-network-control--icon extra-network-control--refresh-icon"></i> <i class="extra-network-control--refresh-icon"></i>
</div> </div>
</div> </div>
{pane_content} <div class="extra-network-pane-content">
</div> <div id='{tabname}_{extra_networks_tabname}_tree' class='extra-network-tree {tree_view_div_extra_class}'>
{tree_html}
</div>
<div id='{tabname}_{extra_networks_tabname}_cards' class='extra-network-cards'>
{items_html}
</div>
</div>
</div>

View File

@ -50,17 +50,17 @@ function dimensionChange(e, is_width, is_height) {
var scaledx = targetElement.naturalWidth * viewportscale; var scaledx = targetElement.naturalWidth * viewportscale;
var scaledy = targetElement.naturalHeight * viewportscale; var scaledy = targetElement.naturalHeight * viewportscale;
var clientRectTop = (viewportOffset.top + window.scrollY); var cleintRectTop = (viewportOffset.top + window.scrollY);
var clientRectLeft = (viewportOffset.left + window.scrollX); var cleintRectLeft = (viewportOffset.left + window.scrollX);
var clientRectCentreY = clientRectTop + (targetElement.clientHeight / 2); var cleintRectCentreY = cleintRectTop + (targetElement.clientHeight / 2);
var clientRectCentreX = clientRectLeft + (targetElement.clientWidth / 2); var cleintRectCentreX = cleintRectLeft + (targetElement.clientWidth / 2);
var arscale = Math.min(scaledx / currentWidth, scaledy / currentHeight); var arscale = Math.min(scaledx / currentWidth, scaledy / currentHeight);
var arscaledx = currentWidth * arscale; var arscaledx = currentWidth * arscale;
var arscaledy = currentHeight * arscale; var arscaledy = currentHeight * arscale;
var arRectTop = clientRectCentreY - (arscaledy / 2); var arRectTop = cleintRectCentreY - (arscaledy / 2);
var arRectLeft = clientRectCentreX - (arscaledx / 2); var arRectLeft = cleintRectCentreX - (arscaledx / 2);
var arRectWidth = arscaledx; var arRectWidth = arscaledx;
var arRectHeight = arscaledy; var arRectHeight = arscaledy;

View File

@ -8,6 +8,9 @@ var contextMenuInit = function() {
}; };
function showContextMenu(event, element, menuEntries) { function showContextMenu(event, element, menuEntries) {
let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft;
let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop;
let oldMenu = gradioApp().querySelector('#context-menu'); let oldMenu = gradioApp().querySelector('#context-menu');
if (oldMenu) { if (oldMenu) {
oldMenu.remove(); oldMenu.remove();
@ -20,8 +23,10 @@ var contextMenuInit = function() {
contextMenu.style.background = baseStyle.background; contextMenu.style.background = baseStyle.background;
contextMenu.style.color = baseStyle.color; contextMenu.style.color = baseStyle.color;
contextMenu.style.fontFamily = baseStyle.fontFamily; contextMenu.style.fontFamily = baseStyle.fontFamily;
contextMenu.style.top = event.pageY + 'px'; contextMenu.style.top = posy + 'px';
contextMenu.style.left = event.pageX + 'px'; contextMenu.style.left = posx + 'px';
const contextMenuList = document.createElement('ul'); const contextMenuList = document.createElement('ul');
contextMenuList.className = 'context-menu-items'; contextMenuList.className = 'context-menu-items';
@ -38,6 +43,21 @@ var contextMenuInit = function() {
}); });
gradioApp().appendChild(contextMenu); gradioApp().appendChild(contextMenu);
let menuWidth = contextMenu.offsetWidth + 4;
let menuHeight = contextMenu.offsetHeight + 4;
let windowWidth = window.innerWidth;
let windowHeight = window.innerHeight;
if ((windowWidth - posx) < menuWidth) {
contextMenu.style.left = windowWidth - menuWidth + "px";
}
if ((windowHeight - posy) < menuHeight) {
contextMenu.style.top = windowHeight - menuHeight + "px";
}
} }
function appendContextMenuOption(targetElementSelector, entryName, entryFunction) { function appendContextMenuOption(targetElementSelector, entryName, entryFunction) {
@ -87,23 +107,16 @@ var contextMenuInit = function() {
oldMenu.remove(); oldMenu.remove();
} }
}); });
['contextmenu', 'touchstart'].forEach((eventType) => { gradioApp().addEventListener("contextmenu", function(e) {
gradioApp().addEventListener(eventType, function(e) { let oldMenu = gradioApp().querySelector('#context-menu');
let ev = e; if (oldMenu) {
if (eventType.startsWith('touch')) { oldMenu.remove();
if (e.touches.length !== 2) return; }
ev = e.touches[0]; menuSpecs.forEach(function(v, k) {
if (e.composedPath()[0].matches(k)) {
showContextMenu(e, e.composedPath()[0], v);
e.preventDefault();
} }
let oldMenu = gradioApp().querySelector('#context-menu');
if (oldMenu) {
oldMenu.remove();
}
menuSpecs.forEach(function(v, k) {
if (e.composedPath()[0].matches(k)) {
showContextMenu(ev, e.composedPath()[0], v);
e.preventDefault();
}
});
}); });
}); });
eventListenerApplied = true; eventListenerApplied = true;
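The v1.8.0 side of this hunk builds page coordinates from clientX/clientY plus the scroll offsets and clamps the menu so it cannot run off screen, while master positions it at event.pageX/pageY and additionally opens the menu on a two-finger touchstart. A minimal standalone sketch of the clamping idea, assuming a position:fixed menu so client coordinates can be used directly (the webui's own menu is absolutely positioned and folds scroll offsets in):

    // Place a position:fixed popup at the pointer, then pull it back inside
    // the viewport if it would overflow the right or bottom edge.
    function placeMenu(menu, event) {
        let posx = event.clientX;
        let posy = event.clientY;
        document.body.appendChild(menu);           // must be attached before measuring
        const menuWidth = menu.offsetWidth + 4;    // small margin, as in the hunk above
        const menuHeight = menu.offsetHeight + 4;
        if ((window.innerWidth - posx) < menuWidth) {
            posx = window.innerWidth - menuWidth;
        }
        if ((window.innerHeight - posy) < menuHeight) {
            posy = window.innerHeight - menuHeight;
        }
        menu.style.left = posx + 'px';
        menu.style.top = posy + 'px';
    }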
@ -56,15 +56,6 @@ function eventHasFiles(e) {
return false; return false;
} }
function isURL(url) {
try {
const _ = new URL(url);
return true;
} catch {
return false;
}
}
function dragDropTargetIsPrompt(target) { function dragDropTargetIsPrompt(target) {
if (target?.placeholder && target?.placeholder.indexOf("Prompt") >= 0) return true; if (target?.placeholder && target?.placeholder.indexOf("Prompt") >= 0) return true;
if (target?.parentNode?.parentNode?.className?.indexOf("prompt") > 0) return true; if (target?.parentNode?.parentNode?.className?.indexOf("prompt") > 0) return true;
@ -83,39 +74,22 @@ window.document.addEventListener('dragover', e => {
e.dataTransfer.dropEffect = 'copy'; e.dataTransfer.dropEffect = 'copy';
}); });
window.document.addEventListener('drop', async e => { window.document.addEventListener('drop', e => {
const target = e.composedPath()[0]; const target = e.composedPath()[0];
const url = e.dataTransfer.getData('text/uri-list') || e.dataTransfer.getData('text/plain'); if (!eventHasFiles(e)) return;
if (!eventHasFiles(e) && !isURL(url)) return;
if (dragDropTargetIsPrompt(target)) { if (dragDropTargetIsPrompt(target)) {
e.stopPropagation(); e.stopPropagation();
e.preventDefault(); e.preventDefault();
const isImg2img = get_tab_index('tabs') == 1; let prompt_target = get_tab_index('tabs') == 1 ? "img2img_prompt_image" : "txt2img_prompt_image";
let prompt_image_target = isImg2img ? "img2img_prompt_image" : "txt2img_prompt_image";
const imgParent = gradioApp().getElementById(prompt_image_target); const imgParent = gradioApp().getElementById(prompt_target);
const files = e.dataTransfer.files; const files = e.dataTransfer.files;
const fileInput = imgParent.querySelector('input[type="file"]'); const fileInput = imgParent.querySelector('input[type="file"]');
if (eventHasFiles(e) && fileInput) { if (fileInput) {
fileInput.files = files; fileInput.files = files;
fileInput.dispatchEvent(new Event('change')); fileInput.dispatchEvent(new Event('change'));
} else if (url) {
try {
const request = await fetch(url);
if (!request.ok) {
console.error('Error fetching URL:', url, request.status);
return;
}
const data = new DataTransfer();
data.items.add(new File([await request.blob()], 'image.png'));
fileInput.files = data.files;
fileInput.dispatchEvent(new Event('change'));
} catch (error) {
console.error('Error fetching URL:', url, error);
return;
}
} }
} }
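Besides dropped files, the master side of this drop handler also accepts a dropped image URL: it reads text/uri-list (or text/plain), fetches the image, wraps the blob in a File, and hands it to the hidden file input through a DataTransfer. A hedged sketch of that pattern against a generic file input rather than the webui's prompt-image element:

    // Load a remote image into a file input, as if the user had picked it manually.
    async function urlIntoFileInput(url, fileInput) {
        const response = await fetch(url);
        if (!response.ok) {
            console.error('Error fetching URL:', url, response.status);
            return;
        }
        const data = new DataTransfer();
        data.items.add(new File([await response.blob()], 'image.png'));
        fileInput.files = data.files;
        fileInput.dispatchEvent(new Event('change'));   // let listeners react as for a manual pick
    }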
@ -64,14 +64,6 @@ function keyupEditAttention(event) {
selectionEnd++; selectionEnd++;
} }
// deselect surrounding whitespace
while (text[selectionStart] == " " && selectionStart < selectionEnd) {
selectionStart++;
}
while (text[selectionEnd - 1] == " " && selectionEnd > selectionStart) {
selectionEnd--;
}
target.setSelectionRange(selectionStart, selectionEnd); target.setSelectionRange(selectionStart, selectionEnd);
return true; return true;
} }
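The lines present only on the master side shrink the selection so that attention brackets never wrap leading or trailing spaces. The same trimming shown on a plain string, with made-up start/end indices:

    // Shrink [start, end) so it excludes surrounding spaces.
    function trimSelection(text, start, end) {
        while (text[start] === ' ' && start < end) start++;
        while (text[end - 1] === ' ' && end > start) end--;
        return [start, end];
    }

    console.log(trimSelection('a  cat  ', 1, 8));   // -> [3, 6], i.e. just the word "cat"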
@ -39,12 +39,12 @@ function setupExtraNetworksForTab(tabname) {
// tabname_full = {tabname}_{extra_networks_tabname} // tabname_full = {tabname}_{extra_networks_tabname}
var tabname_full = elem.id; var tabname_full = elem.id;
var search = gradioApp().querySelector("#" + tabname_full + "_extra_search"); var search = gradioApp().querySelector("#" + tabname_full + "_extra_search");
var sort_mode = gradioApp().querySelector("#" + tabname_full + "_extra_sort");
var sort_dir = gradioApp().querySelector("#" + tabname_full + "_extra_sort_dir"); var sort_dir = gradioApp().querySelector("#" + tabname_full + "_extra_sort_dir");
var refresh = gradioApp().querySelector("#" + tabname_full + "_extra_refresh"); var refresh = gradioApp().querySelector("#" + tabname_full + "_extra_refresh");
var currentSort = '';
// If any of the buttons above don't exist, we want to skip this iteration of the loop. // If any of the buttons above don't exist, we want to skip this iteration of the loop.
if (!search || !sort_dir || !refresh) { if (!search || !sort_mode || !sort_dir || !refresh) {
return; // `return` is equivalent of `continue` but for forEach loops. return; // `return` is equivalent of `continue` but for forEach loops.
} }
@ -52,7 +52,7 @@ function setupExtraNetworksForTab(tabname) {
var searchTerm = search.value.toLowerCase(); var searchTerm = search.value.toLowerCase();
gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card').forEach(function(elem) { gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card').forEach(function(elem) {
var searchOnly = elem.querySelector('.search_only'); var searchOnly = elem.querySelector('.search_only');
var text = Array.prototype.map.call(elem.querySelectorAll('.search_terms, .description'), function(t) { var text = Array.prototype.map.call(elem.querySelectorAll('.search_terms'), function(t) {
return t.textContent.toLowerCase(); return t.textContent.toLowerCase();
}).join(" "); }).join(" ");
@ -71,46 +71,42 @@ function setupExtraNetworksForTab(tabname) {
}; };
var applySort = function(force) { var applySort = function(force) {
var cards = gradioApp().querySelectorAll('#' + tabname_full + ' div.card'); var cards = gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card');
var parent = gradioApp().querySelector('#' + tabname_full + "_cards");
var reverse = sort_dir.dataset.sortdir == "Descending"; var reverse = sort_dir.dataset.sortdir == "Descending";
var activeSearchElem = gradioApp().querySelector('#' + tabname_full + "_controls .extra-network-control--sort.extra-network-control--enabled"); var sortKey = sort_mode.dataset.sortmode.toLowerCase().replace("sort", "").replaceAll(" ", "_").replace(/_+$/, "").trim() || "name";
var sortKey = activeSearchElem ? activeSearchElem.dataset.sortkey : "default"; sortKey = "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1);
var sortKeyDataField = "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1); var sortKeyStore = sortKey + "-" + (reverse ? "Descending" : "Ascending") + "-" + cards.length;
var sortKeyStore = sortKey + "-" + sort_dir.dataset.sortdir + "-" + cards.length;
if (sortKeyStore == currentSort && !force) { if (sortKeyStore == sort_mode.dataset.sortkey && !force) {
return; return;
} }
currentSort = sortKeyStore; sort_mode.dataset.sortkey = sortKeyStore;
cards.forEach(function(card) {
card.originalParentElement = card.parentElement;
});
var sortedCards = Array.from(cards); var sortedCards = Array.from(cards);
sortedCards.sort(function(cardA, cardB) { sortedCards.sort(function(cardA, cardB) {
var a = cardA.dataset[sortKeyDataField]; var a = cardA.dataset[sortKey];
var b = cardB.dataset[sortKeyDataField]; var b = cardB.dataset[sortKey];
if (!isNaN(a) && !isNaN(b)) { if (!isNaN(a) && !isNaN(b)) {
return parseInt(a) - parseInt(b); return parseInt(a) - parseInt(b);
} }
return (a < b ? -1 : (a > b ? 1 : 0)); return (a < b ? -1 : (a > b ? 1 : 0));
}); });
if (reverse) { if (reverse) {
sortedCards.reverse(); sortedCards.reverse();
} }
cards.forEach(function(card) {
parent.innerHTML = ''; card.remove();
});
var frag = document.createDocumentFragment(); sortedCards.forEach(function(card) {
sortedCards.forEach(function(card) { card.originalParentElement.appendChild(card);
frag.appendChild(card);
}); });
parent.appendChild(frag);
}; };
search.addEventListener("input", function() { search.addEventListener("input", applyFilter);
applyFilter();
});
applySort(); applySort();
applyFilter(); applyFilter();
extraNetworksApplySort[tabname_full] = applySort; extraNetworksApplySort[tabname_full] = applySort;
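In master's applySort the sort key comes from whichever sort button carries the extra-network-control--enabled class, and the re-ordered cards are re-attached through a single DocumentFragment; v1.8.0 instead reads a data-sortmode attribute and moves each card back to its original parent one by one. A reduced sketch of the master-style re-ordering, assuming a container whose .card children each carry a data-sort-name attribute:

    // Re-order the .card children of `parent` by their data-sort-name attribute.
    function sortCards(parent, reverse) {
        const cards = Array.from(parent.querySelectorAll('.card'));
        cards.sort(function(a, b) {
            const ka = a.dataset.sortName;
            const kb = b.dataset.sortName;
            if (!isNaN(ka) && !isNaN(kb)) return parseInt(ka) - parseInt(kb);
            return ka < kb ? -1 : (ka > kb ? 1 : 0);
        });
        if (reverse) cards.reverse();
        parent.innerHTML = '';                            // detach everything once
        const frag = document.createDocumentFragment();
        cards.forEach(card => frag.appendChild(card));    // one insertion instead of one per card
        parent.appendChild(frag);
    }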
@ -276,15 +272,6 @@ function saveCardPreview(event, tabname, filename) {
event.preventDefault(); event.preventDefault();
} }
function extraNetworksSearchButton(tabname, extra_networks_tabname, event) {
var searchTextarea = gradioApp().querySelector("#" + tabname + "_" + extra_networks_tabname + "_extra_search");
var button = event.target;
var text = button.classList.contains("search-all") ? "" : button.textContent.trim();
searchTextarea.value = text;
updateInput(searchTextarea);
}
function extraNetworksTreeProcessFileClick(event, btn, tabname, extra_networks_tabname) { function extraNetworksTreeProcessFileClick(event, btn, tabname, extra_networks_tabname) {
/** /**
* Processes `onclick` events when user clicks on files in tree. * Processes `onclick` events when user clicks on files in tree.
@ -303,7 +290,7 @@ function extraNetworksTreeProcessDirectoryClick(event, btn, tabname, extra_netwo
* Processes `onclick` events when user clicks on directories in tree. * Processes `onclick` events when user clicks on directories in tree.
* *
* Here is how the tree reacts to clicks for various states: * Here is how the tree reacts to clicks for various states:
* unselected unopened directory: Directory is selected and expanded. * unselected unopened directory: Diretory is selected and expanded.
* unselected opened directory: Directory is selected. * unselected opened directory: Directory is selected.
* selected opened directory: Directory is collapsed and deselected. * selected opened directory: Directory is collapsed and deselected.
* chevron is clicked: Directory is expanded or collapsed. Selected state unchanged. * chevron is clicked: Directory is expanded or collapsed. Selected state unchanged.
@ -396,17 +383,36 @@ function extraNetworksTreeOnClick(event, tabname, extra_networks_tabname) {
} }
function extraNetworksControlSortOnClick(event, tabname, extra_networks_tabname) { function extraNetworksControlSortOnClick(event, tabname, extra_networks_tabname) {
/** Handles `onclick` events for Sort Mode buttons. */ /**
* Handles `onclick` events for the Sort Mode button.
var self = event.currentTarget; *
var parent = event.currentTarget.parentElement; * Modifies the data attributes of the Sort Mode button to cycle between
* various sorting modes.
parent.querySelectorAll('.extra-network-control--sort').forEach(function(x) { *
x.classList.remove('extra-network-control--enabled'); * @param event The generated event.
}); * @param tabname The name of the active tab in the sd webui. Ex: txt2img, img2img, etc.
* @param extra_networks_tabname The id of the active extraNetworks tab. Ex: lora, checkpoints, etc.
self.classList.add('extra-network-control--enabled'); */
var curr_mode = event.currentTarget.dataset.sortmode;
var el_sort_dir = gradioApp().querySelector("#" + tabname + "_" + extra_networks_tabname + "_extra_sort_dir");
var sort_dir = el_sort_dir.dataset.sortdir;
if (curr_mode == "path") {
event.currentTarget.dataset.sortmode = "name";
event.currentTarget.dataset.sortkey = "sortName-" + sort_dir + "-640";
event.currentTarget.setAttribute("title", "Sort by filename");
} else if (curr_mode == "name") {
event.currentTarget.dataset.sortmode = "date_created";
event.currentTarget.dataset.sortkey = "sortDate_created-" + sort_dir + "-640";
event.currentTarget.setAttribute("title", "Sort by date created");
} else if (curr_mode == "date_created") {
event.currentTarget.dataset.sortmode = "date_modified";
event.currentTarget.dataset.sortkey = "sortDate_modified-" + sort_dir + "-640";
event.currentTarget.setAttribute("title", "Sort by date modified");
} else {
event.currentTarget.dataset.sortmode = "path";
event.currentTarget.dataset.sortkey = "sortPath-" + sort_dir + "-640";
event.currentTarget.setAttribute("title", "Sort by path");
}
applyExtraNetworkSort(tabname + "_" + extra_networks_tabname); applyExtraNetworkSort(tabname + "_" + extra_networks_tabname);
} }
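So the v1.8.0 handler cycles one button through path, name, date created and date modified, while master renders one control per sort key and simply marks the clicked one as enabled for applySort to pick up. A tiny sketch of the master-style selection, reusing the class names from the hunk above:

    // Mark only the clicked sort control as enabled; applySort() later reads
    // its data-sortkey when re-ordering the cards.
    function selectSortButton(event) {
        const clicked = event.currentTarget;
        clicked.parentElement.querySelectorAll('.extra-network-control--sort')
            .forEach(btn => btn.classList.remove('extra-network-control--enabled'));
        clicked.classList.add('extra-network-control--enabled');
    }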
@ -441,12 +447,8 @@ function extraNetworksControlTreeViewOnClick(event, tabname, extra_networks_tabn
* @param tabname The name of the active tab in the sd webui. Ex: txt2img, img2img, etc. * @param tabname The name of the active tab in the sd webui. Ex: txt2img, img2img, etc.
* @param extra_networks_tabname The id of the active extraNetworks tab. Ex: lora, checkpoints, etc. * @param extra_networks_tabname The id of the active extraNetworks tab. Ex: lora, checkpoints, etc.
*/ */
var button = event.currentTarget; gradioApp().getElementById(tabname + "_" + extra_networks_tabname + "_tree").classList.toggle("hidden");
button.classList.toggle("extra-network-control--enabled"); event.currentTarget.classList.toggle("extra-network-control--enabled");
var show = !button.classList.contains("extra-network-control--enabled");
var pane = gradioApp().getElementById(tabname + "_" + extra_networks_tabname + "_pane");
pane.classList.toggle("extra-network-dirs-hidden", show);
} }
function extraNetworksControlRefreshOnClick(event, tabname, extra_networks_tabname) { function extraNetworksControlRefreshOnClick(event, tabname, extra_networks_tabname) {
@ -507,76 +509,12 @@ function popupId(id) {
popup(storedPopupIds[id]); popup(storedPopupIds[id]);
} }
function extraNetworksFlattenMetadata(obj) {
const result = {};
// Convert any stringified JSON objects to actual objects
for (const key of Object.keys(obj)) {
if (typeof obj[key] === 'string') {
try {
const parsed = JSON.parse(obj[key]);
if (parsed && typeof parsed === 'object') {
obj[key] = parsed;
}
} catch (error) {
continue;
}
}
}
// Flatten the object
for (const key of Object.keys(obj)) {
if (typeof obj[key] === 'object' && obj[key] !== null) {
const nested = extraNetworksFlattenMetadata(obj[key]);
for (const nestedKey of Object.keys(nested)) {
result[`${key}/${nestedKey}`] = nested[nestedKey];
}
} else {
result[key] = obj[key];
}
}
// Special case for handling modelspec keys
for (const key of Object.keys(result)) {
if (key.startsWith("modelspec.")) {
result[key.replaceAll(".", "/")] = result[key];
delete result[key];
}
}
// Add empty keys to designate hierarchy
for (const key of Object.keys(result)) {
const parts = key.split("/");
for (let i = 1; i < parts.length; i++) {
const parent = parts.slice(0, i).join("/");
if (!result[parent]) {
result[parent] = "";
}
}
}
return result;
}
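extraNetworksFlattenMetadata, present only on master, parses stringified JSON values, flattens nested objects into slash-separated keys, rewrites modelspec.* dots into slashes, and inserts empty parent keys so the metadata table can render a hierarchy. A small worked example, with invented input values and assuming the function above is in scope:

    const meta = {
        "ss_tag_frequency": "{\"style\": {\"anime\": 5}}",   // a stringified JSON value
        "modelspec.title": "My LoRA"
    };
    console.log(extraNetworksFlattenMetadata(meta));
    // -> {
    //      "ss_tag_frequency/style/anime": 5,
    //      "modelspec/title": "My LoRA",
    //      "ss_tag_frequency": "",          // empty keys mark the parent levels
    //      "ss_tag_frequency/style": "",
    //      "modelspec": ""
    //    }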
function extraNetworksShowMetadata(text) { function extraNetworksShowMetadata(text) {
try {
let parsed = JSON.parse(text);
if (parsed && typeof parsed === 'object') {
parsed = extraNetworksFlattenMetadata(parsed);
const table = createVisualizationTable(parsed, 0);
popup(table);
return;
}
} catch (error) {
console.error(error);
}
var elem = document.createElement('pre'); var elem = document.createElement('pre');
elem.classList.add('popup-metadata'); elem.classList.add('popup-metadata');
elem.textContent = text; elem.textContent = text;
popup(elem); popup(elem);
return;
} }
function requestGet(url, data, handler, errorHandler) { function requestGet(url, data, handler, errorHandler) {
@ -605,18 +543,16 @@ function requestGet(url, data, handler, errorHandler) {
xhr.send(js); xhr.send(js);
} }
function extraNetworksCopyCardPath(event) { function extraNetworksCopyCardPath(event, path) {
navigator.clipboard.writeText(event.target.getAttribute("data-clipboard-text")); navigator.clipboard.writeText(path);
event.stopPropagation(); event.stopPropagation();
} }
function extraNetworksRequestMetadata(event, extraPage) { function extraNetworksRequestMetadata(event, extraPage, cardName) {
var showError = function() { var showError = function() {
extraNetworksShowMetadata("there was an error getting metadata"); extraNetworksShowMetadata("there was an error getting metadata");
}; };
var cardName = event.target.parentElement.parentElement.getAttribute("data-name");
requestGet("./sd_extra_networks/metadata", {page: extraPage, item: cardName}, function(data) { requestGet("./sd_extra_networks/metadata", {page: extraPage, item: cardName}, function(data) {
if (data && data.metadata) { if (data && data.metadata) {
extraNetworksShowMetadata(data.metadata); extraNetworksShowMetadata(data.metadata);
@ -630,7 +566,7 @@ function extraNetworksRequestMetadata(event, extraPage) {
var extraPageUserMetadataEditors = {}; var extraPageUserMetadataEditors = {};
function extraNetworksEditUserMetadata(event, tabname, extraPage) { function extraNetworksEditUserMetadata(event, tabname, extraPage, cardName) {
var id = tabname + '_' + extraPage + '_edit_user_metadata'; var id = tabname + '_' + extraPage + '_edit_user_metadata';
var editor = extraPageUserMetadataEditors[id]; var editor = extraPageUserMetadataEditors[id];
@ -642,7 +578,6 @@ function extraNetworksEditUserMetadata(event, tabname, extraPage) {
extraPageUserMetadataEditors[id] = editor; extraPageUserMetadataEditors[id] = editor;
} }
var cardName = event.target.parentElement.parentElement.getAttribute("data-name");
editor.nameTextarea.value = cardName; editor.nameTextarea.value = cardName;
updateInput(editor.nameTextarea); updateInput(editor.nameTextarea);
@ -6,8 +6,6 @@ function closeModal() {
function showModal(event) { function showModal(event) {
const source = event.target || event.srcElement; const source = event.target || event.srcElement;
const modalImage = gradioApp().getElementById("modalImage"); const modalImage = gradioApp().getElementById("modalImage");
const modalToggleLivePreviewBtn = gradioApp().getElementById("modal_toggle_live_preview");
modalToggleLivePreviewBtn.innerHTML = opts.js_live_preview_in_modal_lightbox ? "&#x1F5C7;" : "&#x1F5C6;";
const lb = gradioApp().getElementById("lightboxModal"); const lb = gradioApp().getElementById("lightboxModal");
modalImage.src = source.src; modalImage.src = source.src;
if (modalImage.style.display === 'none') { if (modalImage.style.display === 'none') {
@ -53,7 +51,14 @@ function modalImageSwitch(offset) {
var galleryButtons = all_gallery_buttons(); var galleryButtons = all_gallery_buttons();
if (galleryButtons.length > 1) { if (galleryButtons.length > 1) {
var result = selected_gallery_index(); var currentButton = selected_gallery_button();
var result = -1;
galleryButtons.forEach(function(v, i) {
if (v == currentButton) {
result = i;
}
});
if (result != -1) { if (result != -1) {
var nextButton = galleryButtons[negmod((result + offset), galleryButtons.length)]; var nextButton = galleryButtons[negmod((result + offset), galleryButtons.length)];
@ -126,15 +131,19 @@ function setupImageForLightbox(e) {
e.style.cursor = 'pointer'; e.style.cursor = 'pointer';
e.style.userSelect = 'none'; e.style.userSelect = 'none';
e.addEventListener('mousedown', function(evt) { var isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1;
// For Firefox, listening on click first switched to next image then shows the lightbox.
// If you know how to fix this without switching to mousedown event, please.
// For other browsers the event is click to make it possiblr to drag picture.
var event = isFirefox ? 'mousedown' : 'click';
e.addEventListener(event, function(evt) {
if (evt.button == 1) { if (evt.button == 1) {
open(evt.target.src); open(evt.target.src);
evt.preventDefault(); evt.preventDefault();
return; return;
} }
}, true);
e.addEventListener('click', function(evt) {
if (!opts.js_modal_lightbox || evt.button != 0) return; if (!opts.js_modal_lightbox || evt.button != 0) return;
modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed); modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed);
@ -154,13 +163,6 @@ function modalZoomToggle(event) {
event.stopPropagation(); event.stopPropagation();
} }
function modalLivePreviewToggle(event) {
const modalToggleLivePreview = gradioApp().getElementById("modal_toggle_live_preview");
opts.js_live_preview_in_modal_lightbox = !opts.js_live_preview_in_modal_lightbox;
modalToggleLivePreview.innerHTML = opts.js_live_preview_in_modal_lightbox ? "&#x1F5C7;" : "&#x1F5C6;";
event.stopPropagation();
}
function modalTileImageToggle(event) { function modalTileImageToggle(event) {
const modalImage = gradioApp().getElementById("modalImage"); const modalImage = gradioApp().getElementById("modalImage");
const modal = gradioApp().getElementById("lightboxModal"); const modal = gradioApp().getElementById("lightboxModal");
@ -218,14 +220,6 @@ document.addEventListener("DOMContentLoaded", function() {
modalSave.title = "Save Image(s)"; modalSave.title = "Save Image(s)";
modalControls.appendChild(modalSave); modalControls.appendChild(modalSave);
const modalToggleLivePreview = document.createElement('span');
modalToggleLivePreview.className = 'modalToggleLivePreview cursor';
modalToggleLivePreview.id = "modal_toggle_live_preview";
modalToggleLivePreview.innerHTML = "&#x1F5C6;";
modalToggleLivePreview.onclick = modalLivePreviewToggle;
modalToggleLivePreview.title = "Toggle live preview";
modalControls.appendChild(modalToggleLivePreview);
const modalClose = document.createElement('span'); const modalClose = document.createElement('span');
modalClose.className = 'modalClose cursor'; modalClose.className = 'modalClose cursor';
modalClose.innerHTML = '&times;'; modalClose.innerHTML = '&times;';
@ -33,141 +33,120 @@ function createRow(table, cellName, items) {
return res; return res;
} }
function createVisualizationTable(data, cutoff = 0, sort = "") { function showProfile(path, cutoff = 0.05) {
var table = document.createElement('table'); requestGet(path, {}, function(data) {
table.className = 'popup-table'; var table = document.createElement('table');
table.className = 'popup-table';
var keys = Object.keys(data); data.records['total'] = data.total;
if (sort === "number") { var keys = Object.keys(data.records).sort(function(a, b) {
keys = keys.sort(function(a, b) { return data.records[b] - data.records[a];
return data[b] - data[a];
}); });
} else { var items = keys.map(function(x) {
keys = keys.sort(); return {key: x, parts: x.split('/'), time: data.records[x]};
}
var items = keys.map(function(x) {
return {key: x, parts: x.split('/'), value: data[x]};
});
var maxLength = items.reduce(function(a, b) {
return Math.max(a, b.parts.length);
}, 0);
var cols = createRow(
table,
'th',
[
cutoff === 0 ? 'key' : 'record',
cutoff === 0 ? 'value' : 'seconds'
]
);
cols[0].colSpan = maxLength;
function arraysEqual(a, b) {
return !(a < b || b < a);
}
var addLevel = function(level, parent, hide) {
var matching = items.filter(function(x) {
return x.parts[level] && !x.parts[level + 1] && arraysEqual(x.parts.slice(0, level), parent);
}); });
if (sort === "number") { var maxLength = items.reduce(function(a, b) {
matching = matching.sort(function(a, b) { return Math.max(a, b.parts.length);
return b.value - a.value; }, 0);
});
} else { var cols = createRow(table, 'th', ['record', 'seconds']);
matching = matching.sort(); cols[0].colSpan = maxLength;
function arraysEqual(a, b) {
return !(a < b || b < a);
} }
var othersTime = 0;
var othersList = [];
var othersRows = [];
var childrenRows = [];
matching.forEach(function(x) {
var visible = (cutoff === 0 && !hide) || (x.value >= cutoff && !hide);
var cells = []; var addLevel = function(level, parent, hide) {
for (var i = 0; i < maxLength; i++) { var matching = items.filter(function(x) {
cells.push(x.parts[i]); return x.parts[level] && !x.parts[level + 1] && arraysEqual(x.parts.slice(0, level), parent);
} });
cells.push(cutoff === 0 ? x.value : x.value.toFixed(3)); var sorted = matching.sort(function(a, b) {
var cols = createRow(table, 'td', cells); return b.time - a.time;
for (i = 0; i < level; i++) { });
cols[i].className = 'muted'; var othersTime = 0;
} var othersList = [];
var othersRows = [];
var childrenRows = [];
sorted.forEach(function(x) {
var visible = x.time >= cutoff && !hide;
var tr = cols[0].parentNode; var cells = [];
if (!visible) { for (var i = 0; i < maxLength; i++) {
tr.classList.add("hidden"); cells.push(x.parts[i]);
} }
cells.push(x.time.toFixed(3));
var cols = createRow(table, 'td', cells);
for (i = 0; i < level; i++) {
cols[i].className = 'muted';
}
if (cutoff === 0 || x.value >= cutoff) { var tr = cols[0].parentNode;
childrenRows.push(tr); if (!visible) {
} else { tr.classList.add("hidden");
othersTime += x.value; }
othersList.push(x.parts[level]);
othersRows.push(tr); if (x.time >= cutoff) {
} childrenRows.push(tr);
} else {
othersTime += x.time;
othersList.push(x.parts[level]);
othersRows.push(tr);
}
var children = addLevel(level + 1, parent.concat([x.parts[level]]), true);
if (children.length > 0) {
var cell = cols[level];
var onclick = function() {
cell.classList.remove("link");
cell.removeEventListener("click", onclick);
children.forEach(function(x) {
x.classList.remove("hidden");
});
};
cell.classList.add("link");
cell.addEventListener("click", onclick);
}
});
if (othersTime > 0) {
var cells = [];
for (var i = 0; i < maxLength; i++) {
cells.push(parent[i]);
}
cells.push(othersTime.toFixed(3));
cells[level] = 'others';
var cols = createRow(table, 'td', cells);
for (i = 0; i < level; i++) {
cols[i].className = 'muted';
}
var children = addLevel(level + 1, parent.concat([x.parts[level]]), true);
if (children.length > 0) {
var cell = cols[level]; var cell = cols[level];
var tr = cell.parentNode;
var onclick = function() { var onclick = function() {
tr.classList.add("hidden");
cell.classList.remove("link"); cell.classList.remove("link");
cell.removeEventListener("click", onclick); cell.removeEventListener("click", onclick);
children.forEach(function(x) { othersRows.forEach(function(x) {
x.classList.remove("hidden"); x.classList.remove("hidden");
}); });
}; };
cell.title = othersList.join(", ");
cell.classList.add("link"); cell.classList.add("link");
cell.addEventListener("click", onclick); cell.addEventListener("click", onclick);
}
});
if (othersTime > 0) { if (hide) {
var cells = []; tr.classList.add("hidden");
for (var i = 0; i < maxLength; i++) { }
cells.push(parent[i]);
} childrenRows.push(tr);
cells.push(othersTime.toFixed(3));
cells[level] = 'others';
var cols = createRow(table, 'td', cells);
for (i = 0; i < level; i++) {
cols[i].className = 'muted';
} }
var cell = cols[level]; return childrenRows;
var tr = cell.parentNode; };
var onclick = function() {
tr.classList.add("hidden");
cell.classList.remove("link");
cell.removeEventListener("click", onclick);
othersRows.forEach(function(x) {
x.classList.remove("hidden");
});
};
cell.title = othersList.join(", "); addLevel(0, []);
cell.classList.add("link");
cell.addEventListener("click", onclick);
if (hide) {
tr.classList.add("hidden");
}
childrenRows.push(tr);
}
return childrenRows;
};
addLevel(0, []);
return table;
}
function showProfile(path, cutoff = 0.05) {
requestGet(path, {}, function(data) {
data.records['total'] = data.total;
const table = createVisualizationTable(data.records, cutoff, "number");
popup(table); popup(table);
}); });
} }
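On master, showProfile becomes a thin wrapper: it merges the total into data.records and delegates to createVisualizationTable(data, cutoff, sort), which now also serves the metadata popup. A hedged usage sketch with fabricated timing data:

    // Hypothetical profiling records, in seconds, keyed by "parent/child" paths.
    const records = {
        'startup/load model': 3.217,
        'startup/load scripts': 0.541,
        'startup': 3.912,
        'total': 4.105
    };
    // cutoff hides rows cheaper than 0.05s behind a clickable "others" row;
    // sort = "number" orders rows by value instead of by key.
    const table = createVisualizationTable(records, 0.05, 'number');
    popup(table);   // popup() is the webui helper that shows an element in a modal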
@ -76,26 +76,6 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
var dateStart = new Date(); var dateStart = new Date();
var wasEverActive = false; var wasEverActive = false;
var parentProgressbar = progressbarContainer.parentNode; var parentProgressbar = progressbarContainer.parentNode;
var wakeLock = null;
var requestWakeLock = async function() {
if (!opts.prevent_screen_sleep_during_generation || wakeLock) return;
try {
wakeLock = await navigator.wakeLock.request('screen');
} catch (err) {
console.error('Wake Lock is not supported.');
}
};
var releaseWakeLock = async function() {
if (!opts.prevent_screen_sleep_during_generation || !wakeLock) return;
try {
await wakeLock.release();
wakeLock = null;
} catch (err) {
console.error('Wake Lock release failed', err);
}
};
var divProgress = document.createElement('div'); var divProgress = document.createElement('div');
divProgress.className = 'progressDiv'; divProgress.className = 'progressDiv';
@ -109,7 +89,6 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
var livePreview = null; var livePreview = null;
var removeProgressBar = function() { var removeProgressBar = function() {
releaseWakeLock();
if (!divProgress) return; if (!divProgress) return;
setTitle(""); setTitle("");
@ -121,7 +100,6 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
}; };
var funProgress = function(id_task) { var funProgress = function(id_task) {
requestWakeLock();
request("./internal/progress", {id_task: id_task, live_preview: false}, function(res) { request("./internal/progress", {id_task: id_task, live_preview: false}, function(res) {
if (res.completed) { if (res.completed) {
removeProgressBar(); removeProgressBar();
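The master side acquires a screen wake lock while a generation is in progress (guarded by the prevent_screen_sleep_during_generation option) and releases it when the progress bar is removed. The underlying Screen Wake Lock API, reduced to its essentials:

    let wakeLock = null;

    async function keepScreenAwake() {
        try {
            wakeLock = await navigator.wakeLock.request('screen');   // rejects if unsupported or not permitted
        } catch (err) {
            console.error('Wake Lock is not supported.', err);
        }
    }

    async function letScreenSleep() {
        if (!wakeLock) return;
        await wakeLock.release();
        wakeLock = null;
    }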
@ -22,9 +22,6 @@
} }
function displayResizeHandle(parent) { function displayResizeHandle(parent) {
if (!parent.needHideOnMoblie) {
return true;
}
if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) { if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) {
parent.style.display = 'flex'; parent.style.display = 'flex';
parent.resizeHandle.style.display = "none"; parent.resizeHandle.style.display = "none";
@ -44,7 +41,7 @@
const ratio = newParentWidth / oldParentWidth; const ratio = newParentWidth / oldParentWidth;
const newWidthL = Math.max(Math.floor(ratio * widthL), parent.minLeftColWidth); const newWidthL = Math.max(Math.floor(ratio * widthL), GRADIO_MIN_WIDTH);
setLeftColGridTemplate(parent, newWidthL); setLeftColGridTemplate(parent, newWidthL);
R.parentWidth = newParentWidth; R.parentWidth = newParentWidth;
@ -67,24 +64,7 @@
parent.style.display = 'grid'; parent.style.display = 'grid';
parent.style.gap = '0'; parent.style.gap = '0';
let leftColTemplate = ""; const gridTemplateColumns = `${parent.children[0].style.flexGrow}fr ${PAD}px ${parent.children[1].style.flexGrow}fr`;
if (parent.children[0].style.flexGrow) {
leftColTemplate = `${parent.children[0].style.flexGrow}fr`;
parent.minLeftColWidth = GRADIO_MIN_WIDTH;
parent.minRightColWidth = GRADIO_MIN_WIDTH;
parent.needHideOnMoblie = true;
} else {
leftColTemplate = parent.children[0].style.flexBasis;
parent.minLeftColWidth = parent.children[0].style.flexBasis.slice(0, -2) / 2;
parent.minRightColWidth = 0;
parent.needHideOnMoblie = false;
}
if (!leftColTemplate) {
leftColTemplate = '1fr';
}
const gridTemplateColumns = `${leftColTemplate} ${PAD}px ${parent.children[1].style.flexGrow}fr`;
parent.style.gridTemplateColumns = gridTemplateColumns; parent.style.gridTemplateColumns = gridTemplateColumns;
parent.style.originalGridTemplateColumns = gridTemplateColumns; parent.style.originalGridTemplateColumns = gridTemplateColumns;
@ -152,7 +132,7 @@
} else { } else {
delta = R.screenX - evt.changedTouches[0].screenX; delta = R.screenX - evt.changedTouches[0].screenX;
} }
const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - R.parent.minRightColWidth - PAD), R.parent.minLeftColWidth); const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH);
setLeftColGridTemplate(R.parent, leftColWidth); setLeftColGridTemplate(R.parent, leftColWidth);
} }
}); });
@ -191,15 +171,10 @@
setupResizeHandle = setup; setupResizeHandle = setup;
})(); })();
onUiLoaded(function() {
function setupAllResizeHandles() {
for (var elem of gradioApp().querySelectorAll('.resize-handle-row')) { for (var elem of gradioApp().querySelectorAll('.resize-handle-row')) {
if (!elem.querySelector('.resize-handle') && !elem.children[0].classList.contains("hidden")) { if (!elem.querySelector('.resize-handle')) {
setupResizeHandle(elem); setupResizeHandle(elem);
} }
} }
} });
onUiLoaded(setupAllResizeHandles);
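The resize handle switches a two-column row to display:grid and rewrites grid-template-columns as the user drags; master additionally derives per-row minimum widths from the left column's flex-basis, where v1.8.0 clamps everything to the single GRADIO_MIN_WIDTH constant. A stripped-down sketch of the clamp-and-write step, with illustrative constant values:

    const PAD = 16;
    const MIN_COL_WIDTH = 320;   // stand-in for GRADIO_MIN_WIDTH

    // Clamp the requested left-column width and write it back as a grid template.
    function setLeftColWidth(row, requestedPx) {
        const max = row.offsetWidth - MIN_COL_WIDTH - PAD;
        const width = Math.max(Math.min(requestedPx, max), MIN_COL_WIDTH);
        row.style.gridTemplateColumns = `${width}px ${PAD}px 1fr`;
    }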
@ -26,14 +26,6 @@ function selected_gallery_index() {
return all_gallery_buttons().findIndex(elem => elem.classList.contains('selected')); return all_gallery_buttons().findIndex(elem => elem.classList.contains('selected'));
} }
function gallery_container_buttons(gallery_container) {
return gradioApp().querySelectorAll(`#${gallery_container} .thumbnail-item.thumbnail-small`);
}
function selected_gallery_index_id(gallery_container) {
return Array.from(gallery_container_buttons(gallery_container)).findIndex(elem => elem.classList.contains('selected'));
}
function extract_image_from_gallery(gallery) { function extract_image_from_gallery(gallery) {
if (gallery.length == 0) { if (gallery.length == 0) {
return [null]; return [null];
@ -144,7 +136,8 @@ function showSubmitInterruptingPlaceholder(tabname) {
function showRestoreProgressButton(tabname, show) { function showRestoreProgressButton(tabname, show) {
var button = gradioApp().getElementById(tabname + "_restore_progress"); var button = gradioApp().getElementById(tabname + "_restore_progress");
if (!button) return; if (!button) return;
button.style.setProperty('display', show ? 'flex' : 'none', 'important');
button.style.display = show ? "flex" : "none";
} }
function submit() { function submit() {
@ -216,7 +209,6 @@ function restoreProgressTxt2img() {
var id = localGet("txt2img_task_id"); var id = localGet("txt2img_task_id");
if (id) { if (id) {
showSubmitInterruptingPlaceholder('txt2img');
requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() { requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() {
showSubmitButtons('txt2img', true); showSubmitButtons('txt2img', true);
}, null, 0); }, null, 0);
@ -231,7 +223,6 @@ function restoreProgressImg2img() {
var id = localGet("img2img_task_id"); var id = localGet("img2img_task_id");
if (id) { if (id) {
showSubmitInterruptingPlaceholder('img2img');
requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() { requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() {
showSubmitButtons('img2img', true); showSubmitButtons('img2img', true);
}, null, 0); }, null, 0);
@ -307,7 +298,6 @@ onAfterUiUpdate(function() {
var jsdata = textarea.value; var jsdata = textarea.value;
opts = JSON.parse(jsdata); opts = JSON.parse(jsdata);
executeCallbacks(optionsAvailableCallbacks); /*global optionsAvailableCallbacks*/
executeCallbacks(optionsChangedCallbacks); /*global optionsChangedCallbacks*/ executeCallbacks(optionsChangedCallbacks); /*global optionsChangedCallbacks*/
Object.defineProperty(textarea, 'value', { Object.defineProperty(textarea, 'value', {
@ -346,8 +336,8 @@ onOptionsChanged(function() {
let txt2img_textarea, img2img_textarea = undefined; let txt2img_textarea, img2img_textarea = undefined;
function restart_reload() { function restart_reload() {
document.body.style.backgroundColor = "var(--background-fill-primary)";
document.body.innerHTML = '<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading...</h1>'; document.body.innerHTML = '<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading...</h1>';
var requestPing = function() { var requestPing = function() {
requestGet("./internal/ping", {}, function(data) { requestGet("./internal/ping", {}, function(data) {
location.reload(); location.reload();
@ -421,7 +411,7 @@ function switchWidthHeight(tabname) {
var onEditTimers = {}; var onEditTimers = {};
// calls func after afterMs milliseconds has passed since the input elem has been edited by user // calls func after afterMs milliseconds has passed since the input elem has beed enited by user
function onEdit(editId, elem, afterMs, func) { function onEdit(editId, elem, afterMs, func) {
var edited = function() { var edited = function() {
var existingTimer = onEditTimers[editId]; var existingTimer = onEditTimers[editId];
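onEdit, whose comment gets its typo fixed on the master side here, is a per-id debounce: each edit resets a timer and func only fires once the input has been quiet for afterMs milliseconds. A generic version of the pattern, independent of the webui's onEditTimers registry:

    // Run func only after `elem` has stopped receiving input for afterMs milliseconds.
    function debounceInput(elem, afterMs, func) {
        let timer = null;
        elem.addEventListener('input', function() {
            if (timer) clearTimeout(timer);
            timer = setTimeout(func, afterMs);
        });
    }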
@ -17,13 +17,13 @@ from fastapi.encoders import jsonable_encoder
from secrets import compare_digest from secrets import compare_digest
import modules.shared as shared import modules.shared as shared
from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart, shared_items, script_callbacks, infotext_utils, sd_models, sd_schedulers from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart, shared_items, script_callbacks, infotext_utils, sd_models
from modules.api import models from modules.api import models
from modules.shared import opts from modules.shared import opts
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
from modules.textual_inversion.textual_inversion import create_embedding, train_embedding from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
from PIL import PngImagePlugin from PIL import PngImagePlugin, Image
from modules.sd_models_config import find_checkpoint_config_near_filename from modules.sd_models_config import find_checkpoint_config_near_filename
from modules.realesrgan_model import get_realesrgan_models from modules.realesrgan_model import get_realesrgan_models
from modules import devices from modules import devices
@ -43,7 +43,7 @@ def script_name_to_index(name, scripts):
def validate_sampler_name(name): def validate_sampler_name(name):
config = sd_samplers.all_samplers_map.get(name, None) config = sd_samplers.all_samplers_map.get(name, None)
if config is None: if config is None:
raise HTTPException(status_code=400, detail="Sampler not found") raise HTTPException(status_code=404, detail="Sampler not found")
return name return name
@ -85,7 +85,7 @@ def decode_base64_to_image(encoding):
headers = {'user-agent': opts.api_useragent} if opts.api_useragent else {} headers = {'user-agent': opts.api_useragent} if opts.api_useragent else {}
response = requests.get(encoding, timeout=30, headers=headers) response = requests.get(encoding, timeout=30, headers=headers)
try: try:
image = images.read(BytesIO(response.content)) image = Image.open(BytesIO(response.content))
return image return image
except Exception as e: except Exception as e:
raise HTTPException(status_code=500, detail="Invalid image url") from e raise HTTPException(status_code=500, detail="Invalid image url") from e
@ -93,7 +93,7 @@ def decode_base64_to_image(encoding):
if encoding.startswith("data:image/"): if encoding.startswith("data:image/"):
encoding = encoding.split(";")[1].split(",")[1] encoding = encoding.split(";")[1].split(",")[1]
try: try:
image = images.read(BytesIO(base64.b64decode(encoding))) image = Image.open(BytesIO(base64.b64decode(encoding)))
return image return image
except Exception as e: except Exception as e:
raise HTTPException(status_code=500, detail="Invalid encoded image") from e raise HTTPException(status_code=500, detail="Invalid encoded image") from e
@ -113,7 +113,7 @@ def encode_pil_to_base64(image):
image.save(output_bytes, format="PNG", pnginfo=(metadata if use_metadata else None), quality=opts.jpeg_quality) image.save(output_bytes, format="PNG", pnginfo=(metadata if use_metadata else None), quality=opts.jpeg_quality)
elif opts.samples_format.lower() in ("jpg", "jpeg", "webp"): elif opts.samples_format.lower() in ("jpg", "jpeg", "webp"):
if image.mode in ("RGBA", "P"): if image.mode == "RGBA":
image = image.convert("RGB") image = image.convert("RGB")
parameters = image.info.get('parameters', None) parameters = image.info.get('parameters', None)
exif_bytes = piexif.dump({ exif_bytes = piexif.dump({
@ -221,7 +221,6 @@ class Api:
self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"]) self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel) self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel)
self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=list[models.SamplerItem]) self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=list[models.SamplerItem])
self.add_api_route("/sdapi/v1/schedulers", self.get_schedulers, methods=["GET"], response_model=list[models.SchedulerItem])
self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=list[models.UpscalerItem]) self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=list[models.UpscalerItem])
self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, methods=["GET"], response_model=list[models.LatentUpscalerModeItem]) self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, methods=["GET"], response_model=list[models.LatentUpscalerModeItem])
self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=list[models.SDModelItem]) self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=list[models.SDModelItem])
@ -361,7 +360,7 @@ class Api:
return script_args return script_args
def apply_infotext(self, request, tabname, *, script_runner=None, mentioned_script_args=None): def apply_infotext(self, request, tabname, *, script_runner=None, mentioned_script_args=None):
"""Processes `infotext` field from the `request`, and sets other fields of the `request` according to what's in infotext. """Processes `infotext` field from the `request`, and sets other fields of the `request` accoring to what's in infotext.
If request already has a field set, and that field is encountered in infotext too, the value from infotext is ignored. If request already has a field set, and that field is encountered in infotext too, the value from infotext is ignored.
@ -372,7 +371,7 @@ class Api:
return {} return {}
possible_fields = infotext_utils.paste_fields[tabname]["fields"] possible_fields = infotext_utils.paste_fields[tabname]["fields"]
set_fields = request.model_dump(exclude_unset=True) if hasattr(request, "request") else request.dict(exclude_unset=True) # pydantic v1/v2 have different names for this set_fields = request.model_dump(exclude_unset=True) if hasattr(request, "request") else request.dict(exclude_unset=True) # pydantic v1/v2 have differenrt names for this
params = infotext_utils.parse_generation_parameters(request.infotext) params = infotext_utils.parse_generation_parameters(request.infotext)
def get_field_value(field, params): def get_field_value(field, params):
@ -410,8 +409,8 @@ class Api:
if request.override_settings is None: if request.override_settings is None:
request.override_settings = {} request.override_settings = {}
overridden_settings = infotext_utils.get_override_settings(params) overriden_settings = infotext_utils.get_override_settings(params)
for _, setting_name, value in overridden_settings: for _, setting_name, value in overriden_settings:
if setting_name not in request.override_settings: if setting_name not in request.override_settings:
request.override_settings[setting_name] = value request.override_settings[setting_name] = value
@ -438,19 +437,15 @@ class Api:
self.apply_infotext(txt2imgreq, "txt2img", script_runner=script_runner, mentioned_script_args=infotext_script_args) self.apply_infotext(txt2imgreq, "txt2img", script_runner=script_runner, mentioned_script_args=infotext_script_args)
selectable_scripts, selectable_script_idx = self.get_selectable_script(txt2imgreq.script_name, script_runner) selectable_scripts, selectable_script_idx = self.get_selectable_script(txt2imgreq.script_name, script_runner)
sampler, scheduler = sd_samplers.get_sampler_and_scheduler(txt2imgreq.sampler_name or txt2imgreq.sampler_index, txt2imgreq.scheduler)
populate = txt2imgreq.copy(update={ # Override __init__ params populate = txt2imgreq.copy(update={ # Override __init__ params
"sampler_name": validate_sampler_name(sampler), "sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index),
"do_not_save_samples": not txt2imgreq.save_images, "do_not_save_samples": not txt2imgreq.save_images,
"do_not_save_grid": not txt2imgreq.save_images, "do_not_save_grid": not txt2imgreq.save_images,
}) })
if populate.sampler_name: if populate.sampler_name:
populate.sampler_index = None # prevent a warning later on populate.sampler_index = None # prevent a warning later on
if not populate.scheduler and scheduler != "Automatic":
populate.scheduler = scheduler
args = vars(populate) args = vars(populate)
args.pop('script_name', None) args.pop('script_name', None)
args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them
@ -506,10 +501,9 @@ class Api:
self.apply_infotext(img2imgreq, "img2img", script_runner=script_runner, mentioned_script_args=infotext_script_args) self.apply_infotext(img2imgreq, "img2img", script_runner=script_runner, mentioned_script_args=infotext_script_args)
selectable_scripts, selectable_script_idx = self.get_selectable_script(img2imgreq.script_name, script_runner) selectable_scripts, selectable_script_idx = self.get_selectable_script(img2imgreq.script_name, script_runner)
sampler, scheduler = sd_samplers.get_sampler_and_scheduler(img2imgreq.sampler_name or img2imgreq.sampler_index, img2imgreq.scheduler)
populate = img2imgreq.copy(update={ # Override __init__ params populate = img2imgreq.copy(update={ # Override __init__ params
"sampler_name": validate_sampler_name(sampler), "sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index),
"do_not_save_samples": not img2imgreq.save_images, "do_not_save_samples": not img2imgreq.save_images,
"do_not_save_grid": not img2imgreq.save_images, "do_not_save_grid": not img2imgreq.save_images,
"mask": mask, "mask": mask,
@ -517,9 +511,6 @@ class Api:
if populate.sampler_name: if populate.sampler_name:
populate.sampler_index = None # prevent a warning later on populate.sampler_index = None # prevent a warning later on
if not populate.scheduler and scheduler != "Automatic":
populate.scheduler = scheduler
args = vars(populate) args = vars(populate)
args.pop('include_init_images', None) # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine. args.pop('include_init_images', None) # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine.
args.pop('script_name', None) args.pop('script_name', None)
@ -692,17 +683,6 @@ class Api:
def get_samplers(self): def get_samplers(self):
return [{"name": sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in sd_samplers.all_samplers] return [{"name": sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in sd_samplers.all_samplers]
def get_schedulers(self):
return [
{
"name": scheduler.name,
"label": scheduler.label,
"aliases": scheduler.aliases,
"default_rho": scheduler.default_rho,
"need_inner_model": scheduler.need_inner_model,
}
for scheduler in sd_schedulers.schedulers]
def get_upscalers(self): def get_upscalers(self):
return [ return [
{ {
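Master also registers a GET /sdapi/v1/schedulers route backed by get_schedulers above. A hedged client-side sketch of querying it from a running webui, with the base URL and error handling omitted:

    // List the available sigma schedulers and their properties.
    fetch('/sdapi/v1/schedulers')
        .then(r => r.json())
        .then(schedulers => {
            for (const s of schedulers) {
                console.log(s.name, s.label, s.aliases, s.default_rho, s.need_inner_model);
            }
        });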
@ -147,7 +147,7 @@ class ExtrasBaseRequest(BaseModel):
gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.") gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.")
codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.") codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.")
codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.") codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.")
upscaling_resize: float = Field(default=2, title="Upscaling Factor", gt=0, description="By how much to upscale the image, only used when resize_mode=0.") upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=8, description="By how much to upscale the image, only used when resize_mode=0.")
upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.") upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.")
upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.") upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.")
upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?") upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?")
@ -235,13 +235,6 @@ class SamplerItem(BaseModel):
aliases: list[str] = Field(title="Aliases") aliases: list[str] = Field(title="Aliases")
options: dict[str, str] = Field(title="Options") options: dict[str, str] = Field(title="Options")
class SchedulerItem(BaseModel):
name: str = Field(title="Name")
label: str = Field(title="Label")
aliases: Optional[list[str]] = Field(title="Aliases")
default_rho: Optional[float] = Field(title="Default Rho")
need_inner_model: Optional[bool] = Field(title="Needs Inner Model")
class UpscalerItem(BaseModel): class UpscalerItem(BaseModel):
name: str = Field(title="Name") name: str = Field(title="Name")
model_name: Optional[str] = Field(title="Model Name") model_name: Optional[str] = Field(title="Model Name")
@ -2,55 +2,48 @@ import json
import os import os
import os.path import os.path
import threading import threading
import time
import diskcache
import tqdm
from modules.paths import data_path, script_path from modules.paths import data_path, script_path
cache_filename = os.environ.get('SD_WEBUI_CACHE_FILE', os.path.join(data_path, "cache.json")) cache_filename = os.environ.get('SD_WEBUI_CACHE_FILE', os.path.join(data_path, "cache.json"))
cache_dir = os.environ.get('SD_WEBUI_CACHE_DIR', os.path.join(data_path, "cache")) cache_data = None
caches = {}
cache_lock = threading.Lock() cache_lock = threading.Lock()
dump_cache_after = None
dump_cache_thread = None
def dump_cache(): def dump_cache():
"""old function for dumping cache to disk; does nothing since diskcache.""" """
Marks cache for writing to disk. 5 seconds after no one else flags the cache for writing, it is written.
"""
pass global dump_cache_after
global dump_cache_thread
def thread_func():
global dump_cache_after
global dump_cache_thread
def make_cache(subsection: str) -> diskcache.Cache: while dump_cache_after is not None and time.time() < dump_cache_after:
return diskcache.Cache( time.sleep(1)
os.path.join(cache_dir, subsection),
size_limit=2**32, # 4 GB, culling oldest first
disk_min_file_size=2**18, # keep up to 256KB in Sqlite
)
with cache_lock:
cache_filename_tmp = cache_filename + "-"
with open(cache_filename_tmp, "w", encoding="utf8") as file:
json.dump(cache_data, file, indent=4, ensure_ascii=False)
def convert_old_cached_data(): os.replace(cache_filename_tmp, cache_filename)
try:
with open(cache_filename, "r", encoding="utf8") as file:
data = json.load(file)
except FileNotFoundError:
return
except Exception:
os.replace(cache_filename, os.path.join(script_path, "tmp", "cache.json"))
print('[ERROR] issue occurred while trying to read cache.json; old cache has been moved to tmp/cache.json')
return
total_count = sum(len(keyvalues) for keyvalues in data.values()) dump_cache_after = None
dump_cache_thread = None
with tqdm.tqdm(total=total_count, desc="converting cache") as progress: with cache_lock:
for subsection, keyvalues in data.items(): dump_cache_after = time.time() + 5
cache_obj = caches.get(subsection) if dump_cache_thread is None:
if cache_obj is None: dump_cache_thread = threading.Thread(name='cache-writer', target=thread_func)
cache_obj = make_cache(subsection) dump_cache_thread.start()
caches[subsection] = cache_obj
for key, value in keyvalues.items():
cache_obj[key] = value
progress.update(1)
def cache(subsection): def cache(subsection):
@ -61,21 +54,28 @@ def cache(subsection):
subsection (str): The subsection identifier for the cache. subsection (str): The subsection identifier for the cache.
Returns: Returns:
diskcache.Cache: The cache data for the specified subsection. dict: The cache data for the specified subsection.
""" """
cache_obj = caches.get(subsection) global cache_data
if not cache_obj:
if cache_data is None:
with cache_lock: with cache_lock:
if not os.path.exists(cache_dir) and os.path.isfile(cache_filename): if cache_data is None:
convert_old_cached_data() try:
with open(cache_filename, "r", encoding="utf8") as file:
cache_data = json.load(file)
except FileNotFoundError:
cache_data = {}
except Exception:
os.replace(cache_filename, os.path.join(script_path, "tmp", "cache.json"))
print('[ERROR] issue occurred while trying to read cache.json, move current cache to tmp/cache.json and create new cache')
cache_data = {}
cache_obj = caches.get(subsection) s = cache_data.get(subsection, {})
if not cache_obj: cache_data[subsection] = s
cache_obj = make_cache(subsection)
caches[subsection] = cache_obj
return cache_obj return s
def cached_data_for_file(subsection, title, filename, func): def cached_data_for_file(subsection, title, filename, func):
@ -1,9 +1,8 @@
import os.path
from functools import wraps from functools import wraps
import html import html
import time import time
from modules import shared, progress, errors, devices, fifo_lock, profiling from modules import shared, progress, errors, devices, fifo_lock
queue_lock = fifo_lock.FIFOLock() queue_lock = fifo_lock.FIFOLock()
@ -47,22 +46,6 @@ def wrap_gradio_gpu_call(func, extra_outputs=None):
def wrap_gradio_call(func, extra_outputs=None, add_stats=False): def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
@wraps(func)
def f(*args, **kwargs):
try:
res = func(*args, **kwargs)
finally:
shared.state.skipped = False
shared.state.interrupted = False
shared.state.stopping_generation = False
shared.state.job_count = 0
shared.state.job = ""
return res
return wrap_gradio_call_no_job(f, extra_outputs, add_stats)
def wrap_gradio_call_no_job(func, extra_outputs=None, add_stats=False):
@wraps(func) @wraps(func)
def f(*args, extra_outputs_array=extra_outputs, **kwargs): def f(*args, extra_outputs_array=extra_outputs, **kwargs):
run_memmon = shared.opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats run_memmon = shared.opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats
@ -82,6 +65,9 @@ def wrap_gradio_call_no_job(func, extra_outputs=None, add_stats=False):
arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)" arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)"
errors.report(f"{message}\n{arg_str}", exc_info=True) errors.report(f"{message}\n{arg_str}", exc_info=True)
shared.state.job = ""
shared.state.job_count = 0
if extra_outputs_array is None: if extra_outputs_array is None:
extra_outputs_array = [None, ''] extra_outputs_array = [None, '']
@ -90,6 +76,11 @@ def wrap_gradio_call_no_job(func, extra_outputs=None, add_stats=False):
devices.torch_gc() devices.torch_gc()
shared.state.skipped = False
shared.state.interrupted = False
shared.state.stopping_generation = False
shared.state.job_count = 0
if not add_stats: if not add_stats:
return tuple(res) return tuple(res)
@ -109,8 +100,8 @@ def wrap_gradio_call_no_job(func, extra_outputs=None, add_stats=False):
sys_pct = sys_peak/max(sys_total, 1) * 100 sys_pct = sys_peak/max(sys_total, 1) * 100
toltip_a = "Active: peak amount of video memory used during generation (excluding cached data)" toltip_a = "Active: peak amount of video memory used during generation (excluding cached data)"
toltip_r = "Reserved: total amount of video memory allocated by the Torch library " toltip_r = "Reserved: total amout of video memory allocated by the Torch library "
toltip_sys = "System: peak amount of video memory allocated by all running programs, out of total capacity" toltip_sys = "System: peak amout of video memory allocated by all running programs, out of total capacity"
text_a = f"<abbr title='{toltip_a}'>A</abbr>: <span class='measurement'>{active_peak/1024:.2f} GB</span>" text_a = f"<abbr title='{toltip_a}'>A</abbr>: <span class='measurement'>{active_peak/1024:.2f} GB</span>"
text_r = f"<abbr title='{toltip_r}'>R</abbr>: <span class='measurement'>{reserved_peak/1024:.2f} GB</span>" text_r = f"<abbr title='{toltip_r}'>R</abbr>: <span class='measurement'>{reserved_peak/1024:.2f} GB</span>"
@ -120,15 +111,9 @@ def wrap_gradio_call_no_job(func, extra_outputs=None, add_stats=False):
else: else:
vram_html = '' vram_html = ''
if shared.opts.profiling_enable and os.path.exists(shared.opts.profiling_filename):
profiling_html = f"<p class='profile'> [ <a href='{profiling.webpath()}' download>Profile</a> ] </p>"
else:
profiling_html = ''
# last item is always HTML # last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr><span class='measurement'>{elapsed_text}</span></p>{vram_html}{profiling_html}</div>" res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr><span class='measurement'>{elapsed_text}</span></p>{vram_html}</div>"
return tuple(res) return tuple(res)
return f return f

View File

@ -20,7 +20,6 @@ parser.add_argument("--dump-sysinfo", action='store_true', help="launch.py argum
parser.add_argument("--loglevel", type=str, help="log level; one of: CRITICAL, ERROR, WARNING, INFO, DEBUG", default=None) parser.add_argument("--loglevel", type=str, help="log level; one of: CRITICAL, ERROR, WARNING, INFO, DEBUG", default=None)
parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint") parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint")
parser.add_argument("--data-dir", type=normalized_filepath, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored") parser.add_argument("--data-dir", type=normalized_filepath, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
parser.add_argument("--models-dir", type=normalized_filepath, default=None, help="base path where models are stored; overrides --data-dir")
parser.add_argument("--config", type=normalized_filepath, default=sd_default_config, help="path to config which constructs model",) parser.add_argument("--config", type=normalized_filepath, default=sd_default_config, help="path to config which constructs model",)
parser.add_argument("--ckpt", type=normalized_filepath, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",) parser.add_argument("--ckpt", type=normalized_filepath, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
parser.add_argument("--ckpt-dir", type=normalized_filepath, default=None, help="Path to directory with stable diffusion checkpoints") parser.add_argument("--ckpt-dir", type=normalized_filepath, default=None, help="Path to directory with stable diffusion checkpoints")
@ -30,7 +29,7 @@ parser.add_argument("--gfpgan-model", type=normalized_filepath, help="GFPGAN mod
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats") parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats") parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)") parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="does not do anything") parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=normalized_filepath, default=os.path.join(data_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)") parser.add_argument("--embeddings-dir", type=normalized_filepath, default=os.path.join(data_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--textual-inversion-templates-dir", type=normalized_filepath, default=os.path.join(script_path, 'textual_inversion_templates'), help="directory with textual inversion templates") parser.add_argument("--textual-inversion-templates-dir", type=normalized_filepath, default=os.path.join(script_path, 'textual_inversion_templates'), help="directory with textual inversion templates")
parser.add_argument("--hypernetwork-dir", type=normalized_filepath, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory") parser.add_argument("--hypernetwork-dir", type=normalized_filepath, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
@ -42,7 +41,7 @@ parser.add_argument("--lowvram", action='store_true', help="enable stable diffus
parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM") parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="does not do anything") parser.add_argument("--always-batch-cond-uncond", action='store_true', help="does not do anything")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.") parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "half", "autocast"], default="autocast") parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.") parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site") parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None) parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
@ -122,7 +121,4 @@ parser.add_argument('--api-server-stop', action='store_true', help='enable serve
parser.add_argument('--timeout-keep-alive', type=int, default=30, help='set timeout_keep_alive for uvicorn') parser.add_argument('--timeout-keep-alive', type=int, default=30, help='set timeout_keep_alive for uvicorn')
parser.add_argument("--disable-all-extensions", action='store_true', help="prevent all extensions from running regardless of any other settings", default=False) parser.add_argument("--disable-all-extensions", action='store_true', help="prevent all extensions from running regardless of any other settings", default=False)
parser.add_argument("--disable-extra-extensions", action='store_true', help="prevent all extensions except built-in from running regardless of any other settings", default=False) parser.add_argument("--disable-extra-extensions", action='store_true', help="prevent all extensions except built-in from running regardless of any other settings", default=False)
parser.add_argument("--skip-load-model-at-start", action='store_true', help="if load a model at web start, only take effect when --nowebui") parser.add_argument("--skip-load-model-at-start", action='store_true', help="if load a model at web start, only take effect when --nowebui", )
parser.add_argument("--unix-filenames-sanitization", action='store_true', help="allow any symbols except '/' in filenames. May conflict with your browser and file system")
parser.add_argument("--filenames-max-length", type=int, default=128, help='maximal length of filenames of saved images. If you override it, it can conflict with your file system')
parser.add_argument("--no-prompt-history", action='store_true', help="disable read prompt from last generation feature; settings this argument will not create '--data_path/params.txt' file")

View File

@ -50,7 +50,7 @@ class FaceRestorerCodeFormer(face_restoration_utils.CommonFaceRestoration):
def restore_face(cropped_face_t): def restore_face(cropped_face_t):
assert self.net is not None assert self.net is not None
return self.net(cropped_face_t, weight=w, adain=True)[0] return self.net(cropped_face_t, w=w, adain=True)[0]
return self.restore_with_helper(np_image, restore_face) return self.restore_with_helper(np_image, restore_face)

View File

@ -57,7 +57,7 @@ class DeepDanbooru:
a = np.expand_dims(np.array(pic, dtype=np.float32), 0) / 255 a = np.expand_dims(np.array(pic, dtype=np.float32), 0) / 255
with torch.no_grad(), devices.autocast(): with torch.no_grad(), devices.autocast():
x = torch.from_numpy(a).to(devices.device, devices.dtype) x = torch.from_numpy(a).to(devices.device)
y = self.model(x)[0].detach().cpu().numpy() y = self.model(x)[0].detach().cpu().numpy()
probability_dict = {} probability_dict = {}

View File

@ -114,9 +114,6 @@ errors.run(enable_tf32, "Enabling TF32")
cpu: torch.device = torch.device("cpu") cpu: torch.device = torch.device("cpu")
fp8: bool = False fp8: bool = False
# Force fp16 for all models in inference. No casting during inference.
# This flag is controlled by "--precision half" command line arg.
force_fp16: bool = False
device: torch.device = None device: torch.device = None
device_interrogate: torch.device = None device_interrogate: torch.device = None
device_gfpgan: torch.device = None device_gfpgan: torch.device = None
@ -130,8 +127,6 @@ unet_needs_upcast = False
def cond_cast_unet(input): def cond_cast_unet(input):
if force_fp16:
return input.to(torch.float16)
return input.to(dtype_unet) if unet_needs_upcast else input return input.to(dtype_unet) if unet_needs_upcast else input
@ -211,11 +206,6 @@ def autocast(disable=False):
if disable: if disable:
return contextlib.nullcontext() return contextlib.nullcontext()
if force_fp16:
# No casting during inference if force_fp16 is enabled.
# All tensor dtype conversion happens before inference.
return contextlib.nullcontext()
if fp8 and device==cpu: if fp8 and device==cpu:
return torch.autocast("cpu", dtype=torch.bfloat16, enabled=True) return torch.autocast("cpu", dtype=torch.bfloat16, enabled=True)
@ -243,22 +233,22 @@ def test_for_nans(x, where):
if shared.cmd_opts.disable_nan_check: if shared.cmd_opts.disable_nan_check:
return return
if not torch.isnan(x[(0, ) * len(x.shape)]): if not torch.all(torch.isnan(x)).item():
return return
if where == "unet": if where == "unet":
message = "A tensor with NaNs was produced in Unet." message = "A tensor with all NaNs was produced in Unet."
if not shared.cmd_opts.no_half: if not shared.cmd_opts.no_half:
message += " This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try setting the \"Upcast cross attention layer to float32\" option in Settings > Stable Diffusion or using the --no-half commandline argument to fix this." message += " This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try setting the \"Upcast cross attention layer to float32\" option in Settings > Stable Diffusion or using the --no-half commandline argument to fix this."
elif where == "vae": elif where == "vae":
message = "A tensor with NaNs was produced in VAE." message = "A tensor with all NaNs was produced in VAE."
if not shared.cmd_opts.no_half and not shared.cmd_opts.no_half_vae: if not shared.cmd_opts.no_half and not shared.cmd_opts.no_half_vae:
message += " This could be because there's not enough precision to represent the picture. Try adding --no-half-vae commandline argument to fix this." message += " This could be because there's not enough precision to represent the picture. Try adding --no-half-vae commandline argument to fix this."
else: else:
message = "A tensor with NaNs was produced." message = "A tensor with all NaNs was produced."
message += " Use --disable-nan-check commandline argument to disable this check." message += " Use --disable-nan-check commandline argument to disable this check."
@ -268,8 +258,8 @@ def test_for_nans(x, where):
@lru_cache @lru_cache
def first_time_calculation(): def first_time_calculation():
""" """
just do any calculation with pytorch layers - the first time this is done it allocates about 700MB of memory and just do any calculation with pytorch layers - the first time this is done it allocaltes about 700MB of memory and
spends about 2.7 seconds doing that, at least with NVidia. spends about 2.7 seconds doing that, at least wih NVidia.
""" """
x = torch.zeros((1, 1)).to(device, dtype) x = torch.zeros((1, 1)).to(device, dtype)
@ -279,17 +269,3 @@ def first_time_calculation():
x = torch.zeros((1, 1, 3, 3)).to(device, dtype) x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype) conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)
conv2d(x) conv2d(x)
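first_time_calculation() above exists because the very first tiny forward pass pays PyTorch's one-time initialization cost (memory allocator, kernel/library setup). A rough way to see the effect, assuming PyTorch is available; absolute numbers vary a lot by hardware and build:

# Time a tiny linear forward twice: the first call absorbs the warm-up cost.
import time
import torch

device, dtype = ("cuda", torch.float16) if torch.cuda.is_available() else ("cpu", torch.float32)


def tiny_forward():
    x = torch.zeros((1, 1)).to(device, dtype)
    linear = torch.nn.Linear(1, 1).to(device, dtype)
    linear(x)


for label in ("first call", "second call"):
    start = time.perf_counter()
    tiny_forward()
    print(f"{label}: {time.perf_counter() - start:.3f}s")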
def force_model_fp16():
"""
ldm and sgm has modules.diffusionmodules.util.GroupNorm32.forward, which
force conversion of input to float32. If force_fp16 is enabled, we need to
prevent this casting.
"""
assert force_fp16
import sgm.modules.diffusionmodules.util as sgm_util
import ldm.modules.diffusionmodules.util as ldm_util
sgm_util.GroupNorm32 = torch.nn.GroupNorm
ldm_util.GroupNorm32 = torch.nn.GroupNorm
print("ldm/sgm GroupNorm32 replaced with normal torch.nn.GroupNorm due to `--precision half`.")

View File

@ -1,7 +1,6 @@
from __future__ import annotations from __future__ import annotations
import configparser import configparser
import dataclasses
import os import os
import threading import threading
import re import re
@ -10,10 +9,6 @@ from modules import shared, errors, cache, scripts
from modules.gitpython_hack import Repo from modules.gitpython_hack import Repo
from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401 from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401
extensions: list[Extension] = []
extension_paths: dict[str, Extension] = {}
loaded_extensions: dict[str, Exception] = {}
os.makedirs(extensions_dir, exist_ok=True) os.makedirs(extensions_dir, exist_ok=True)
@ -27,13 +22,6 @@ def active():
return [x for x in extensions if x.enabled] return [x for x in extensions if x.enabled]
@dataclasses.dataclass
class CallbackOrderInfo:
name: str
before: list
after: list
class ExtensionMetadata: class ExtensionMetadata:
filename = "metadata.ini" filename = "metadata.ini"
config: configparser.ConfigParser config: configparser.ConfigParser
@ -54,7 +42,7 @@ class ExtensionMetadata:
self.canonical_name = self.config.get("Extension", "Name", fallback=canonical_name) self.canonical_name = self.config.get("Extension", "Name", fallback=canonical_name)
self.canonical_name = canonical_name.lower().strip() self.canonical_name = canonical_name.lower().strip()
self.requires = None self.requires = self.get_script_requirements("Requires", "Extension")
def get_script_requirements(self, field, section, extra_section=None): def get_script_requirements(self, field, section, extra_section=None):
"""reads a list of requirements from the config; field is the name of the field in the ini file, """reads a list of requirements from the config; field is the name of the field in the ini file,
@ -66,15 +54,7 @@ class ExtensionMetadata:
if extra_section: if extra_section:
x = x + ', ' + self.config.get(extra_section, field, fallback='') x = x + ', ' + self.config.get(extra_section, field, fallback='')
-        listed_requirements = self.parse_list(x.lower())
-        res = []
-        for requirement in listed_requirements:
-            loaded_requirements = (x for x in requirement.split("|") if x in loaded_extensions)
-            relevant_requirement = next(loaded_requirements, requirement)
-            res.append(relevant_requirement)
-        return res
+        return self.parse_list(x.lower())
def parse_list(self, text): def parse_list(self, text):
"""converts a line from config ("ext1 ext2, ext3 ") into a python list (["ext1", "ext2", "ext3"])""" """converts a line from config ("ext1 ext2, ext3 ") into a python list (["ext1", "ext2", "ext3"])"""
@ -85,22 +65,6 @@ class ExtensionMetadata:
# both "," and " " are accepted as separator # both "," and " " are accepted as separator
return [x for x in re.split(r"[,\s]+", text.strip()) if x] return [x for x in re.split(r"[,\s]+", text.strip()) if x]
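The separator behaviour described in the docstring can be checked directly with the same split expression:

# Quick check of parse_list's behaviour, using the same regex as above.
import re

text = "ext1 ext2, ext3 "
parts = [x for x in re.split(r"[,\s]+", text.strip()) if x]
print(parts)  # ['ext1', 'ext2', 'ext3']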
def list_callback_order_instructions(self):
for section in self.config.sections():
if not section.startswith("callbacks/"):
continue
callback_name = section[10:]
if not callback_name.startswith(self.canonical_name):
errors.report(f"Callback order section for extension {self.canonical_name} is referencing the wrong extension: {section}")
continue
before = self.parse_list(self.config.get(section, 'Before', fallback=''))
after = self.parse_list(self.config.get(section, 'After', fallback=''))
yield CallbackOrderInfo(callback_name, before, after)
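list_callback_order_instructions() above reads optional `[callbacks/...]` sections from an extension's metadata.ini. A sketch of what such a section might look like and how configparser exposes it; the extension name "myext" and the callback name below are invented for illustration:

# Parse a made-up callbacks section the way the loop above would see it.
import configparser

ini_text = """
[callbacks/myext_on_ui_settings]
Before = some_other_extension
After = built_in_extension another_one
"""

config = configparser.ConfigParser()
config.read_string(ini_text)

for section in config.sections():
    if section.startswith("callbacks/"):
        callback_name = section[len("callbacks/"):]
        before = config.get(section, "Before", fallback="")
        after = config.get(section, "After", fallback="")
        print(callback_name, before.split(), after.split())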
class Extension: class Extension:
lock = threading.Lock() lock = threading.Lock()
@ -191,17 +155,14 @@ class Extension:
def check_updates(self): def check_updates(self):
repo = Repo(self.path) repo = Repo(self.path)
branch_name = f'{repo.remote().name}/{self.branch}'
for fetch in repo.remote().fetch(dry_run=True): for fetch in repo.remote().fetch(dry_run=True):
if self.branch and fetch.name != branch_name:
continue
if fetch.flags != fetch.HEAD_UPTODATE: if fetch.flags != fetch.HEAD_UPTODATE:
self.can_update = True self.can_update = True
self.status = "new commits" self.status = "new commits"
return return
try: try:
origin = repo.rev_parse(branch_name) origin = repo.rev_parse('origin')
if repo.head.commit != origin: if repo.head.commit != origin:
self.can_update = True self.can_update = True
self.status = "behind HEAD" self.status = "behind HEAD"
@ -214,10 +175,8 @@ class Extension:
self.can_update = False self.can_update = False
self.status = "latest" self.status = "latest"
def fetch_and_reset_hard(self, commit=None): def fetch_and_reset_hard(self, commit='origin'):
repo = Repo(self.path) repo = Repo(self.path)
if commit is None:
commit = f'{repo.remote().name}/{self.branch}'
# Fix: `error: Your local changes to the following files would be overwritten by merge`, # Fix: `error: Your local changes to the following files would be overwritten by merge`,
# because WSL2 Docker set 755 file permissions instead of 644, this results to the error. # because WSL2 Docker set 755 file permissions instead of 644, this results to the error.
repo.git.fetch(all=True) repo.git.fetch(all=True)
@ -227,8 +186,6 @@ class Extension:
def list_extensions(): def list_extensions():
extensions.clear() extensions.clear()
extension_paths.clear()
loaded_extensions.clear()
if shared.cmd_opts.disable_all_extensions: if shared.cmd_opts.disable_all_extensions:
print("*** \"--disable-all-extensions\" arg was used, will not load any extensions ***") print("*** \"--disable-all-extensions\" arg was used, will not load any extensions ***")
@ -239,6 +196,7 @@ def list_extensions():
elif shared.opts.disable_all_extensions == "extra": elif shared.opts.disable_all_extensions == "extra":
print("*** \"Disable all extensions\" option was set, will only load built-in extensions ***") print("*** \"Disable all extensions\" option was set, will only load built-in extensions ***")
loaded_extensions = {}
# scan through extensions directory and load metadata # scan through extensions directory and load metadata
for dirname in [extensions_builtin_dir, extensions_dir]: for dirname in [extensions_builtin_dir, extensions_dir]:
@ -262,12 +220,8 @@ def list_extensions():
is_builtin = dirname == extensions_builtin_dir is_builtin = dirname == extensions_builtin_dir
extension = Extension(name=extension_dirname, path=path, enabled=extension_dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin, metadata=metadata) extension = Extension(name=extension_dirname, path=path, enabled=extension_dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin, metadata=metadata)
extensions.append(extension) extensions.append(extension)
extension_paths[extension.path] = extension
loaded_extensions[canonical_name] = extension loaded_extensions[canonical_name] = extension
for extension in extensions:
extension.metadata.requires = extension.metadata.get_script_requirements("Requires", "Extension")
# check for requirements # check for requirements
for extension in extensions: for extension in extensions:
if not extension.enabled: if not extension.enabled:
@ -284,16 +238,4 @@ def list_extensions():
continue continue
def find_extension(filename): extensions: list[Extension] = []
parentdir = os.path.dirname(os.path.realpath(filename))
while parentdir != filename:
extension = extension_paths.get(parentdir)
if extension is not None:
return extension
filename = parentdir
parentdir = os.path.dirname(filename)
return None

View File

@ -60,7 +60,7 @@ class ExtraNetwork:
Where name matches the name of this ExtraNetwork object, and arg1:arg2:arg3 are any natural number of text arguments Where name matches the name of this ExtraNetwork object, and arg1:arg2:arg3 are any natural number of text arguments
separated by colon. separated by colon.
Even if the user does not mention this ExtraNetwork in his prompt, the call will still be made, with empty params_list - Even if the user does not mention this ExtraNetwork in his prompt, the call will stil be made, with empty params_list -
in this case, all effects of this extra networks should be disabled. in this case, all effects of this extra networks should be disabled.
Can be called multiple times before deactivate() - each new call should override the previous call completely. Can be called multiple times before deactivate() - each new call should override the previous call completely.
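The `<name:arg1:arg2:arg3>` syntax described above can be illustrated with a much simpler regex than the web UI actually uses; this is a stand-in, not the real prompt parser:

# Toy extraction of extra-network tags from a prompt; names are invented.
import re

prompt = "a photo of a cat <lora:myLora:0.8> <hypernet:style:1.0:extra>"

for match in re.finditer(r"<(\w+):([^>]+)>", prompt):
    name = match.group(1)
    params = match.group(2).split(":")
    print(name, params)
# lora ['myLora', '0.8']
# hypernet ['style', '1.0', 'extra']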

View File

@ -36,11 +36,13 @@ class FaceRestorerGFPGAN(face_restoration_utils.CommonFaceRestoration):
ext_filter=['.pth'], ext_filter=['.pth'],
): ):
    if 'GFPGAN' in os.path.basename(model_path):
-        return modelloader.load_spandrel_model(
+        model = modelloader.load_spandrel_model(
            model_path,
            device=self.get_device(),
            expected_architecture='GFPGAN',
        ).model
+        model.different_w = True  # see https://github.com/chaiNNer-org/spandrel/pull/81
+        return model
    raise ValueError("No GFPGAN model found")
def restore(self, np_image): def restore(self, np_image):

View File

@ -11,7 +11,7 @@ import tqdm
from einops import rearrange, repeat from einops import rearrange, repeat
from ldm.util import default from ldm.util import default
from modules import devices, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors from modules import devices, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors
from modules.textual_inversion import textual_inversion, saving_settings from modules.textual_inversion import textual_inversion, logging
from modules.textual_inversion.learn_schedule import LearnRateScheduler from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum from torch import einsum
from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_ from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
@ -95,7 +95,6 @@ class HypernetworkModule(torch.nn.Module):
zeros_(b) zeros_(b)
else: else:
raise KeyError(f"Key {weight_init} is not defined as initialization!") raise KeyError(f"Key {weight_init} is not defined as initialization!")
devices.torch_npu_set_device()
self.to(devices.device) self.to(devices.device)
def fix_old_state_dict(self, state_dict): def fix_old_state_dict(self, state_dict):
@ -533,7 +532,7 @@ def train_hypernetwork(id_task, hypernetwork_name: str, learn_rate: float, batch
model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds), model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds),
**{field: getattr(hypernetwork, field) for field in ['layer_structure', 'activation_func', 'weight_init', 'add_layer_norm', 'use_dropout', ]} **{field: getattr(hypernetwork, field) for field in ['layer_structure', 'activation_func', 'weight_init', 'add_layer_norm', 'use_dropout', ]}
) )
saving_settings.save_settings_to_file(log_directory, {**saved_params, **locals()}) logging.save_settings_to_file(log_directory, {**saved_params, **locals()})
latent_sampling_method = ds.latent_sampling_method latent_sampling_method = ds.latent_sampling_method

View File

@ -1,7 +1,7 @@
from __future__ import annotations from __future__ import annotations
import datetime import datetime
import functools
import pytz import pytz
import io import io
import math import math
@ -12,9 +12,7 @@ import re
import numpy as np import numpy as np
import piexif import piexif
import piexif.helper import piexif.helper
from PIL import Image, ImageFont, ImageDraw, ImageColor, PngImagePlugin, ImageOps from PIL import Image, ImageFont, ImageDraw, ImageColor, PngImagePlugin
# pillow_avif needs to be imported somewhere in code for it to work
import pillow_avif # noqa: F401
import string import string
import json import json
import hashlib import hashlib
@ -54,14 +52,11 @@ def image_grid(imgs, batch_size=1, rows=None):
params = script_callbacks.ImageGridLoopParams(imgs, cols, rows) params = script_callbacks.ImageGridLoopParams(imgs, cols, rows)
script_callbacks.image_grid_callback(params) script_callbacks.image_grid_callback(params)
-    w, h = map(max, zip(*(img.size for img in imgs)))
-    grid_background_color = ImageColor.getcolor(opts.grid_background_color, 'RGB')
-    grid = Image.new('RGB', size=(params.cols * w, params.rows * h), color=grid_background_color)
+    w, h = imgs[0].size
+    grid = Image.new('RGB', size=(params.cols * w, params.rows * h), color='black')
    for i, img in enumerate(params.imgs):
-        img_w, img_h = img.size
-        w_offset, h_offset = 0 if img_w == w else (w - img_w) // 2, 0 if img_h == h else (h - img_h) // 2
-        grid.paste(img, box=(i % params.cols * w + w_offset, i // params.cols * h + h_offset))
+        grid.paste(img, box=(i % params.cols * w, i // params.cols * h))
    return grid
@ -326,16 +321,13 @@ def resize_image(resize_mode, im, width, height, upscaler_name=None):
return res return res
-if not shared.cmd_opts.unix_filenames_sanitization:
-    invalid_filename_chars = '#<>:"/\\|?*\n\r\t'
-else:
-    invalid_filename_chars = '/'
+invalid_filename_chars = '#<>:"/\\|?*\n\r\t'
invalid_filename_prefix = ' ' invalid_filename_prefix = ' '
invalid_filename_postfix = ' .' invalid_filename_postfix = ' .'
re_nonletters = re.compile(r'[\s' + string.punctuation + ']+') re_nonletters = re.compile(r'[\s' + string.punctuation + ']+')
re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)") re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")
re_pattern_arg = re.compile(r"(.*)<([^>]*)>$") re_pattern_arg = re.compile(r"(.*)<([^>]*)>$")
max_filename_part_length = shared.cmd_opts.filenames_max_length max_filename_part_length = 128
NOTHING_AND_SKIP_PREVIOUS_TEXT = object() NOTHING_AND_SKIP_PREVIOUS_TEXT = object()
@ -352,35 +344,8 @@ def sanitize_filename_part(text, replace_spaces=True):
return text return text
@functools.cache
def get_scheduler_str(sampler_name, scheduler_name):
"""Returns {Scheduler} if the scheduler is applicable to the sampler"""
if scheduler_name == 'Automatic':
config = sd_samplers.find_sampler_config(sampler_name)
scheduler_name = config.options.get('scheduler', 'Automatic')
return scheduler_name.capitalize()
@functools.cache
def get_sampler_scheduler_str(sampler_name, scheduler_name):
"""Returns the '{Sampler} {Scheduler}' if the scheduler is applicable to the sampler"""
return f'{sampler_name} {get_scheduler_str(sampler_name, scheduler_name)}'
def get_sampler_scheduler(p, sampler):
"""Returns '{Sampler} {Scheduler}' / '{Scheduler}' / 'NOTHING_AND_SKIP_PREVIOUS_TEXT'"""
if hasattr(p, 'scheduler') and hasattr(p, 'sampler_name'):
if sampler:
sampler_scheduler = get_sampler_scheduler_str(p.sampler_name, p.scheduler)
else:
sampler_scheduler = get_scheduler_str(p.sampler_name, p.scheduler)
return sanitize_filename_part(sampler_scheduler, replace_spaces=False)
return NOTHING_AND_SKIP_PREVIOUS_TEXT
class FilenameGenerator: class FilenameGenerator:
replacements = { replacements = {
'basename': lambda self: self.basename or 'img',
'seed': lambda self: self.seed if self.seed is not None else '', 'seed': lambda self: self.seed if self.seed is not None else '',
'seed_first': lambda self: self.seed if self.p.batch_size == 1 else self.p.all_seeds[0], 'seed_first': lambda self: self.seed if self.p.batch_size == 1 else self.p.all_seeds[0],
'seed_last': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 else self.p.all_seeds[-1], 'seed_last': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 else self.p.all_seeds[-1],
@ -390,8 +355,6 @@ class FilenameGenerator:
'height': lambda self: self.image.height, 'height': lambda self: self.image.height,
'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False), 'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False),
'sampler': lambda self: self.p and sanitize_filename_part(self.p.sampler_name, replace_spaces=False), 'sampler': lambda self: self.p and sanitize_filename_part(self.p.sampler_name, replace_spaces=False),
'sampler_scheduler': lambda self: self.p and get_sampler_scheduler(self.p, True),
'scheduler': lambda self: self.p and get_sampler_scheduler(self.p, False),
'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash), 'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.name_for_extra, replace_spaces=False), 'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.name_for_extra, replace_spaces=False),
'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'), 'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
@ -417,13 +380,12 @@ class FilenameGenerator:
} }
default_time_format = '%Y%m%d%H%M%S' default_time_format = '%Y%m%d%H%M%S'
def __init__(self, p, seed, prompt, image, zip=False, basename=""): def __init__(self, p, seed, prompt, image, zip=False):
self.p = p self.p = p
self.seed = seed self.seed = seed
self.prompt = prompt self.prompt = prompt
self.image = image self.image = image
self.zip = zip self.zip = zip
self.basename = basename
def get_vae_filename(self): def get_vae_filename(self):
"""Get the name of the VAE file.""" """Get the name of the VAE file."""
@ -604,17 +566,6 @@ def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_p
}) })
piexif.insert(exif_bytes, filename) piexif.insert(exif_bytes, filename)
elif extension.lower() == '.avif':
if opts.enable_pnginfo and geninfo is not None:
exif_bytes = piexif.dump({
"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(geninfo or "", encoding="unicode")
},
})
else:
exif_bytes = None
image.save(filename,format=image_format, quality=opts.jpeg_quality, exif=exif_bytes)
elif extension.lower() == ".gif": elif extension.lower() == ".gif":
image.save(filename, format=image_format, comment=geninfo) image.save(filename, format=image_format, comment=geninfo)
else: else:
@ -654,12 +605,12 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
txt_fullfn (`str` or None): txt_fullfn (`str` or None):
If a text file is saved for this image, this will be its full path. Otherwise None. If a text file is saved for this image, this will be its full path. Otherwise None.
""" """
namegen = FilenameGenerator(p, seed, prompt, image, basename=basename) namegen = FilenameGenerator(p, seed, prompt, image)
# WebP and JPG formats have maximum dimension limits of 16383 and 65535 respectively. switch to PNG which has a much higher limit # WebP and JPG formats have maximum dimension limits of 16383 and 65535 respectively. switch to PNG which has a much higher limit
if (image.height > 65535 or image.width > 65535) and extension.lower() in ("jpg", "jpeg") or (image.height > 16383 or image.width > 16383) and extension.lower() == "webp": if (image.height > 65535 or image.width > 65535) and extension.lower() in ("jpg", "jpeg") or (image.height > 16383 or image.width > 16383) and extension.lower() == "webp":
print('Image dimensions too large; saving as PNG') print('Image dimensions too large; saving as PNG')
extension = "png" extension = ".png"
if save_to_dirs is None: if save_to_dirs is None:
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt) save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
@ -793,12 +744,10 @@ def read_info_from_image(image: Image.Image) -> tuple[str | None, dict]:
exif_comment = exif_comment.decode('utf8', errors="ignore") exif_comment = exif_comment.decode('utf8', errors="ignore")
if exif_comment: if exif_comment:
+        items['exif comment'] = exif_comment
        geninfo = exif_comment
    elif "comment" in items: # for gif
-        if isinstance(items["comment"], bytes):
-            geninfo = items["comment"].decode('utf8', errors="ignore")
-        else:
-            geninfo = items["comment"]
+        geninfo = items["comment"].decode('utf8', errors="ignore")
for field in IGNORED_INFO_KEYS: for field in IGNORED_INFO_KEYS:
items.pop(field, None) items.pop(field, None)
@ -821,7 +770,7 @@ def image_data(data):
import gradio as gr import gradio as gr
try: try:
image = read(io.BytesIO(data)) image = Image.open(io.BytesIO(data))
textinfo, _ = read_info_from_image(image) textinfo, _ = read_info_from_image(image)
return textinfo, None return textinfo, None
except Exception: except Exception:
@ -848,30 +797,3 @@ def flatten(img, bgcolor):
return img.convert('RGB') return img.convert('RGB')
def read(fp, **kwargs):
image = Image.open(fp, **kwargs)
image = fix_image(image)
return image
def fix_image(image: Image.Image):
if image is None:
return None
try:
image = ImageOps.exif_transpose(image)
image = fix_png_transparency(image)
except Exception:
pass
return image
def fix_png_transparency(image: Image.Image):
if image.mode not in ("RGB", "P") or not isinstance(image.info.get("transparency"), bytes):
return image
image = image.convert("RGBA")
return image
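fix_png_transparency() above targets palette images whose transparency arrives as a raw bytes table; converting them to RGBA gives downstream code a real alpha channel. A toy illustration of that case:

# Build a palette image with a bytes transparency table and apply the same guard.
from PIL import Image

img = Image.new("RGB", (4, 4), "red").convert("P")
img.info["transparency"] = bytes(256)  # per-palette-entry alpha table

if img.mode in ("RGB", "P") and isinstance(img.info.get("transparency"), bytes):
    img = img.convert("RGBA")

print(img.mode)  # RGBA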

View File

@ -6,7 +6,7 @@ import numpy as np
from PIL import Image, ImageOps, ImageFilter, ImageEnhance, UnidentifiedImageError from PIL import Image, ImageOps, ImageFilter, ImageEnhance, UnidentifiedImageError
import gradio as gr import gradio as gr
from modules import images from modules import images as imgutil
from modules.infotext_utils import create_override_settings_dict, parse_generation_parameters from modules.infotext_utils import create_override_settings_dict, parse_generation_parameters
from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, state from modules.shared import opts, state
@ -17,14 +17,11 @@ from modules.ui import plaintext_to_html
import modules.scripts import modules.scripts
-def process_batch(p, input, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):
+def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):
    output_dir = output_dir.strip()
    processing.fix_seed(p)
-    if isinstance(input, str):
-        batch_images = list(shared.walk_files(input, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff")))
-    else:
-        batch_images = [os.path.abspath(x.name) for x in input]
+    images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff")))
is_inpaint_batch = False is_inpaint_batch = False
if inpaint_mask_dir: if inpaint_mask_dir:
@ -34,9 +31,9 @@ def process_batch(p, input, output_dir, inpaint_mask_dir, args, to_scale=False,
if is_inpaint_batch: if is_inpaint_batch:
print(f"\nInpaint batch is enabled. {len(inpaint_masks)} masks found.") print(f"\nInpaint batch is enabled. {len(inpaint_masks)} masks found.")
print(f"Will process {len(batch_images)} images, creating {p.n_iter * p.batch_size} new images for each.") print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
state.job_count = len(batch_images) * p.n_iter state.job_count = len(images) * p.n_iter
# extract "default" params to use in case getting png info fails # extract "default" params to use in case getting png info fails
prompt = p.prompt prompt = p.prompt
@ -49,8 +46,8 @@ def process_batch(p, input, output_dir, inpaint_mask_dir, args, to_scale=False,
sd_model_checkpoint_override = get_closet_checkpoint_match(override_settings.get("sd_model_checkpoint", None)) sd_model_checkpoint_override = get_closet_checkpoint_match(override_settings.get("sd_model_checkpoint", None))
batch_results = None batch_results = None
discard_further_results = False discard_further_results = False
for i, image in enumerate(batch_images): for i, image in enumerate(images):
state.job = f"{i+1} out of {len(batch_images)}" state.job = f"{i+1} out of {len(images)}"
if state.skipped: if state.skipped:
state.skipped = False state.skipped = False
@ -58,7 +55,7 @@ def process_batch(p, input, output_dir, inpaint_mask_dir, args, to_scale=False,
break break
try: try:
img = images.read(image) img = Image.open(image)
except UnidentifiedImageError as e: except UnidentifiedImageError as e:
print(e) print(e)
continue continue
@ -89,7 +86,7 @@ def process_batch(p, input, output_dir, inpaint_mask_dir, args, to_scale=False,
# otherwise user has many masks with the same name but different extensions # otherwise user has many masks with the same name but different extensions
mask_image_path = masks_found[0] mask_image_path = masks_found[0]
mask_image = images.read(mask_image_path) mask_image = Image.open(mask_image_path)
p.image_mask = mask_image p.image_mask = mask_image
if use_png_info: if use_png_info:
@ -97,8 +94,8 @@ def process_batch(p, input, output_dir, inpaint_mask_dir, args, to_scale=False,
info_img = img info_img = img
if png_info_dir: if png_info_dir:
info_img_path = os.path.join(png_info_dir, os.path.basename(image)) info_img_path = os.path.join(png_info_dir, os.path.basename(image))
info_img = images.read(info_img_path) info_img = Image.open(info_img_path)
geninfo, _ = images.read_info_from_image(info_img) geninfo, _ = imgutil.read_info_from_image(info_img)
parsed_parameters = parse_generation_parameters(geninfo) parsed_parameters = parse_generation_parameters(geninfo)
parsed_parameters = {k: v for k, v in parsed_parameters.items() if k in (png_info_props or {})} parsed_parameters = {k: v for k, v in parsed_parameters.items() if k in (png_info_props or {})}
except Exception: except Exception:
@ -149,7 +146,7 @@ def process_batch(p, input, output_dir, inpaint_mask_dir, args, to_scale=False,
return batch_results return batch_results
def img2img(id_task: str, request: gr.Request, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, img2img_batch_source_type: str, img2img_batch_upload: list, *args): def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
override_settings = create_override_settings_dict(override_settings_texts) override_settings = create_override_settings_dict(override_settings_texts)
is_batch = mode == 5 is_batch = mode == 5
@ -178,8 +175,9 @@ def img2img(id_task: str, request: gr.Request, mode: int, prompt: str, negative_
image = None image = None
mask = None mask = None
-    image = images.fix_image(image)
-    mask = images.fix_image(mask)
+    # Use the EXIF orientation of photos taken by smartphones.
+    if image is not None:
+        image = ImageOps.exif_transpose(image)
if selected_scale_tab == 1 and not is_batch: if selected_scale_tab == 1 and not is_batch:
assert image, "Can't scale by because no image is selected" assert image, "Can't scale by because no image is selected"
@ -196,8 +194,10 @@ def img2img(id_task: str, request: gr.Request, mode: int, prompt: str, negative_
prompt=prompt, prompt=prompt,
negative_prompt=negative_prompt, negative_prompt=negative_prompt,
styles=prompt_styles, styles=prompt_styles,
sampler_name=sampler_name,
batch_size=batch_size, batch_size=batch_size,
n_iter=n_iter, n_iter=n_iter,
steps=steps,
cfg_scale=cfg_scale, cfg_scale=cfg_scale,
width=width, width=width,
height=height, height=height,
@ -224,15 +224,8 @@ def img2img(id_task: str, request: gr.Request, mode: int, prompt: str, negative_
with closing(p): with closing(p):
if is_batch: if is_batch:
-            if img2img_batch_source_type == "upload":
-                assert isinstance(img2img_batch_upload, list) and img2img_batch_upload
-                output_dir = ""
-                inpaint_mask_dir = ""
-                png_info_dir = img2img_batch_png_info_dir if not shared.cmd_opts.hide_ui_dir_config else ""
-                processed = process_batch(p, img2img_batch_upload, output_dir, inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=png_info_dir)
-            else: # "from dir"
-                assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
-                processed = process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
+            assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
+            processed = process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
if processed is None: if processed is None:
processed = Processed(p, [], p.seed, "") processed = Processed(p, [], p.seed, "")

View File

@ -8,7 +8,7 @@ import sys
import gradio as gr import gradio as gr
from modules.paths import data_path from modules.paths import data_path
from modules import shared, ui_tempdir, script_callbacks, processing, infotext_versions, images, prompt_parser, errors from modules import shared, ui_tempdir, script_callbacks, processing, infotext_versions
from PIL import Image from PIL import Image
sys.modules['modules.generation_parameters_copypaste'] = sys.modules[__name__] # alias for old name sys.modules['modules.generation_parameters_copypaste'] = sys.modules[__name__] # alias for old name
@ -83,7 +83,7 @@ def image_from_url_text(filedata):
assert is_in_right_dir, 'trying to open image file outside of allowed directories' assert is_in_right_dir, 'trying to open image file outside of allowed directories'
filename = filename.rsplit('?', 1)[0] filename = filename.rsplit('?', 1)[0]
return images.read(filename) return Image.open(filename)
if type(filedata) == list: if type(filedata) == list:
if len(filedata) == 0: if len(filedata) == 0:
@ -95,7 +95,7 @@ def image_from_url_text(filedata):
filedata = filedata[len("data:image/png;base64,"):] filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8')) filedata = base64.decodebytes(filedata.encode('utf-8'))
image = images.read(io.BytesIO(filedata)) image = Image.open(io.BytesIO(filedata))
return image return image
@ -146,19 +146,18 @@ def connect_paste_params_buttons():
destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None) destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None)
if binding.source_image_component and destination_image_component: if binding.source_image_component and destination_image_component:
need_send_dementions = destination_width_component and binding.tabname != 'inpaint'
if isinstance(binding.source_image_component, gr.Gallery): if isinstance(binding.source_image_component, gr.Gallery):
func = send_image_and_dimensions if need_send_dementions else image_from_url_text func = send_image_and_dimensions if destination_width_component else image_from_url_text
jsfunc = "extract_image_from_gallery" jsfunc = "extract_image_from_gallery"
else: else:
func = send_image_and_dimensions if need_send_dementions else lambda x: x func = send_image_and_dimensions if destination_width_component else lambda x: x
jsfunc = None jsfunc = None
binding.paste_button.click( binding.paste_button.click(
fn=func, fn=func,
_js=jsfunc, _js=jsfunc,
inputs=[binding.source_image_component], inputs=[binding.source_image_component],
outputs=[destination_image_component, destination_width_component, destination_height_component] if need_send_dementions else [destination_image_component], outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component],
show_progress=False, show_progress=False,
) )
@ -266,6 +265,17 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
else: else:
prompt += ("" if prompt == "" else "\n") + line prompt += ("" if prompt == "" else "\n") + line
if shared.opts.infotext_styles != "Ignore":
found_styles, prompt, negative_prompt = shared.prompt_styles.extract_styles_from_prompt(prompt, negative_prompt)
if shared.opts.infotext_styles == "Apply":
res["Styles array"] = found_styles
elif shared.opts.infotext_styles == "Apply if any" and found_styles:
res["Styles array"] = found_styles
res["Prompt"] = prompt
res["Negative prompt"] = negative_prompt
for k, v in re_param.findall(lastline): for k, v in re_param.findall(lastline):
try: try:
if v[0] == '"' and v[-1] == '"': if v[0] == '"' and v[-1] == '"':
@ -280,26 +290,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
except Exception: except Exception:
print(f"Error parsing \"{k}: {v}\"") print(f"Error parsing \"{k}: {v}\"")
# Extract styles from prompt
if shared.opts.infotext_styles != "Ignore":
found_styles, prompt_no_styles, negative_prompt_no_styles = shared.prompt_styles.extract_styles_from_prompt(prompt, negative_prompt)
same_hr_styles = True
if ("Hires prompt" in res or "Hires negative prompt" in res) and (infotext_ver > infotext_versions.v180_hr_styles if (infotext_ver := infotext_versions.parse_version(res.get("Version"))) else True):
hr_prompt, hr_negative_prompt = res.get("Hires prompt", prompt), res.get("Hires negative prompt", negative_prompt)
hr_found_styles, hr_prompt_no_styles, hr_negative_prompt_no_styles = shared.prompt_styles.extract_styles_from_prompt(hr_prompt, hr_negative_prompt)
if same_hr_styles := found_styles == hr_found_styles:
res["Hires prompt"] = '' if hr_prompt_no_styles == prompt_no_styles else hr_prompt_no_styles
res['Hires negative prompt'] = '' if hr_negative_prompt_no_styles == negative_prompt_no_styles else hr_negative_prompt_no_styles
if same_hr_styles:
prompt, negative_prompt = prompt_no_styles, negative_prompt_no_styles
if (shared.opts.infotext_styles == "Apply if any" and found_styles) or shared.opts.infotext_styles == "Apply":
res['Styles array'] = found_styles
res["Prompt"] = prompt
res["Negative prompt"] = negative_prompt
# Missing CLIP skip means it was set to 1 (the default) # Missing CLIP skip means it was set to 1 (the default)
if "Clip skip" not in res: if "Clip skip" not in res:
res["Clip skip"] = "1" res["Clip skip"] = "1"
@ -315,9 +305,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
if "Hires sampler" not in res: if "Hires sampler" not in res:
res["Hires sampler"] = "Use same sampler" res["Hires sampler"] = "Use same sampler"
if "Hires schedule type" not in res:
res["Hires schedule type"] = "Use same scheduler"
if "Hires checkpoint" not in res: if "Hires checkpoint" not in res:
res["Hires checkpoint"] = "Use same checkpoint" res["Hires checkpoint"] = "Use same checkpoint"
@ -369,15 +356,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
if "Cache FP16 weight for LoRA" not in res and res["FP8 weight"] != "Disable": if "Cache FP16 weight for LoRA" not in res and res["FP8 weight"] != "Disable":
res["Cache FP16 weight for LoRA"] = False res["Cache FP16 weight for LoRA"] = False
-    prompt_attention = prompt_parser.parse_prompt_attention(prompt)
-    prompt_attention += prompt_parser.parse_prompt_attention(negative_prompt)
-    prompt_uses_emphasis = len(prompt_attention) != len([p for p in prompt_attention if p[1] == 1.0 or p[0] == 'BREAK'])
-    if "Emphasis" not in res and prompt_uses_emphasis:
+    if "Emphasis" not in res:
        res["Emphasis"] = "Original"
if "Refiner switch by sampling steps" not in res:
res["Refiner switch by sampling steps"] = False
infotext_versions.backcompat(res) infotext_versions.backcompat(res)
for key in skip_fields: for key in skip_fields:
@ -475,7 +456,7 @@ def get_override_settings(params, *, skip_fields=None):
def connect_paste(button, paste_fields, input_comp, override_settings_component, tabname): def connect_paste(button, paste_fields, input_comp, override_settings_component, tabname):
def paste_func(prompt): def paste_func(prompt):
if not prompt and not shared.cmd_opts.hide_ui_dir_config and not shared.cmd_opts.no_prompt_history: if not prompt and not shared.cmd_opts.hide_ui_dir_config:
filename = os.path.join(data_path, "params.txt") filename = os.path.join(data_path, "params.txt")
try: try:
with open(filename, "r", encoding="utf8") as file: with open(filename, "r", encoding="utf8") as file:
@ -489,11 +470,7 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component,
for output, key in paste_fields: for output, key in paste_fields:
if callable(key): if callable(key):
try: v = key(params)
v = key(params)
except Exception:
errors.report(f"Error executing {key}", exc_info=True)
v = None
else: else:
v = params.get(key, None) v = params.get(key, None)

View File

@ -5,8 +5,6 @@ import re
v160 = version.parse("1.6.0") v160 = version.parse("1.6.0")
v170_tsnr = version.parse("v1.7.0-225") v170_tsnr = version.parse("v1.7.0-225")
v180 = version.parse("1.8.0")
v180_hr_styles = version.parse("1.8.0-139")
def parse_version(text): def parse_version(text):
@ -42,5 +40,3 @@ def backcompat(d):
if ver < v170_tsnr: if ver < v170_tsnr:
d["Downcast alphas_cumprod"] = True d["Downcast alphas_cumprod"] = True
if ver < v180 and d.get('Refiner'):
d["Refiner switch by sampling steps"] = True

View File

@ -51,7 +51,6 @@ def check_versions():
def initialize(): def initialize():
from modules import initialize_util from modules import initialize_util
initialize_util.fix_torch_version() initialize_util.fix_torch_version()
initialize_util.fix_pytorch_lightning()
initialize_util.fix_asyncio_event_loop_policy() initialize_util.fix_asyncio_event_loop_policy()
initialize_util.validate_tls_options() initialize_util.validate_tls_options()
initialize_util.configure_sigint_handler() initialize_util.configure_sigint_handler()
@ -110,7 +109,7 @@ def initialize_rest(*, reload_script_modules=False):
with startup_timer.subcategory("load scripts"): with startup_timer.subcategory("load scripts"):
scripts.load_scripts() scripts.load_scripts()
if reload_script_modules and shared.opts.enable_reloading_ui_scripts: if reload_script_modules:
for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]: for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
importlib.reload(module) importlib.reload(module)
startup_timer.record("reload script modules") startup_timer.record("reload script modules")
@ -140,7 +139,7 @@ def initialize_rest(*, reload_script_modules=False):
""" """
Accesses shared.sd_model property to load model. Accesses shared.sd_model property to load model.
After it's available, if it has been loaded before this access by some extension, After it's available, if it has been loaded before this access by some extension,
its optimization may be None because the list of optimizers has not been filled its optimization may be None because the list of optimizaers has neet been filled
by that time, so we apply optimization again. by that time, so we apply optimization again.
""" """
from modules import devices from modules import devices


@ -24,13 +24,6 @@ def fix_torch_version():
torch.__long_version__ = torch.__version__
torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)
def fix_pytorch_lightning():
# Checks if pytorch_lightning.utilities.distributed already exists in the sys.modules cache
if 'pytorch_lightning.utilities.distributed' not in sys.modules:
import pytorch_lightning
# Lets the user know that the library was not found and then will set it to pytorch_lightning.utilities.rank_zero
print("Pytorch_lightning.distributed not found, attempting pytorch_lightning.rank_zero")
sys.modules["pytorch_lightning.utilities.distributed"] = pytorch_lightning.utilities.rank_zero
def fix_asyncio_event_loop_policy():
"""


@ -9,7 +9,6 @@ import importlib.util
import importlib.metadata
import platform
import json
import shlex
from functools import lru_cache
from modules import cmd_args, errors
@ -56,7 +55,7 @@ and delete current Python and "venv" folder in WebUI's directory.
You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3106/
{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre" if is_windows else ""}
{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""}
Use --skip-python-version-check to suppress this warning.
""")
@ -77,7 +76,7 @@ def git_tag():
except Exception:
try:
changelog_md = os.path.join(script_path, "CHANGELOG.md")
changelog_md = os.path.join(os.path.dirname(os.path.dirname(__file__)), "CHANGELOG.md")
with open(changelog_md, "r", encoding="utf-8") as file:
line = next((line.strip() for line in file if line.strip()), "<none>")
line = line.replace("## ", "")
@ -232,7 +231,7 @@ def run_extension_installer(extension_dir):
try:
env = os.environ.copy()
env['PYTHONPATH'] = f"{script_path}{os.pathsep}{env.get('PYTHONPATH', '')}"
env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH', '')}"
stdout = run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env).strip()
if stdout:
@ -446,6 +445,7 @@ def prepare_environment():
exit(0)
def configure_for_tests():
if "--api" not in sys.argv:
sys.argv.append("--api")
@ -461,7 +461,7 @@ def configure_for_tests():
def start():
print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {shlex.join(sys.argv[1:])}")
print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}")
import webui
if '--nowebui' in sys.argv:
webui.api_only()


@ -1,12 +1,9 @@
from collections import namedtuple
import torch
from modules import devices, shared
module_in_gpu = None
cpu = torch.device("cpu")
ModuleWithParent = namedtuple('ModuleWithParent', ['module', 'parent'], defaults=['None'])
def send_everything_to_cpu():
global module_in_gpu
@ -78,14 +75,13 @@ def setup_for_low_vram(sd_model, use_medvram):
(sd_model, 'depth_model'),
(sd_model, 'embedder'),
(sd_model, 'model'),
(sd_model, 'embedder'),
]
is_sdxl = hasattr(sd_model, 'conditioner')
is_sd2 = not is_sdxl and hasattr(sd_model.cond_stage_model, 'model')
if hasattr(sd_model, 'medvram_fields'):
to_remain_in_cpu = sd_model.medvram_fields()
elif is_sdxl:
if is_sdxl:
to_remain_in_cpu.append((sd_model, 'conditioner'))
elif is_sd2:
to_remain_in_cpu.append((sd_model.cond_stage_model, 'model'))
@ -107,21 +103,7 @@ def setup_for_low_vram(sd_model, use_medvram):
setattr(obj, field, module)
# register hooks for those the first three models
if hasattr(sd_model, "cond_stage_model") and hasattr(sd_model.cond_stage_model, "medvram_modules"):
for module in sd_model.cond_stage_model.medvram_modules():
if isinstance(module, ModuleWithParent):
parent = module.parent
module = module.module
else:
parent = None
if module:
module.register_forward_pre_hook(send_me_to_gpu)
if parent:
parents[module] = parent
elif is_sdxl:
if is_sdxl:
sd_model.conditioner.register_forward_pre_hook(send_me_to_gpu)
elif is_sd2:
sd_model.cond_stage_model.model.register_forward_pre_hook(send_me_to_gpu)
@ -135,9 +117,9 @@ def setup_for_low_vram(sd_model, use_medvram):
sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu)
sd_model.first_stage_model.encode = first_stage_model_encode_wrap
sd_model.first_stage_model.decode = first_stage_model_decode_wrap
if getattr(sd_model, 'depth_model', None) is not None:
if sd_model.depth_model:
sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu)
if getattr(sd_model, 'embedder', None) is not None:
if sd_model.embedder:
sd_model.embedder.register_forward_pre_hook(send_me_to_gpu)
if use_medvram:
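
The hunks above replace the hard-coded SDXL/SD2 branches with an opt-in protocol: a model can expose medvram_fields() to say which submodules stay on the CPU, and its cond_stage_model can expose medvram_modules() to say which modules get the send-to-GPU forward hook (optionally wrapped in ModuleWithParent so a parent module is recorded too). A minimal sketch of a hypothetical model opting in; the class and attribute names below are invented for illustration:

class ExampleCondStageModel(torch.nn.Module):
    def medvram_modules(self):
        # modules moved to the GPU only while their forward() runs
        return [self.transformer, ModuleWithParent(self.embedder, parent=self)]

class ExampleModel(torch.nn.Module):
    def medvram_fields(self):
        # (owner, attribute name) pairs that setup_for_low_vram keeps on the CPU
        return [(self, 'first_stage_model'), (self, 'cond_stage_model'), (self, 'model')]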


@ -12,7 +12,7 @@ log = logging.getLogger(__name__)
# before torch version 1.13, has_mps is only available in nightly pytorch and macOS 12.3+,
# use check `getattr` and try it for compatibility.
# in torch version 1.13, backends.mps.is_available() and backends.mps.is_built() are introduced in to check mps availability,
# since torch 2.0.1+ nightly build, getattr(torch, 'has_mps', False) was deprecated, see https://github.com/pytorch/pytorch/pull/103279
def check_for_mps() -> bool:
if version.parse(torch.__version__) <= version.parse("2.0.1"):


@ -1,39 +1,17 @@
from PIL import Image, ImageFilter, ImageOps
def get_crop_region_v2(mask, pad=0):
"""
Finds a rectangular region that contains all masked ares in a mask.
Returns None if mask is completely black mask (all 0)
Parameters:
mask: PIL.Image.Image L mode or numpy 1d array
pad: int number of pixels that the region will be extended on all sides
Returns: (x1, y1, x2, y2) | None
Introduced post 1.9.0
"""
mask = mask if isinstance(mask, Image.Image) else Image.fromarray(mask)
if box := mask.getbbox():
x1, y1, x2, y2 = box
return (max(x1 - pad, 0), max(y1 - pad, 0), min(x2 + pad, mask.size[0]), min(y2 + pad, mask.size[1])) if pad else box
def get_crop_region(mask, pad=0):
"""
Same function as get_crop_region_v2 but handles completely black mask (all 0) differently
when mask all black still return coordinates but the coordinates may be invalid ie x2>x1 or y2>y1
Notes: it is possible for the coordinates to be "valid" again if pad size is sufficiently large
(mask_size.x-pad, mask_size.y-pad, pad, pad)
Extension developer should use get_crop_region_v2 instead unless for compatibility considerations.
"""
mask = mask if isinstance(mask, Image.Image) else Image.fromarray(mask)
if box := get_crop_region_v2(mask, pad):
return box
x1, y1 = mask.size
x2 = y2 = 0
return max(x1 - pad, 0), max(y1 - pad, 0), min(x2 + pad, mask.size[0]), min(y2 + pad, mask.size[1])
"""finds a rectangular region that contains all masked ares in an image. Returns (x1, y1, x2, y2) coordinates of the rectangle.
For example, if a user has painted the top-right part of a 512x512 image, the result may be (256, 0, 512, 256)"""
mask_img = mask if isinstance(mask, Image.Image) else Image.fromarray(mask)
box = mask_img.getbbox()
if box:
x1, y1, x2, y2 = box
else: # when no box is found
x1, y1 = mask_img.size
x2 = y2 = 0
return max(x1 - pad, 0), max(y1 - pad, 0), min(x2 + pad, mask_img.size[0]), min(y2 + pad, mask_img.size[1])
def expand_crop_region(crop_region, processing_width, processing_height, image_width, image_height):
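
A usage sketch for the two crop helpers above (values are illustrative): get_crop_region_v2 returns None for an all-black mask, while the compatibility wrapper get_crop_region still returns a tuple whose coordinates can be degenerate (x2 < x1) unless the pad is large enough to make them valid again.

from PIL import Image
from modules import masking

empty = Image.new("L", (512, 512), 0)       # nothing masked
painted = Image.new("L", (512, 512), 0)
painted.paste(255, (300, 0, 512, 200))      # user painted part of the top-right area

masking.get_crop_region_v2(empty)           # -> None
masking.get_crop_region_v2(painted, pad=8)  # -> (292, 0, 512, 208)
masking.get_crop_region(empty)              # -> (512, 512, 0, 0): degenerate, kept for compatibility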


@ -23,7 +23,6 @@ def load_file_from_url(
model_dir: str,
progress: bool = True,
file_name: str | None = None,
hash_prefix: str | None = None,
) -> str:
"""Download a file from `url` into `model_dir`, using the file present if possible.
@ -37,11 +36,11 @@ def load_file_from_url(
if not os.path.exists(cached_file):
print(f'Downloading: "{url}" to {cached_file}\n')
from torch.hub import download_url_to_file
download_url_to_file(url, cached_file, progress=progress, hash_prefix=hash_prefix)
download_url_to_file(url, cached_file, progress=progress)
return cached_file
def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None, hash_prefix=None) -> list:
def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None) -> list:
"""
A one-and done loader to try finding the desired models in specified directories.
@ -50,7 +49,6 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
@param model_path: The location to store/find models in.
@param command_path: A command-line argument to search for models in first.
@param ext_filter: An optional list of filename extensions to filter by
@param hash_prefix: the expected sha256 of the model_url
@return: A list of paths containing the desired model(s)
"""
output = []
@ -80,7 +78,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
if model_url is not None and len(output) == 0:
if download_name is not None:
output.append(load_file_from_url(model_url, model_dir=places[0], file_name=download_name, hash_prefix=hash_prefix))
output.append(load_file_from_url(model_url, model_dir=places[0], file_name=download_name))
else:
output.append(model_url)
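
For context, a hedged usage sketch of the hash_prefix parameter introduced above (the URL and hash below are placeholders): torch.hub's download_url_to_file verifies that the sha256 of the downloaded file starts with the given prefix, so a corrupted or wrong download raises instead of silently producing a broken model file.

path = load_file_from_url(
    "https://example.com/models/some-upscaler.pth",   # placeholder URL
    model_dir="models/ESRGAN",
    hash_prefix="a1b2c3",                              # placeholder sha256 prefix
)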
@ -112,7 +110,7 @@ def load_upscalers():
except Exception:
pass
data = []
datas = []
commandline_options = vars(shared.cmd_opts)
# some of upscaler classes will not go away after reloading their modules, and we'll end
@ -131,35 +129,14 @@ def load_upscalers():
scaler = cls(commandline_model_path)
scaler.user_path = commandline_model_path
scaler.model_download_path = commandline_model_path or scaler.model_path
data += scaler.scalers
datas += scaler.scalers
shared.sd_upscalers = sorted(
data,
datas,
# Special case for UpscalerNone keeps it at the beginning of the list.
key=lambda x: x.name.lower() if not isinstance(x.scaler, (UpscalerNone, UpscalerLanczos, UpscalerNearest)) else ""
)
# None: not loaded, False: failed to load, True: loaded
_spandrel_extra_init_state = None
def _init_spandrel_extra_archs() -> None:
"""
Try to initialize `spandrel_extra_archs` (exactly once).
"""
global _spandrel_extra_init_state
if _spandrel_extra_init_state is not None:
return
try:
import spandrel
import spandrel_extra_arches
spandrel.MAIN_REGISTRY.add(*spandrel_extra_arches.EXTRA_REGISTRY)
_spandrel_extra_init_state = True
except Exception:
logger.warning("Failed to load spandrel_extra_arches", exc_info=True)
_spandrel_extra_init_state = False
def load_spandrel_model(
path: str | os.PathLike,
@ -169,16 +146,11 @@ def load_spandrel_model(
dtype: str | torch.dtype | None = None,
expected_architecture: str | None = None,
) -> spandrel.ModelDescriptor:
global _spandrel_extra_init_state
import spandrel
_init_spandrel_extra_archs()
model_descriptor = spandrel.ModelLoader(device=device).load_from_file(str(path))
arch = model_descriptor.architecture
if expected_architecture and arch.name != expected_architecture:
if expected_architecture and model_descriptor.architecture != expected_architecture:
logger.warning(
f"Model {path!r} is not a {expected_architecture!r} model (got {arch.name!r})",
f"Model {path!r} is not a {expected_architecture!r} model (got {model_descriptor.architecture!r})",
)
half = False
if prefer_half:
@ -192,6 +164,6 @@ def load_spandrel_model(
model_descriptor.model.eval()
logger.debug(
"Loaded %s from %s (device=%s, half=%s, dtype=%s)",
arch, path, device, half, dtype,
model_descriptor, path, device, half, dtype,
)
return model_descriptor


@ -341,7 +341,7 @@ class DDPM(pl.LightningModule):
elif self.parameterization == "x0": elif self.parameterization == "x0":
target = x_start target = x_start
else: else:
raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
@ -901,7 +901,7 @@ class LatentDiffusion(DDPM):
def apply_model(self, x_noisy, t, cond, return_ids=False):
if isinstance(cond, dict):
# hybrid case, cond is expected to be a dict
pass
else:
if not isinstance(cond, list):
@ -937,7 +937,7 @@ class LatentDiffusion(DDPM):
cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
elif self.cond_stage_key == 'coordinates_bbox':
assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
# assuming padding of unfold is always 0 and its dilation is always 1
n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
@ -947,7 +947,7 @@ class LatentDiffusion(DDPM):
num_downs = self.first_stage_model.encoder.num_resolutions - 1
rescale_latent = 2 ** (num_downs)
# get top left positions of patches as conforming for the bbbox tokenizer, therefore we
# need to rescale the tl patch coordinates to be in between (0,1)
tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)


@ -323,7 +323,7 @@ def model_wrapper(
def model_fn(x, t_continuous, condition, unconditional_condition):
"""
The noise prediction model function that is used for DPM-Solver.
"""
if t_continuous.reshape((-1,)).shape[0] == 1:
t_continuous = t_continuous.expand((x.shape[0]))


@ -1,622 +0,0 @@
### This file contains impls for MM-DiT, the core model component of SD3
import math
from typing import Dict, Optional
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange, repeat
from modules.models.sd3.other_impls import attention, Mlp
class PatchEmbed(nn.Module):
""" 2D Image to Patch Embedding"""
def __init__(
self,
img_size: Optional[int] = 224,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
flatten: bool = True,
bias: bool = True,
strict_img_size: bool = True,
dynamic_img_pad: bool = False,
dtype=None,
device=None,
):
super().__init__()
self.patch_size = (patch_size, patch_size)
if img_size is not None:
self.img_size = (img_size, img_size)
self.grid_size = tuple([s // p for s, p in zip(self.img_size, self.patch_size)])
self.num_patches = self.grid_size[0] * self.grid_size[1]
else:
self.img_size = None
self.grid_size = None
self.num_patches = None
# flatten spatial dim and transpose to channels last, kept for bwd compat
self.flatten = flatten
self.strict_img_size = strict_img_size
self.dynamic_img_pad = dynamic_img_pad
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias, dtype=dtype, device=device)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2) # NCHW -> NLC
return x
def modulate(x, shift, scale):
if shift is None:
shift = torch.zeros_like(scale)
return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
#################################################################################
# Sine/Cosine Positional Embedding Functions #
#################################################################################
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0, scaling_factor=None, offset=None):
"""
grid_size: int of the grid height and width
return:
pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
"""
grid_h = np.arange(grid_size, dtype=np.float32)
grid_w = np.arange(grid_size, dtype=np.float32)
grid = np.meshgrid(grid_w, grid_h) # here w goes first
grid = np.stack(grid, axis=0)
if scaling_factor is not None:
grid = grid / scaling_factor
if offset is not None:
grid = grid - offset
grid = grid.reshape([2, 1, grid_size, grid_size])
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
if cls_token and extra_tokens > 0:
pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0)
return pos_embed
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
assert embed_dim % 2 == 0
# use half of dimensions to encode grid_h
emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
return emb
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
"""
embed_dim: output dimension for each position
pos: a list of positions to be encoded: size (M,)
out: (M, D)
"""
assert embed_dim % 2 == 0
omega = np.arange(embed_dim // 2, dtype=np.float64)
omega /= embed_dim / 2.0
omega = 1.0 / 10000**omega # (D/2,)
pos = pos.reshape(-1) # (M,)
out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product
emb_sin = np.sin(out) # (M, D/2)
emb_cos = np.cos(out) # (M, D/2)
return np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
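# Editor's note, illustration only (not part of the original file): these helpers build the
# standard fixed 2D sin/cos positional embedding. One half of embed_dim encodes one grid axis
# and the other half the second axis, each split into sin and cos terms at frequencies
# 1 / 10000**(2i/d). For example, a 16x16 latent grid with embed_dim=64:
# get_2d_sincos_pos_embed(64, 16).shape   # -> (256, 64), one row per grid position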
#################################################################################
# Embedding Layers for Timesteps and Class Labels #
#################################################################################
class TimestepEmbedder(nn.Module):
"""Embeds scalar timesteps into vector representations."""
def __init__(self, hidden_size, frequency_embedding_size=256, dtype=None, device=None):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(frequency_embedding_size, hidden_size, bias=True, dtype=dtype, device=device),
nn.SiLU(),
nn.Linear(hidden_size, hidden_size, bias=True, dtype=dtype, device=device),
)
self.frequency_embedding_size = frequency_embedding_size
@staticmethod
def timestep_embedding(t, dim, max_period=10000):
"""
Create sinusoidal timestep embeddings.
:param t: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an (N, D) Tensor of positional embeddings.
"""
half = dim // 2
freqs = torch.exp(
-math.log(max_period)
* torch.arange(start=0, end=half, dtype=torch.float32)
/ half
).to(device=t.device)
args = t[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
if torch.is_floating_point(t):
embedding = embedding.to(dtype=t.dtype)
return embedding
def forward(self, t, dtype, **kwargs):
t_freq = self.timestep_embedding(t, self.frequency_embedding_size).to(dtype)
t_emb = self.mlp(t_freq)
return t_emb
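# Editor's note, illustration only (not part of the original file): timestep_embedding() maps each
# scalar timestep to a frequency_embedding_size-dim sin/cos vector, and the two-layer MLP then
# projects it to hidden_size. For example:
# embedder = TimestepEmbedder(hidden_size=1536)
# t = torch.tensor([999.0, 10.0])
# embedder(t, dtype=torch.float32).shape   # -> torch.Size([2, 1536])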
class VectorEmbedder(nn.Module):
"""Embeds a flat vector of dimension input_dim"""
def __init__(self, input_dim: int, hidden_size: int, dtype=None, device=None):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(input_dim, hidden_size, bias=True, dtype=dtype, device=device),
nn.SiLU(),
nn.Linear(hidden_size, hidden_size, bias=True, dtype=dtype, device=device),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.mlp(x)
#################################################################################
# Core DiT Model #
#################################################################################
class QkvLinear(torch.nn.Linear):
pass
def split_qkv(qkv, head_dim):
qkv = qkv.reshape(qkv.shape[0], qkv.shape[1], 3, -1, head_dim).movedim(2, 0)
return qkv[0], qkv[1], qkv[2]
def optimized_attention(qkv, num_heads):
return attention(qkv[0], qkv[1], qkv[2], num_heads)
class SelfAttention(nn.Module):
ATTENTION_MODES = ("xformers", "torch", "torch-hb", "math", "debug")
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = False,
qk_scale: Optional[float] = None,
attn_mode: str = "xformers",
pre_only: bool = False,
qk_norm: Optional[str] = None,
rmsnorm: bool = False,
dtype=None,
device=None,
):
super().__init__()
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.qkv = QkvLinear(dim, dim * 3, bias=qkv_bias, dtype=dtype, device=device)
if not pre_only:
self.proj = nn.Linear(dim, dim, dtype=dtype, device=device)
assert attn_mode in self.ATTENTION_MODES
self.attn_mode = attn_mode
self.pre_only = pre_only
if qk_norm == "rms":
self.ln_q = RMSNorm(self.head_dim, elementwise_affine=True, eps=1.0e-6, dtype=dtype, device=device)
self.ln_k = RMSNorm(self.head_dim, elementwise_affine=True, eps=1.0e-6, dtype=dtype, device=device)
elif qk_norm == "ln":
self.ln_q = nn.LayerNorm(self.head_dim, elementwise_affine=True, eps=1.0e-6, dtype=dtype, device=device)
self.ln_k = nn.LayerNorm(self.head_dim, elementwise_affine=True, eps=1.0e-6, dtype=dtype, device=device)
elif qk_norm is None:
self.ln_q = nn.Identity()
self.ln_k = nn.Identity()
else:
raise ValueError(qk_norm)
def pre_attention(self, x: torch.Tensor):
B, L, C = x.shape
qkv = self.qkv(x)
q, k, v = split_qkv(qkv, self.head_dim)
q = self.ln_q(q).reshape(q.shape[0], q.shape[1], -1)
k = self.ln_k(k).reshape(q.shape[0], q.shape[1], -1)
return (q, k, v)
def post_attention(self, x: torch.Tensor) -> torch.Tensor:
assert not self.pre_only
x = self.proj(x)
return x
def forward(self, x: torch.Tensor) -> torch.Tensor:
(q, k, v) = self.pre_attention(x)
x = attention(q, k, v, self.num_heads)
x = self.post_attention(x)
return x
class RMSNorm(torch.nn.Module):
def __init__(
self, dim: int, elementwise_affine: bool = False, eps: float = 1e-6, device=None, dtype=None
):
"""
Initialize the RMSNorm normalization layer.
Args:
dim (int): The dimension of the input tensor.
eps (float, optional): A small value added to the denominator for numerical stability. Default is 1e-6.
Attributes:
eps (float): A small value added to the denominator for numerical stability.
weight (nn.Parameter): Learnable scaling parameter.
"""
super().__init__()
self.eps = eps
self.learnable_scale = elementwise_affine
if self.learnable_scale:
self.weight = nn.Parameter(torch.empty(dim, device=device, dtype=dtype))
else:
self.register_parameter("weight", None)
def _norm(self, x):
"""
Apply the RMSNorm normalization to the input tensor.
Args:
x (torch.Tensor): The input tensor.
Returns:
torch.Tensor: The normalized tensor.
"""
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
"""
Forward pass through the RMSNorm layer.
Args:
x (torch.Tensor): The input tensor.
Returns:
torch.Tensor: The output tensor after applying RMSNorm.
"""
x = self._norm(x)
if self.learnable_scale:
return x * self.weight.to(device=x.device, dtype=x.dtype)
else:
return x
class SwiGLUFeedForward(nn.Module):
def __init__(
self,
dim: int,
hidden_dim: int,
multiple_of: int,
ffn_dim_multiplier: Optional[float] = None,
):
"""
Initialize the FeedForward module.
Args:
dim (int): Input dimension.
hidden_dim (int): Hidden dimension of the feedforward layer.
multiple_of (int): Value to ensure hidden dimension is a multiple of this value.
ffn_dim_multiplier (float, optional): Custom multiplier for hidden dimension. Defaults to None.
Attributes:
w1 (ColumnParallelLinear): Linear transformation for the first layer.
w2 (RowParallelLinear): Linear transformation for the second layer.
w3 (ColumnParallelLinear): Linear transformation for the third layer.
"""
super().__init__()
hidden_dim = int(2 * hidden_dim / 3)
# custom dim factor multiplier
if ffn_dim_multiplier is not None:
hidden_dim = int(ffn_dim_multiplier * hidden_dim)
hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
self.w1 = nn.Linear(dim, hidden_dim, bias=False)
self.w2 = nn.Linear(hidden_dim, dim, bias=False)
self.w3 = nn.Linear(dim, hidden_dim, bias=False)
def forward(self, x):
return self.w2(nn.functional.silu(self.w1(x)) * self.w3(x))
class DismantledBlock(nn.Module):
"""A DiT block with gated adaptive layer norm (adaLN) conditioning."""
ATTENTION_MODES = ("xformers", "torch", "torch-hb", "math", "debug")
def __init__(
self,
hidden_size: int,
num_heads: int,
mlp_ratio: float = 4.0,
attn_mode: str = "xformers",
qkv_bias: bool = False,
pre_only: bool = False,
rmsnorm: bool = False,
scale_mod_only: bool = False,
swiglu: bool = False,
qk_norm: Optional[str] = None,
dtype=None,
device=None,
**block_kwargs,
):
super().__init__()
assert attn_mode in self.ATTENTION_MODES
if not rmsnorm:
self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device)
else:
self.norm1 = RMSNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias, attn_mode=attn_mode, pre_only=pre_only, qk_norm=qk_norm, rmsnorm=rmsnorm, dtype=dtype, device=device)
if not pre_only:
if not rmsnorm:
self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device)
else:
self.norm2 = RMSNorm(hidden_size, elementwise_affine=False, eps=1e-6)
mlp_hidden_dim = int(hidden_size * mlp_ratio)
if not pre_only:
if not swiglu:
self.mlp = Mlp(in_features=hidden_size, hidden_features=mlp_hidden_dim, act_layer=nn.GELU(approximate="tanh"), dtype=dtype, device=device)
else:
self.mlp = SwiGLUFeedForward(dim=hidden_size, hidden_dim=mlp_hidden_dim, multiple_of=256)
self.scale_mod_only = scale_mod_only
if not scale_mod_only:
n_mods = 6 if not pre_only else 2
else:
n_mods = 4 if not pre_only else 1
self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, n_mods * hidden_size, bias=True, dtype=dtype, device=device))
self.pre_only = pre_only
def pre_attention(self, x: torch.Tensor, c: torch.Tensor):
assert x is not None, "pre_attention called with None input"
if not self.pre_only:
if not self.scale_mod_only:
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(c).chunk(6, dim=1)
else:
shift_msa = None
shift_mlp = None
scale_msa, gate_msa, scale_mlp, gate_mlp = self.adaLN_modulation(c).chunk(4, dim=1)
qkv = self.attn.pre_attention(modulate(self.norm1(x), shift_msa, scale_msa))
return qkv, (x, gate_msa, shift_mlp, scale_mlp, gate_mlp)
else:
if not self.scale_mod_only:
shift_msa, scale_msa = self.adaLN_modulation(c).chunk(2, dim=1)
else:
shift_msa = None
scale_msa = self.adaLN_modulation(c)
qkv = self.attn.pre_attention(modulate(self.norm1(x), shift_msa, scale_msa))
return qkv, None
def post_attention(self, attn, x, gate_msa, shift_mlp, scale_mlp, gate_mlp):
assert not self.pre_only
x = x + gate_msa.unsqueeze(1) * self.attn.post_attention(attn)
x = x + gate_mlp.unsqueeze(1) * self.mlp(modulate(self.norm2(x), shift_mlp, scale_mlp))
return x
def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
assert not self.pre_only
(q, k, v), intermediates = self.pre_attention(x, c)
attn = attention(q, k, v, self.attn.num_heads)
return self.post_attention(attn, *intermediates)
def block_mixing(context, x, context_block, x_block, c):
assert context is not None, "block_mixing called with None context"
context_qkv, context_intermediates = context_block.pre_attention(context, c)
x_qkv, x_intermediates = x_block.pre_attention(x, c)
o = []
for t in range(3):
o.append(torch.cat((context_qkv[t], x_qkv[t]), dim=1))
q, k, v = tuple(o)
attn = attention(q, k, v, x_block.attn.num_heads)
context_attn, x_attn = (attn[:, : context_qkv[0].shape[1]], attn[:, context_qkv[0].shape[1] :])
if not context_block.pre_only:
context = context_block.post_attention(context_attn, *context_intermediates)
else:
context = None
x = x_block.post_attention(x_attn, *x_intermediates)
return context, x
class JointBlock(nn.Module):
"""just a small wrapper to serve as a fsdp unit"""
def __init__(self, *args, **kwargs):
super().__init__()
pre_only = kwargs.pop("pre_only")
qk_norm = kwargs.pop("qk_norm", None)
self.context_block = DismantledBlock(*args, pre_only=pre_only, qk_norm=qk_norm, **kwargs)
self.x_block = DismantledBlock(*args, pre_only=False, qk_norm=qk_norm, **kwargs)
def forward(self, *args, **kwargs):
return block_mixing(*args, context_block=self.context_block, x_block=self.x_block, **kwargs)
class FinalLayer(nn.Module):
"""
The final layer of DiT.
"""
def __init__(self, hidden_size: int, patch_size: int, out_channels: int, total_out_channels: Optional[int] = None, dtype=None, device=None):
super().__init__()
self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, dtype=dtype, device=device)
self.linear = (
nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True, dtype=dtype, device=device)
if (total_out_channels is None)
else nn.Linear(hidden_size, total_out_channels, bias=True, dtype=dtype, device=device)
)
self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True, dtype=dtype, device=device))
def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
shift, scale = self.adaLN_modulation(c).chunk(2, dim=1)
x = modulate(self.norm_final(x), shift, scale)
x = self.linear(x)
return x
class MMDiT(nn.Module):
"""Diffusion model with a Transformer backbone."""
def __init__(
self,
input_size: int = 32,
patch_size: int = 2,
in_channels: int = 4,
depth: int = 28,
mlp_ratio: float = 4.0,
learn_sigma: bool = False,
adm_in_channels: Optional[int] = None,
context_embedder_config: Optional[Dict] = None,
register_length: int = 0,
attn_mode: str = "torch",
rmsnorm: bool = False,
scale_mod_only: bool = False,
swiglu: bool = False,
out_channels: Optional[int] = None,
pos_embed_scaling_factor: Optional[float] = None,
pos_embed_offset: Optional[float] = None,
pos_embed_max_size: Optional[int] = None,
num_patches = None,
qk_norm: Optional[str] = None,
qkv_bias: bool = True,
dtype = None,
device = None,
):
super().__init__()
self.dtype = dtype
self.learn_sigma = learn_sigma
self.in_channels = in_channels
default_out_channels = in_channels * 2 if learn_sigma else in_channels
self.out_channels = out_channels if out_channels is not None else default_out_channels
self.patch_size = patch_size
self.pos_embed_scaling_factor = pos_embed_scaling_factor
self.pos_embed_offset = pos_embed_offset
self.pos_embed_max_size = pos_embed_max_size
# apply magic --> this defines a head_size of 64
hidden_size = 64 * depth
num_heads = depth
self.num_heads = num_heads
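# Editor's note: e.g. depth=24 gives hidden_size = 64 * 24 = 1536 with num_heads = 24,
# so each attention head always works on 1536 / 24 = 64 channels regardless of depth.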
self.x_embedder = PatchEmbed(input_size, patch_size, in_channels, hidden_size, bias=True, strict_img_size=self.pos_embed_max_size is None, dtype=dtype, device=device)
self.t_embedder = TimestepEmbedder(hidden_size, dtype=dtype, device=device)
if adm_in_channels is not None:
assert isinstance(adm_in_channels, int)
self.y_embedder = VectorEmbedder(adm_in_channels, hidden_size, dtype=dtype, device=device)
self.context_embedder = nn.Identity()
if context_embedder_config is not None:
if context_embedder_config["target"] == "torch.nn.Linear":
self.context_embedder = nn.Linear(**context_embedder_config["params"], dtype=dtype, device=device)
self.register_length = register_length
if self.register_length > 0:
self.register = nn.Parameter(torch.randn(1, register_length, hidden_size, dtype=dtype, device=device))
# num_patches = self.x_embedder.num_patches
# Will use fixed sin-cos embedding:
# just use a buffer already
if num_patches is not None:
self.register_buffer(
"pos_embed",
torch.zeros(1, num_patches, hidden_size, dtype=dtype, device=device),
)
else:
self.pos_embed = None
self.joint_blocks = nn.ModuleList(
[
JointBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, attn_mode=attn_mode, pre_only=i == depth - 1, rmsnorm=rmsnorm, scale_mod_only=scale_mod_only, swiglu=swiglu, qk_norm=qk_norm, dtype=dtype, device=device)
for i in range(depth)
]
)
self.final_layer = FinalLayer(hidden_size, patch_size, self.out_channels, dtype=dtype, device=device)
def cropped_pos_embed(self, hw):
assert self.pos_embed_max_size is not None
p = self.x_embedder.patch_size[0]
h, w = hw
# patched size
h = h // p
w = w // p
assert h <= self.pos_embed_max_size, (h, self.pos_embed_max_size)
assert w <= self.pos_embed_max_size, (w, self.pos_embed_max_size)
top = (self.pos_embed_max_size - h) // 2
left = (self.pos_embed_max_size - w) // 2
spatial_pos_embed = rearrange(
self.pos_embed,
"1 (h w) c -> 1 h w c",
h=self.pos_embed_max_size,
w=self.pos_embed_max_size,
)
spatial_pos_embed = spatial_pos_embed[:, top : top + h, left : left + w, :]
spatial_pos_embed = rearrange(spatial_pos_embed, "1 h w c -> 1 (h w) c")
return spatial_pos_embed
def unpatchify(self, x, hw=None):
"""
x: (N, T, patch_size**2 * C)
imgs: (N, H, W, C)
"""
c = self.out_channels
p = self.x_embedder.patch_size[0]
if hw is None:
h = w = int(x.shape[1] ** 0.5)
else:
h, w = hw
h = h // p
w = w // p
assert h * w == x.shape[1]
x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
x = torch.einsum("nhwpqc->nchpwq", x)
imgs = x.reshape(shape=(x.shape[0], c, h * p, w * p))
return imgs
def forward_core_with_concat(self, x: torch.Tensor, c_mod: torch.Tensor, context: Optional[torch.Tensor] = None) -> torch.Tensor:
if self.register_length > 0:
context = torch.cat((repeat(self.register, "1 ... -> b ...", b=x.shape[0]), context if context is not None else torch.Tensor([]).type_as(x)), 1)
# context is B, L', D
# x is B, L, D
for block in self.joint_blocks:
context, x = block(context, x, c=c_mod)
x = self.final_layer(x, c_mod) # (N, T, patch_size ** 2 * out_channels)
return x
def forward(self, x: torch.Tensor, t: torch.Tensor, y: Optional[torch.Tensor] = None, context: Optional[torch.Tensor] = None) -> torch.Tensor:
"""
Forward pass of DiT.
x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
t: (N,) tensor of diffusion timesteps
y: (N,) tensor of class labels
"""
hw = x.shape[-2:]
x = self.x_embedder(x) + self.cropped_pos_embed(hw)
c = self.t_embedder(t, dtype=x.dtype) # (N, D)
if y is not None:
y = self.y_embedder(y) # (N, D)
c = c + y # (N, D)
context = self.context_embedder(context)
x = self.forward_core_with_concat(x, c, context)
x = self.unpatchify(x, hw=hw) # (N, out_channels, H, W)
return x


@ -1,510 +0,0 @@
### This file contains impls for underlying related models (CLIP, T5, etc)
import torch
import math
from torch import nn
from transformers import CLIPTokenizer, T5TokenizerFast
from modules import sd_hijack
#################################################################################################
### Core/Utility
#################################################################################################
class AutocastLinear(nn.Linear):
"""Same as usual linear layer, but casts its weights to whatever the parameter type is.
This is different from torch.autocast in a way that float16 layer processing float32 input
will return float16 with autocast on, and float32 with this. T5 seems to be fucked
if you do it in full float16 (returning almost all zeros in the final output).
"""
def forward(self, x):
return torch.nn.functional.linear(x, self.weight.to(x.dtype), self.bias.to(x.dtype) if self.bias is not None else None)
def attention(q, k, v, heads, mask=None):
"""Convenience wrapper around a basic attention operation"""
b, _, dim_head = q.shape
dim_head //= heads
q, k, v = [t.view(b, -1, heads, dim_head).transpose(1, 2) for t in (q, k, v)]
out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
return out.transpose(1, 2).reshape(b, -1, heads * dim_head)
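# Editor's note, illustration only (not part of the original file): attention() takes q, k, v of
# shape (batch, seq_len, heads * dim_head), splits the last dimension into heads, runs
# scaled_dot_product_attention, and merges the heads back. For example:
# q = k = v = torch.randn(2, 77, 8 * 64)
# attention(q, k, v, heads=8).shape   # -> torch.Size([2, 77, 512])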
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, dtype=None, device=None):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features, bias=bias, dtype=dtype, device=device)
self.act = act_layer
self.fc2 = nn.Linear(hidden_features, out_features, bias=bias, dtype=dtype, device=device)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.fc2(x)
return x
#################################################################################################
### CLIP
#################################################################################################
class CLIPAttention(torch.nn.Module):
def __init__(self, embed_dim, heads, dtype, device):
super().__init__()
self.heads = heads
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
def forward(self, x, mask=None):
q = self.q_proj(x)
k = self.k_proj(x)
v = self.v_proj(x)
out = attention(q, k, v, self.heads, mask)
return self.out_proj(out)
ACTIVATIONS = {
"quick_gelu": lambda a: a * torch.sigmoid(1.702 * a),
"gelu": torch.nn.functional.gelu,
}
class CLIPLayer(torch.nn.Module):
def __init__(self, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device):
super().__init__()
self.layer_norm1 = nn.LayerNorm(embed_dim, dtype=dtype, device=device)
self.self_attn = CLIPAttention(embed_dim, heads, dtype, device)
self.layer_norm2 = nn.LayerNorm(embed_dim, dtype=dtype, device=device)
#self.mlp = CLIPMLP(embed_dim, intermediate_size, intermediate_activation, dtype, device)
self.mlp = Mlp(embed_dim, intermediate_size, embed_dim, act_layer=ACTIVATIONS[intermediate_activation], dtype=dtype, device=device)
def forward(self, x, mask=None):
x += self.self_attn(self.layer_norm1(x), mask)
x += self.mlp(self.layer_norm2(x))
return x
class CLIPEncoder(torch.nn.Module):
def __init__(self, num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device):
super().__init__()
self.layers = torch.nn.ModuleList([CLIPLayer(embed_dim, heads, intermediate_size, intermediate_activation, dtype, device) for i in range(num_layers)])
def forward(self, x, mask=None, intermediate_output=None):
if intermediate_output is not None:
if intermediate_output < 0:
intermediate_output = len(self.layers) + intermediate_output
intermediate = None
for i, layer in enumerate(self.layers):
x = layer(x, mask)
if i == intermediate_output:
intermediate = x.clone()
return x, intermediate
class CLIPEmbeddings(torch.nn.Module):
def __init__(self, embed_dim, vocab_size=49408, num_positions=77, dtype=None, device=None, textual_inversion_key="clip_l"):
super().__init__()
self.token_embedding = sd_hijack.TextualInversionEmbeddings(vocab_size, embed_dim, dtype=dtype, device=device, textual_inversion_key=textual_inversion_key)
self.position_embedding = torch.nn.Embedding(num_positions, embed_dim, dtype=dtype, device=device)
def forward(self, input_tokens):
return self.token_embedding(input_tokens) + self.position_embedding.weight
class CLIPTextModel_(torch.nn.Module):
def __init__(self, config_dict, dtype, device):
num_layers = config_dict["num_hidden_layers"]
embed_dim = config_dict["hidden_size"]
heads = config_dict["num_attention_heads"]
intermediate_size = config_dict["intermediate_size"]
intermediate_activation = config_dict["hidden_act"]
super().__init__()
self.embeddings = CLIPEmbeddings(embed_dim, dtype=torch.float32, device=device, textual_inversion_key=config_dict.get('textual_inversion_key', 'clip_l'))
self.encoder = CLIPEncoder(num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device)
self.final_layer_norm = nn.LayerNorm(embed_dim, dtype=dtype, device=device)
def forward(self, input_tokens, intermediate_output=None, final_layer_norm_intermediate=True):
x = self.embeddings(input_tokens)
causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1)
x, i = self.encoder(x, mask=causal_mask, intermediate_output=intermediate_output)
x = self.final_layer_norm(x)
if i is not None and final_layer_norm_intermediate:
i = self.final_layer_norm(i)
pooled_output = x[torch.arange(x.shape[0], device=x.device), input_tokens.to(dtype=torch.int, device=x.device).argmax(dim=-1),]
return x, i, pooled_output
class CLIPTextModel(torch.nn.Module):
def __init__(self, config_dict, dtype, device):
super().__init__()
self.num_layers = config_dict["num_hidden_layers"]
self.text_model = CLIPTextModel_(config_dict, dtype, device)
embed_dim = config_dict["hidden_size"]
self.text_projection = nn.Linear(embed_dim, embed_dim, bias=False, dtype=dtype, device=device)
self.text_projection.weight.copy_(torch.eye(embed_dim))
self.dtype = dtype
def get_input_embeddings(self):
return self.text_model.embeddings.token_embedding
def set_input_embeddings(self, embeddings):
self.text_model.embeddings.token_embedding = embeddings
def forward(self, *args, **kwargs):
x = self.text_model(*args, **kwargs)
out = self.text_projection(x[2])
return (x[0], x[1], out, x[2])
class SDTokenizer:
def __init__(self, max_length=77, pad_with_end=True, tokenizer=None, has_start_token=True, pad_to_max_length=True, min_length=None):
self.tokenizer = tokenizer
self.max_length = max_length
self.min_length = min_length
empty = self.tokenizer('')["input_ids"]
if has_start_token:
self.tokens_start = 1
self.start_token = empty[0]
self.end_token = empty[1]
else:
self.tokens_start = 0
self.start_token = None
self.end_token = empty[0]
self.pad_with_end = pad_with_end
self.pad_to_max_length = pad_to_max_length
vocab = self.tokenizer.get_vocab()
self.inv_vocab = {v: k for k, v in vocab.items()}
self.max_word_length = 8
def tokenize_with_weights(self, text:str):
"""Tokenize the text, with weight values - presume 1.0 for all and ignore other features here. The details aren't relevant for a reference impl, and weights themselves has weak effect on SD3."""
if self.pad_with_end:
pad_token = self.end_token
else:
pad_token = 0
batch = []
if self.start_token is not None:
batch.append((self.start_token, 1.0))
to_tokenize = text.replace("\n", " ").split(' ')
to_tokenize = [x for x in to_tokenize if x != ""]
for word in to_tokenize:
batch.extend([(t, 1) for t in self.tokenizer(word)["input_ids"][self.tokens_start:-1]])
batch.append((self.end_token, 1.0))
if self.pad_to_max_length:
batch.extend([(pad_token, 1.0)] * (self.max_length - len(batch)))
if self.min_length is not None and len(batch) < self.min_length:
batch.extend([(pad_token, 1.0)] * (self.min_length - len(batch)))
return [batch]
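# Editor's note, illustration only (not part of the original file): the result is one batch of
# (token_id, weight) pairs: the start token if the tokenizer has one, the word tokens with weight
# 1.0, the end token, then padding up to max_length (or at least min_length when pad_to_max_length
# is off): [[(start, 1.0), (tok_1, 1.0), ..., (end, 1.0), (pad, 1.0), ...]]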
class SDXLClipGTokenizer(SDTokenizer):
def __init__(self, tokenizer):
super().__init__(pad_with_end=False, tokenizer=tokenizer)
class SD3Tokenizer:
def __init__(self):
clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
self.clip_l = SDTokenizer(tokenizer=clip_tokenizer)
self.clip_g = SDXLClipGTokenizer(clip_tokenizer)
self.t5xxl = T5XXLTokenizer()
def tokenize_with_weights(self, text:str):
out = {}
out["g"] = self.clip_g.tokenize_with_weights(text)
out["l"] = self.clip_l.tokenize_with_weights(text)
out["t5xxl"] = self.t5xxl.tokenize_with_weights(text)
return out
class ClipTokenWeightEncoder:
def encode_token_weights(self, token_weight_pairs):
tokens = [a[0] for a in token_weight_pairs[0]]
out, pooled = self([tokens])
if pooled is not None:
first_pooled = pooled[0:1].cpu()
else:
first_pooled = pooled
output = [out[0:1]]
return torch.cat(output, dim=-2).cpu(), first_pooled
class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):
"""Uses the CLIP transformer encoder for text (from huggingface)"""
LAYERS = ["last", "pooled", "hidden"]
def __init__(self, device="cpu", max_length=77, layer="last", layer_idx=None, textmodel_json_config=None, dtype=None, model_class=CLIPTextModel,
special_tokens=None, layer_norm_hidden_state=True, return_projected_pooled=True):
super().__init__()
assert layer in self.LAYERS
self.transformer = model_class(textmodel_json_config, dtype, device)
self.num_layers = self.transformer.num_layers
self.max_length = max_length
self.transformer = self.transformer.eval()
for param in self.parameters():
param.requires_grad = False
self.layer = layer
self.layer_idx = None
self.special_tokens = special_tokens if special_tokens is not None else {"start": 49406, "end": 49407, "pad": 49407}
self.logit_scale = torch.nn.Parameter(torch.tensor(4.6055))
self.layer_norm_hidden_state = layer_norm_hidden_state
self.return_projected_pooled = return_projected_pooled
if layer == "hidden":
assert layer_idx is not None
assert abs(layer_idx) < self.num_layers
self.set_clip_options({"layer": layer_idx})
self.options_default = (self.layer, self.layer_idx, self.return_projected_pooled)
def set_clip_options(self, options):
layer_idx = options.get("layer", self.layer_idx)
self.return_projected_pooled = options.get("projected_pooled", self.return_projected_pooled)
if layer_idx is None or abs(layer_idx) > self.num_layers:
self.layer = "last"
else:
self.layer = "hidden"
self.layer_idx = layer_idx
def forward(self, tokens):
backup_embeds = self.transformer.get_input_embeddings()
tokens = torch.asarray(tokens, dtype=torch.int64, device=backup_embeds.weight.device)
outputs = self.transformer(tokens, intermediate_output=self.layer_idx, final_layer_norm_intermediate=self.layer_norm_hidden_state)
self.transformer.set_input_embeddings(backup_embeds)
if self.layer == "last":
z = outputs[0]
else:
z = outputs[1]
pooled_output = None
if len(outputs) >= 3:
if not self.return_projected_pooled and len(outputs) >= 4 and outputs[3] is not None:
pooled_output = outputs[3].float()
elif outputs[2] is not None:
pooled_output = outputs[2].float()
return z.float(), pooled_output
class SDXLClipG(SDClipModel):
"""Wraps the CLIP-G model into the SD-CLIP-Model interface"""
def __init__(self, config, device="cpu", layer="penultimate", layer_idx=None, dtype=None):
if layer == "penultimate":
layer="hidden"
layer_idx=-2
super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=config, dtype=dtype, special_tokens={"start": 49406, "end": 49407, "pad": 0}, layer_norm_hidden_state=False)
class T5XXLModel(SDClipModel):
"""Wraps the T5-XXL model into the SD-CLIP-Model interface for convenience"""
def __init__(self, config, device="cpu", layer="last", layer_idx=None, dtype=None):
super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=T5)
#################################################################################################
### T5 implementation, for the T5-XXL text encoder portion, largely pulled from upstream impl
#################################################################################################
class T5XXLTokenizer(SDTokenizer):
"""Wraps the T5 Tokenizer from HF into the SDTokenizer interface"""
def __init__(self):
super().__init__(pad_with_end=False, tokenizer=T5TokenizerFast.from_pretrained("google/t5-v1_1-xxl"), has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=77)
class T5LayerNorm(torch.nn.Module):
def __init__(self, hidden_size, eps=1e-6, dtype=None, device=None):
super().__init__()
self.weight = torch.nn.Parameter(torch.ones(hidden_size, dtype=dtype, device=device))
self.variance_epsilon = eps
def forward(self, x):
variance = x.pow(2).mean(-1, keepdim=True)
x = x * torch.rsqrt(variance + self.variance_epsilon)
return self.weight.to(device=x.device, dtype=x.dtype) * x
class T5DenseGatedActDense(torch.nn.Module):
def __init__(self, model_dim, ff_dim, dtype, device):
super().__init__()
self.wi_0 = AutocastLinear(model_dim, ff_dim, bias=False, dtype=dtype, device=device)
self.wi_1 = AutocastLinear(model_dim, ff_dim, bias=False, dtype=dtype, device=device)
self.wo = AutocastLinear(ff_dim, model_dim, bias=False, dtype=dtype, device=device)
def forward(self, x):
hidden_gelu = torch.nn.functional.gelu(self.wi_0(x), approximate="tanh")
hidden_linear = self.wi_1(x)
x = hidden_gelu * hidden_linear
x = self.wo(x)
return x
class T5LayerFF(torch.nn.Module):
def __init__(self, model_dim, ff_dim, dtype, device):
super().__init__()
self.DenseReluDense = T5DenseGatedActDense(model_dim, ff_dim, dtype, device)
self.layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device)
def forward(self, x):
forwarded_states = self.layer_norm(x)
forwarded_states = self.DenseReluDense(forwarded_states)
x += forwarded_states
return x
class T5Attention(torch.nn.Module):
def __init__(self, model_dim, inner_dim, num_heads, relative_attention_bias, dtype, device):
super().__init__()
# Mesh TensorFlow initialization to avoid scaling before softmax
self.q = AutocastLinear(model_dim, inner_dim, bias=False, dtype=dtype, device=device)
self.k = AutocastLinear(model_dim, inner_dim, bias=False, dtype=dtype, device=device)
self.v = AutocastLinear(model_dim, inner_dim, bias=False, dtype=dtype, device=device)
self.o = AutocastLinear(inner_dim, model_dim, bias=False, dtype=dtype, device=device)
self.num_heads = num_heads
self.relative_attention_bias = None
if relative_attention_bias:
self.relative_attention_num_buckets = 32
self.relative_attention_max_distance = 128
self.relative_attention_bias = torch.nn.Embedding(self.relative_attention_num_buckets, self.num_heads, device=device)
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1))
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
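# Editor's note, illustration only (not part of the original file): with bidirectional=True and
# num_buckets=32, half the buckets (16) are reserved per direction and half of those (max_exact=8)
# cover small exact offsets; larger offsets are binned logarithmically up to max_distance. E.g. a
# key 3 positions after the query lands in bucket 16 + 3 = 19, and 3 positions before it in bucket 3.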
def compute_bias(self, query_length, key_length, device):
"""Compute binned relative position bias"""
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=True,
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
return values
def forward(self, x, past_bias=None):
q = self.q(x)
k = self.k(x)
v = self.v(x)
if self.relative_attention_bias is not None:
past_bias = self.compute_bias(x.shape[1], x.shape[1], x.device)
if past_bias is not None:
mask = past_bias
else:
mask = None
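# T5 uses unscaled dot-product attention (see the Mesh TensorFlow note in __init__); scaling k by sqrt(head_dim)
# below presumably cancels the 1/sqrt(head_dim) factor applied inside the shared attention() helper.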
out = attention(q, k * ((k.shape[-1] / self.num_heads) ** 0.5), v, self.num_heads, mask.to(x.dtype) if mask is not None else None)
return self.o(out), past_bias
class T5LayerSelfAttention(torch.nn.Module):
def __init__(self, model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias, dtype, device):
super().__init__()
self.SelfAttention = T5Attention(model_dim, inner_dim, num_heads, relative_attention_bias, dtype, device)
self.layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device)
def forward(self, x, past_bias=None):
output, past_bias = self.SelfAttention(self.layer_norm(x), past_bias=past_bias)
x += output
return x, past_bias
class T5Block(torch.nn.Module):
def __init__(self, model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias, dtype, device):
super().__init__()
self.layer = torch.nn.ModuleList()
self.layer.append(T5LayerSelfAttention(model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias, dtype, device))
self.layer.append(T5LayerFF(model_dim, ff_dim, dtype, device))
def forward(self, x, past_bias=None):
x, past_bias = self.layer[0](x, past_bias)
x = self.layer[-1](x)
return x, past_bias
class T5Stack(torch.nn.Module):
def __init__(self, num_layers, model_dim, inner_dim, ff_dim, num_heads, vocab_size, dtype, device):
super().__init__()
self.embed_tokens = torch.nn.Embedding(vocab_size, model_dim, device=device)
self.block = torch.nn.ModuleList([T5Block(model_dim, inner_dim, ff_dim, num_heads, relative_attention_bias=(i == 0), dtype=dtype, device=device) for i in range(num_layers)])
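# only the first block owns the learned relative position bias; its computed bias is reused by the
# remaining blocks via the past_bias value threaded through forward()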
self.final_layer_norm = T5LayerNorm(model_dim, dtype=dtype, device=device)
def forward(self, input_ids, intermediate_output=None, final_layer_norm_intermediate=True):
intermediate = None
x = self.embed_tokens(input_ids).to(torch.float32) # needs float32 or else T5 returns all zeroes
past_bias = None
for i, layer in enumerate(self.block):
x, past_bias = layer(x, past_bias)
if i == intermediate_output:
intermediate = x.clone()
x = self.final_layer_norm(x)
if intermediate is not None and final_layer_norm_intermediate:
intermediate = self.final_layer_norm(intermediate)
return x, intermediate
class T5(torch.nn.Module):
def __init__(self, config_dict, dtype, device):
super().__init__()
self.num_layers = config_dict["num_layers"]
self.encoder = T5Stack(self.num_layers, config_dict["d_model"], config_dict["d_model"], config_dict["d_ff"], config_dict["num_heads"], config_dict["vocab_size"], dtype, device)
self.dtype = dtype
def get_input_embeddings(self):
return self.encoder.embed_tokens
def set_input_embeddings(self, embeddings):
self.encoder.embed_tokens = embeddings
def forward(self, *args, **kwargs):
return self.encoder(*args, **kwargs)

View File

@ -1,222 +0,0 @@
import os
import safetensors
import torch
import typing
from transformers import CLIPTokenizer, T5TokenizerFast
from modules import shared, devices, modelloader, sd_hijack_clip, prompt_parser
from modules.models.sd3.other_impls import SDClipModel, SDXLClipG, T5XXLModel, SD3Tokenizer
class SafetensorsMapping(typing.Mapping):
def __init__(self, file):
self.file = file
def __len__(self):
return len(self.file.keys())
def __iter__(self):
for key in self.file.keys():
yield key
def __getitem__(self, key):
return self.file.get_tensor(key)
CLIPL_URL = "https://huggingface.co/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/clip_l.safetensors"
CLIPL_CONFIG = {
"hidden_act": "quick_gelu",
"hidden_size": 768,
"intermediate_size": 3072,
"num_attention_heads": 12,
"num_hidden_layers": 12,
}
CLIPG_URL = "https://huggingface.co/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/clip_g.safetensors"
CLIPG_CONFIG = {
"hidden_act": "gelu",
"hidden_size": 1280,
"intermediate_size": 5120,
"num_attention_heads": 20,
"num_hidden_layers": 32,
"textual_inversion_key": "clip_g",
}
T5_URL = "https://huggingface.co/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/t5xxl_fp16.safetensors"
T5_CONFIG = {
"d_ff": 10240,
"d_model": 4096,
"num_heads": 64,
"num_layers": 24,
"vocab_size": 32128,
}
class Sd3ClipLG(sd_hijack_clip.TextConditionalModel):
def __init__(self, clip_l, clip_g):
super().__init__()
self.clip_l = clip_l
self.clip_g = clip_g
self.tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
empty = self.tokenizer('')["input_ids"]
self.id_start = empty[0]
self.id_end = empty[1]
self.id_pad = empty[1]
self.return_pooled = True
def tokenize(self, texts):
return self.tokenizer(texts, truncation=False, add_special_tokens=False)["input_ids"]
def encode_with_transformers(self, tokens):
tokens_g = tokens.clone()
for batch_pos in range(tokens_g.shape[0]):
index = tokens_g[batch_pos].cpu().tolist().index(self.id_end)
tokens_g[batch_pos, index+1:tokens_g.shape[1]] = 0
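# everything after the first end-of-text token is zeroed for the G branch, presumably because CLIP-G
# expects zero padding while the shared token stream pads with id_end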
l_out, l_pooled = self.clip_l(tokens)
g_out, g_pooled = self.clip_g(tokens_g)
lg_out = torch.cat([l_out, g_out], dim=-1)
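# CLIP-L (768) + CLIP-G (1280) give 2048 channels; zero-padding to 4096 matches the T5-XXL width so the
# two sequences can later be concatenated along the token axis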
lg_out = torch.nn.functional.pad(lg_out, (0, 4096 - lg_out.shape[-1]))
vector_out = torch.cat((l_pooled, g_pooled), dim=-1)
lg_out.pooled = vector_out
return lg_out
def encode_embedding_init_text(self, init_text, nvpt):
return torch.zeros((nvpt, 768+1280), device=devices.device) # XXX
class Sd3T5(torch.nn.Module):
def __init__(self, t5xxl):
super().__init__()
self.t5xxl = t5xxl
self.tokenizer = T5TokenizerFast.from_pretrained("google/t5-v1_1-xxl")
empty = self.tokenizer('', padding='max_length', max_length=2)["input_ids"]
self.id_end = empty[0]
self.id_pad = empty[1]
def tokenize(self, texts):
return self.tokenizer(texts, truncation=False, add_special_tokens=False)["input_ids"]
def tokenize_line(self, line, *, target_token_count=None):
if shared.opts.emphasis != "None":
parsed = prompt_parser.parse_prompt_attention(line)
else:
parsed = [[line, 1.0]]
tokenized = self.tokenize([text for text, _ in parsed])
tokens = []
multipliers = []
for text_tokens, (text, weight) in zip(tokenized, parsed):
if text == 'BREAK' and weight == -1:
continue
tokens += text_tokens
multipliers += [weight] * len(text_tokens)
tokens += [self.id_end]
multipliers += [1.0]
if target_token_count is not None:
if len(tokens) < target_token_count:
pad_count = target_token_count - len(tokens)
tokens += [self.id_pad] * pad_count
multipliers += [1.0] * pad_count
else:
tokens = tokens[0:target_token_count]
multipliers = multipliers[0:target_token_count]
return tokens, multipliers
def forward(self, texts, *, token_count):
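# when T5 is unavailable or disabled, return zeros of the expected (batch, token_count, 4096) shape so the
# rest of the conditioning pipeline stays unchanged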
if not self.t5xxl or not shared.opts.sd3_enable_t5:
return torch.zeros((len(texts), token_count, 4096), device=devices.device, dtype=devices.dtype)
tokens_batch = []
for text in texts:
tokens, multipliers = self.tokenize_line(text, target_token_count=token_count)
tokens_batch.append(tokens)
t5_out, t5_pooled = self.t5xxl(tokens_batch)
return t5_out
def encode_embedding_init_text(self, init_text, nvpt):
return torch.zeros((nvpt, 4096), device=devices.device) # XXX
class SD3Cond(torch.nn.Module):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tokenizer = SD3Tokenizer()
with torch.no_grad():
self.clip_g = SDXLClipG(CLIPG_CONFIG, device="cpu", dtype=devices.dtype)
self.clip_l = SDClipModel(layer="hidden", layer_idx=-2, device="cpu", dtype=devices.dtype, layer_norm_hidden_state=False, return_projected_pooled=False, textmodel_json_config=CLIPL_CONFIG)
if shared.opts.sd3_enable_t5:
self.t5xxl = T5XXLModel(T5_CONFIG, device="cpu", dtype=devices.dtype)
else:
self.t5xxl = None
self.model_lg = Sd3ClipLG(self.clip_l, self.clip_g)
self.model_t5 = Sd3T5(self.t5xxl)
def forward(self, prompts: list[str]):
with devices.without_autocast():
lg_out, vector_out = self.model_lg(prompts)
t5_out = self.model_t5(prompts, token_count=lg_out.shape[1])
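# the CLIP hidden states (padded to 4096 channels) and the T5 hidden states are concatenated along the
# token axis, so the crossattn context holds the CLIP tokens followed by an equal number of T5 tokens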
lgt_out = torch.cat([lg_out, t5_out], dim=-2)
return {
'crossattn': lgt_out,
'vector': vector_out,
}
def before_load_weights(self, state_dict):
clip_path = os.path.join(shared.models_path, "CLIP")
if 'text_encoders.clip_g.transformer.text_model.embeddings.position_embedding.weight' not in state_dict:
clip_g_file = modelloader.load_file_from_url(CLIPG_URL, model_dir=clip_path, file_name="clip_g.safetensors")
with safetensors.safe_open(clip_g_file, framework="pt") as file:
self.clip_g.transformer.load_state_dict(SafetensorsMapping(file))
if 'text_encoders.clip_l.transformer.text_model.embeddings.position_embedding.weight' not in state_dict:
clip_l_file = modelloader.load_file_from_url(CLIPL_URL, model_dir=clip_path, file_name="clip_l.safetensors")
with safetensors.safe_open(clip_l_file, framework="pt") as file:
self.clip_l.transformer.load_state_dict(SafetensorsMapping(file), strict=False)
if self.t5xxl and 'text_encoders.t5xxl.transformer.encoder.embed_tokens.weight' not in state_dict:
t5_file = modelloader.load_file_from_url(T5_URL, model_dir=clip_path, file_name="t5xxl_fp16.safetensors")
with safetensors.safe_open(t5_file, framework="pt") as file:
self.t5xxl.transformer.load_state_dict(SafetensorsMapping(file), strict=False)
def encode_embedding_init_text(self, init_text, nvpt):
return self.model_lg.encode_embedding_init_text(init_text, nvpt)
def tokenize(self, texts):
return self.model_lg.tokenize(texts)
def medvram_modules(self):
return [self.clip_g, self.clip_l, self.t5xxl]
def get_token_count(self, text):
_, token_count = self.model_lg.process_texts([text])
return token_count
def get_target_prompt_token_count(self, token_count):
return self.model_lg.get_target_prompt_token_count(token_count)

View File

@ -1,374 +0,0 @@
### Impls of the SD3 core diffusion model and VAE
import torch
import math
import einops
from modules.models.sd3.mmdit import MMDiT
from PIL import Image
#################################################################################################
### MMDiT Model Wrapping
#################################################################################################
class ModelSamplingDiscreteFlow(torch.nn.Module):
"""Helper for sampler scheduling (ie timestep/sigma calculations) for Discrete Flow models"""
def __init__(self, shift=1.0):
super().__init__()
self.shift = shift
timesteps = 1000
ts = self.sigma(torch.arange(1, timesteps + 1, 1))
self.register_buffer('sigmas', ts)
@property
def sigma_min(self):
return self.sigmas[0]
@property
def sigma_max(self):
return self.sigmas[-1]
def timestep(self, sigma):
return sigma * 1000
def sigma(self, timestep: torch.Tensor):
timestep = timestep / 1000.0
if self.shift == 1.0:
return timestep
return self.shift * timestep / (1 + (self.shift - 1) * timestep)
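# e.g. with shift=3.0, timestep 500 gives t=0.5 and sigma = 3*0.5 / (1 + 2*0.5) = 0.75, i.e. the shift
# biases sampling toward higher noise levels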
def calculate_denoised(self, sigma, model_output, model_input):
sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
return model_input - model_output * sigma
def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
return sigma * noise + (1.0 - sigma) * latent_image
class BaseModel(torch.nn.Module):
"""Wrapper around the core MM-DiT model"""
def __init__(self, shift=1.0, device=None, dtype=torch.float32, state_dict=None, prefix=""):
super().__init__()
# Important configuration values can be quickly determined by checking shapes in the source file
# Some of these will vary between models (e.g. 2B vs 8B primarily differ in their depth, but other details change as well)
patch_size = state_dict[f"{prefix}x_embedder.proj.weight"].shape[2]
depth = state_dict[f"{prefix}x_embedder.proj.weight"].shape[0] // 64
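# for instance, the 2B "medium" checkpoint is assumed to have a 1536-wide x_embedder projection,
# giving depth = 1536 // 64 = 24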
num_patches = state_dict[f"{prefix}pos_embed"].shape[1]
pos_embed_max_size = round(math.sqrt(num_patches))
adm_in_channels = state_dict[f"{prefix}y_embedder.mlp.0.weight"].shape[1]
context_shape = state_dict[f"{prefix}context_embedder.weight"].shape
context_embedder_config = {
"target": "torch.nn.Linear",
"params": {
"in_features": context_shape[1],
"out_features": context_shape[0]
}
}
self.diffusion_model = MMDiT(input_size=None, pos_embed_scaling_factor=None, pos_embed_offset=None, pos_embed_max_size=pos_embed_max_size, patch_size=patch_size, in_channels=16, depth=depth, num_patches=num_patches, adm_in_channels=adm_in_channels, context_embedder_config=context_embedder_config, device=device, dtype=dtype)
self.model_sampling = ModelSamplingDiscreteFlow(shift=shift)
self.depth = depth
def apply_model(self, x, sigma, c_crossattn=None, y=None):
dtype = self.get_dtype()
timestep = self.model_sampling.timestep(sigma).float()
model_output = self.diffusion_model(x.to(dtype), timestep, context=c_crossattn.to(dtype), y=y.to(dtype)).float()
return self.model_sampling.calculate_denoised(sigma, model_output, x)
def forward(self, *args, **kwargs):
return self.apply_model(*args, **kwargs)
def get_dtype(self):
return self.diffusion_model.dtype
class CFGDenoiser(torch.nn.Module):
"""Helper for applying CFG Scaling to diffusion outputs"""
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, x, timestep, cond, uncond, cond_scale):
# Run cond and uncond in a batch together
batched = self.model.apply_model(torch.cat([x, x]), torch.cat([timestep, timestep]), c_crossattn=torch.cat([cond["c_crossattn"], uncond["c_crossattn"]]), y=torch.cat([cond["y"], uncond["y"]]))
# Then split and apply CFG Scaling
pos_out, neg_out = batched.chunk(2)
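# classifier-free guidance: cond_scale = 1.0 returns the conditional prediction unchanged, larger values
# extrapolate further away from the unconditional prediction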
scaled = neg_out + (pos_out - neg_out) * cond_scale
return scaled
class SD3LatentFormat:
"""Latents are slightly shifted from center - this class must be called after VAE Decode to correct for the shift"""
def __init__(self):
self.scale_factor = 1.5305
self.shift_factor = 0.0609
def process_in(self, latent):
return (latent - self.shift_factor) * self.scale_factor
def process_out(self, latent):
return (latent / self.scale_factor) + self.shift_factor
def decode_latent_to_preview(self, x0):
"""Quick RGB approximate preview of sd3 latents"""
factors = torch.tensor([
[-0.0645, 0.0177, 0.1052], [ 0.0028, 0.0312, 0.0650],
[ 0.1848, 0.0762, 0.0360], [ 0.0944, 0.0360, 0.0889],
[ 0.0897, 0.0506, -0.0364], [-0.0020, 0.1203, 0.0284],
[ 0.0855, 0.0118, 0.0283], [-0.0539, 0.0658, 0.1047],
[-0.0057, 0.0116, 0.0700], [-0.0412, 0.0281, -0.0039],
[ 0.1106, 0.1171, 0.1220], [-0.0248, 0.0682, -0.0481],
[ 0.0815, 0.0846, 0.1207], [-0.0120, -0.0055, -0.0867],
[-0.0749, -0.0634, -0.0456], [-0.1418, -0.1457, -0.1259]
], device="cpu")
latent_image = x0[0].permute(1, 2, 0).cpu() @ factors
latents_ubyte = (((latent_image + 1) / 2)
.clamp(0, 1) # change scale from -1..1 to 0..1
.mul(0xFF) # to 0..255
.byte()).cpu()
return Image.fromarray(latents_ubyte.numpy())
#################################################################################################
### K-Diffusion Sampling
#################################################################################################
def append_dims(x, target_dims):
"""Appends dimensions to the end of a tensor until it has target_dims dimensions."""
dims_to_append = target_dims - x.ndim
return x[(...,) + (None,) * dims_to_append]
def to_d(x, sigma, denoised):
"""Converts a denoiser output to a Karras ODE derivative."""
return (x - denoised) / append_dims(sigma, x.ndim)
@torch.no_grad()
@torch.autocast("cuda", dtype=torch.float16)
def sample_euler(model, x, sigmas, extra_args=None):
"""Implements Algorithm 2 (Euler steps) from Karras et al. (2022)."""
extra_args = {} if extra_args is None else extra_args
s_in = x.new_ones([x.shape[0]])
for i in range(len(sigmas) - 1):
sigma_hat = sigmas[i]
denoised = model(x, sigma_hat * s_in, **extra_args)
d = to_d(x, sigma_hat, denoised)
dt = sigmas[i + 1] - sigma_hat
# Euler method
x = x + d * dt
return x
#################################################################################################
### VAE
#################################################################################################
def Normalize(in_channels, num_groups=32, dtype=torch.float32, device=None):
return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)
class ResnetBlock(torch.nn.Module):
def __init__(self, *, in_channels, out_channels=None, dtype=torch.float32, device=None):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.norm1 = Normalize(in_channels, dtype=dtype, device=device)
self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)
self.norm2 = Normalize(out_channels, dtype=dtype, device=device)
self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)
if self.in_channels != self.out_channels:
self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device)
else:
self.nin_shortcut = None
self.swish = torch.nn.SiLU(inplace=True)
def forward(self, x):
hidden = x
hidden = self.norm1(hidden)
hidden = self.swish(hidden)
hidden = self.conv1(hidden)
hidden = self.norm2(hidden)
hidden = self.swish(hidden)
hidden = self.conv2(hidden)
if self.in_channels != self.out_channels:
x = self.nin_shortcut(x)
return x + hidden
class AttnBlock(torch.nn.Module):
def __init__(self, in_channels, dtype=torch.float32, device=None):
super().__init__()
self.norm = Normalize(in_channels, dtype=dtype, device=device)
self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device)
self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device)
self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device)
self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device)
def forward(self, x):
hidden = self.norm(x)
q = self.q(hidden)
k = self.k(hidden)
v = self.v(hidden)
b, c, h, w = q.shape
q, k, v = [einops.rearrange(x, "b c h w -> b 1 (h w) c").contiguous() for x in (q, k, v)]
hidden = torch.nn.functional.scaled_dot_product_attention(q, k, v) # scale is dim ** -0.5 per default
hidden = einops.rearrange(hidden, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b)
hidden = self.proj_out(hidden)
return x + hidden
class Downsample(torch.nn.Module):
def __init__(self, in_channels, dtype=torch.float32, device=None):
super().__init__()
self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0, dtype=dtype, device=device)
def forward(self, x):
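# pad only the right and bottom edges so the stride-2 convolution halves H and W exactly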
pad = (0,1,0,1)
x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
x = self.conv(x)
return x
class Upsample(torch.nn.Module):
def __init__(self, in_channels, dtype=torch.float32, device=None):
super().__init__()
self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)
def forward(self, x):
x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
x = self.conv(x)
return x
class VAEEncoder(torch.nn.Module):
def __init__(self, ch=128, ch_mult=(1,2,4,4), num_res_blocks=2, in_channels=3, z_channels=16, dtype=torch.float32, device=None):
super().__init__()
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
# downsampling
self.conv_in = torch.nn.Conv2d(in_channels, ch, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)
in_ch_mult = (1,) + tuple(ch_mult)
self.in_ch_mult = in_ch_mult
self.down = torch.nn.ModuleList()
for i_level in range(self.num_resolutions):
block = torch.nn.ModuleList()
attn = torch.nn.ModuleList()
block_in = ch*in_ch_mult[i_level]
block_out = ch*ch_mult[i_level]
for _ in range(num_res_blocks):
block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, dtype=dtype, device=device))
block_in = block_out
down = torch.nn.Module()
down.block = block
down.attn = attn
if i_level != self.num_resolutions - 1:
down.downsample = Downsample(block_in, dtype=dtype, device=device)
self.down.append(down)
# middle
self.mid = torch.nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device)
self.mid.attn_1 = AttnBlock(block_in, dtype=dtype, device=device)
self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device)
# end
self.norm_out = Normalize(block_in, dtype=dtype, device=device)
self.conv_out = torch.nn.Conv2d(block_in, 2 * z_channels, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)
self.swish = torch.nn.SiLU(inplace=True)
def forward(self, x):
# downsampling
hs = [self.conv_in(x)]
for i_level in range(self.num_resolutions):
for i_block in range(self.num_res_blocks):
h = self.down[i_level].block[i_block](hs[-1])
hs.append(h)
if i_level != self.num_resolutions-1:
hs.append(self.down[i_level].downsample(hs[-1]))
# middle
h = hs[-1]
h = self.mid.block_1(h)
h = self.mid.attn_1(h)
h = self.mid.block_2(h)
# end
h = self.norm_out(h)
h = self.swish(h)
h = self.conv_out(h)
return h
class VAEDecoder(torch.nn.Module):
def __init__(self, ch=128, out_ch=3, ch_mult=(1, 2, 4, 4), num_res_blocks=2, resolution=256, z_channels=16, dtype=torch.float32, device=None):
super().__init__()
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
block_in = ch * ch_mult[self.num_resolutions - 1]
curr_res = resolution // 2 ** (self.num_resolutions - 1)
# z to block_in
self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)
# middle
self.mid = torch.nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device)
self.mid.attn_1 = AttnBlock(block_in, dtype=dtype, device=device)
self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, dtype=dtype, device=device)
# upsampling
self.up = torch.nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = torch.nn.ModuleList()
block_out = ch * ch_mult[i_level]
for _ in range(self.num_res_blocks + 1):
block.append(ResnetBlock(in_channels=block_in, out_channels=block_out, dtype=dtype, device=device))
block_in = block_out
up = torch.nn.Module()
up.block = block
if i_level != 0:
up.upsample = Upsample(block_in, dtype=dtype, device=device)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order
# end
self.norm_out = Normalize(block_in, dtype=dtype, device=device)
self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1, dtype=dtype, device=device)
self.swish = torch.nn.SiLU(inplace=True)
def forward(self, z):
# z to block_in
hidden = self.conv_in(z)
# middle
hidden = self.mid.block_1(hidden)
hidden = self.mid.attn_1(hidden)
hidden = self.mid.block_2(hidden)
# upsampling
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks + 1):
hidden = self.up[i_level].block[i_block](hidden)
if i_level != 0:
hidden = self.up[i_level].upsample(hidden)
# end
hidden = self.norm_out(hidden)
hidden = self.swish(hidden)
hidden = self.conv_out(hidden)
return hidden
class SDVAE(torch.nn.Module):
def __init__(self, dtype=torch.float32, device=None):
super().__init__()
self.encoder = VAEEncoder(dtype=dtype, device=device)
self.decoder = VAEDecoder(dtype=dtype, device=device)
@torch.autocast("cuda", dtype=torch.float16)
def decode(self, latent):
return self.decoder(latent)
@torch.autocast("cuda", dtype=torch.float16)
def encode(self, image):
hidden = self.encoder(image)
mean, logvar = torch.chunk(hidden, 2, dim=1)
logvar = torch.clamp(logvar, -30.0, 20.0)
std = torch.exp(0.5 * logvar)
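# reparameterization trick: sample from the predicted posterior N(mean, std^2)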
return mean + std * torch.randn_like(mean)

View File

@ -1,96 +0,0 @@
import contextlib
import torch
import k_diffusion
from modules.models.sd3.sd3_impls import BaseModel, SDVAE, SD3LatentFormat
from modules.models.sd3.sd3_cond import SD3Cond
from modules import shared, devices
class SD3Denoiser(k_diffusion.external.DiscreteSchedule):
def __init__(self, inner_model, sigmas):
super().__init__(sigmas, quantize=shared.opts.enable_quantization)
self.inner_model = inner_model
def forward(self, input, sigma, **kwargs):
return self.inner_model.apply_model(input, sigma, **kwargs)
class SD3Inferencer(torch.nn.Module):
def __init__(self, state_dict, shift=3, use_ema=False):
super().__init__()
self.shift = shift
with torch.no_grad():
self.model = BaseModel(shift=shift, state_dict=state_dict, prefix="model.diffusion_model.", device="cpu", dtype=devices.dtype)
self.first_stage_model = SDVAE(device="cpu", dtype=devices.dtype_vae)
self.first_stage_model.dtype = self.model.diffusion_model.dtype
self.alphas_cumprod = 1 / (self.model.model_sampling.sigmas ** 2 + 1)
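# 1 / (sigma^2 + 1) re-expresses the flow sigmas as a DDPM-style alphas_cumprod, presumably for
# compatibility with code paths that expect that schedule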
self.text_encoders = SD3Cond()
self.cond_stage_key = 'txt'
self.parameterization = "eps"
self.model.conditioning_key = "crossattn"
self.latent_format = SD3LatentFormat()
self.latent_channels = 16
@property
def cond_stage_model(self):
return self.text_encoders
def before_load_weights(self, state_dict):
self.cond_stage_model.before_load_weights(state_dict)
def ema_scope(self):
return contextlib.nullcontext()
def get_learned_conditioning(self, batch: list[str]):
return self.cond_stage_model(batch)
def apply_model(self, x, t, cond):
return self.model(x, t, c_crossattn=cond['crossattn'], y=cond['vector'])
def decode_first_stage(self, latent):
latent = self.latent_format.process_out(latent)
return self.first_stage_model.decode(latent)
def encode_first_stage(self, image):
latent = self.first_stage_model.encode(image)
return self.latent_format.process_in(latent)
def get_first_stage_encoding(self, x):
return x
def create_denoiser(self):
return SD3Denoiser(self, self.model.model_sampling.sigmas)
def medvram_fields(self):
return [
(self, 'first_stage_model'),
(self, 'text_encoders'),
(self, 'model'),
]
def add_noise_to_latent(self, x, noise, amount):
return x * (1 - amount) + noise * amount
def fix_dimensions(self, width, height):
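# the VAE downsamples by 8 and the MMDiT uses a patch size of 2, so dimensions are snapped down to
# multiples of 16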
return width // 16 * 16, height // 16 * 16
def diffusers_weight_mapping(self):
for i in range(self.model.depth):
yield f"transformer.transformer_blocks.{i}.attn.to_q", f"diffusion_model_joint_blocks_{i}_x_block_attn_qkv_q_proj"
yield f"transformer.transformer_blocks.{i}.attn.to_k", f"diffusion_model_joint_blocks_{i}_x_block_attn_qkv_k_proj"
yield f"transformer.transformer_blocks.{i}.attn.to_v", f"diffusion_model_joint_blocks_{i}_x_block_attn_qkv_v_proj"
yield f"transformer.transformer_blocks.{i}.attn.to_out.0", f"diffusion_model_joint_blocks_{i}_x_block_attn_proj"
yield f"transformer.transformer_blocks.{i}.attn.add_q_proj", f"diffusion_model_joint_blocks_{i}_context_block.attn_qkv_q_proj"
yield f"transformer.transformer_blocks.{i}.attn.add_k_proj", f"diffusion_model_joint_blocks_{i}_context_block.attn_qkv_k_proj"
yield f"transformer.transformer_blocks.{i}.attn.add_v_proj", f"diffusion_model_joint_blocks_{i}_context_block.attn_qkv_v_proj"
yield f"transformer.transformer_blocks.{i}.attn.add_out_proj.0", f"diffusion_model_joint_blocks_{i}_context_block_attn_proj"

View File

@ -240,9 +240,6 @@ class Options:
item_categories = {} item_categories = {}
for item in self.data_labels.values(): for item in self.data_labels.values():
if item.section[0] is None:
continue
category = categories.mapping.get(item.category_id) category = categories.mapping.get(item.category_id)
category = "Uncategorized" if category is None else category.label category = "Uncategorized" if category is None else category.label
if category not in item_categories: if category not in item_categories:

View File

@ -24,15 +24,14 @@ default_sd_model_file = sd_model_file
# Parse the --data-dir flag first so we can use it as a base for our other argument default values # Parse the --data-dir flag first so we can use it as a base for our other argument default values
parser_pre = argparse.ArgumentParser(add_help=False) parser_pre = argparse.ArgumentParser(add_help=False)
parser_pre.add_argument("--data-dir", type=str, default=os.path.dirname(modules_path), help="base path where all user data is stored", ) parser_pre.add_argument("--data-dir", type=str, default=os.path.dirname(modules_path), help="base path where all user data is stored", )
parser_pre.add_argument("--models-dir", type=str, default=None, help="base path where models are stored; overrides --data-dir", )
cmd_opts_pre = parser_pre.parse_known_args()[0] cmd_opts_pre = parser_pre.parse_known_args()[0]
data_path = cmd_opts_pre.data_dir data_path = cmd_opts_pre.data_dir
models_path = cmd_opts_pre.models_dir if cmd_opts_pre.models_dir else os.path.join(data_path, "models") models_path = os.path.join(data_path, "models")
extensions_dir = os.path.join(data_path, "extensions") extensions_dir = os.path.join(data_path, "extensions")
extensions_builtin_dir = os.path.join(script_path, "extensions-builtin") extensions_builtin_dir = os.path.join(script_path, "extensions-builtin")
config_states_dir = os.path.join(script_path, "config_states") config_states_dir = os.path.join(script_path, "config_states")
default_output_dir = os.path.join(data_path, "outputs") default_output_dir = os.path.join(data_path, "output")
roboto_ttf_file = os.path.join(modules_path, 'Roboto-Regular.ttf') roboto_ttf_file = os.path.join(modules_path, 'Roboto-Regular.ttf')

View File

@ -17,10 +17,10 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir,
if extras_mode == 1: if extras_mode == 1:
for img in image_folder: for img in image_folder:
if isinstance(img, Image.Image): if isinstance(img, Image.Image):
image = images.fix_image(img) image = img
fn = '' fn = ''
else: else:
image = images.read(os.path.abspath(img.name)) image = Image.open(os.path.abspath(img.name))
fn = os.path.splitext(img.orig_name)[0] fn = os.path.splitext(img.orig_name)[0]
yield image, fn yield image, fn
elif extras_mode == 2: elif extras_mode == 2:
@ -51,24 +51,22 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir,
shared.state.textinfo = name shared.state.textinfo = name
shared.state.skipped = False shared.state.skipped = False
if shared.state.interrupted or shared.state.stopping_generation: if shared.state.interrupted:
break break
if isinstance(image_placeholder, str): if isinstance(image_placeholder, str):
try: try:
image_data = images.read(image_placeholder) image_data = Image.open(image_placeholder)
except Exception: except Exception:
continue continue
else: else:
image_data = image_placeholder image_data = image_placeholder
image_data = image_data if image_data.mode in ("RGBA", "RGB") else image_data.convert("RGB")
parameters, existing_pnginfo = images.read_info_from_image(image_data) parameters, existing_pnginfo = images.read_info_from_image(image_data)
if parameters: if parameters:
existing_pnginfo["parameters"] = parameters existing_pnginfo["parameters"] = parameters
initial_pp = scripts_postprocessing.PostprocessedImage(image_data) initial_pp = scripts_postprocessing.PostprocessedImage(image_data.convert("RGB"))
scripts.scripts_postproc.run(initial_pp, args) scripts.scripts_postproc.run(initial_pp, args)
@ -124,6 +122,8 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir,
if extras_mode != 2 or show_extras_results: if extras_mode != 2 or show_extras_results:
outputs.append(pp.image) outputs.append(pp.image)
image_data.close()
devices.torch_gc() devices.torch_gc()
shared.state.end() shared.state.end()
return outputs, ui_common.plaintext_to_html(infotext), '' return outputs, ui_common.plaintext_to_html(infotext), ''
@ -133,15 +133,13 @@ def run_postprocessing_webui(id_task, *args, **kwargs):
return run_postprocessing(*args, **kwargs) return run_postprocessing(*args, **kwargs)
def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True, max_side_length: int = 0): def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True):
"""old handler for API""" """old handler for API"""
args = scripts.scripts_postproc.create_args_for_run({ args = scripts.scripts_postproc.create_args_for_run({
"Upscale": { "Upscale": {
"upscale_enabled": True,
"upscale_mode": resize_mode, "upscale_mode": resize_mode,
"upscale_by": upscaling_resize, "upscale_by": upscaling_resize,
"max_side_length": max_side_length,
"upscale_to_width": upscaling_resize_w, "upscale_to_width": upscaling_resize_w,
"upscale_to_height": upscaling_resize_h, "upscale_to_height": upscaling_resize_h,
"upscale_crop": upscaling_crop, "upscale_crop": upscaling_crop,

View File

@ -16,7 +16,7 @@ from skimage import exposure
from typing import Any from typing import Any
import modules.sd_hijack import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, infotext_utils, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng, profiling from modules import devices, prompt_parser, masking, sd_samplers, lowvram, infotext_utils, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng
from modules.rng import slerp # noqa: F401 from modules.rng import slerp # noqa: F401
from modules.sd_hijack import model_hijack from modules.sd_hijack import model_hijack
from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes
@ -115,17 +115,20 @@ def txt2img_image_conditioning(sd_model, x, width, height):
return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device) return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)
else: else:
if sd_model.is_sdxl_inpaint: sd = sd_model.model.state_dict()
# The "masked-image" in this case will just be all 0.5 since the entire image is masked. diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5 if diffusion_model_input is not None:
image_conditioning = images_tensor_to_samples(image_conditioning, if diffusion_model_input.shape[1] == 9:
approximation_indexes.get(opts.sd_vae_encode_method)) # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
image_conditioning = images_tensor_to_samples(image_conditioning,
approximation_indexes.get(opts.sd_vae_encode_method))
# Add the fake full 1s mask to the first dimension. # Add the fake full 1s mask to the first dimension.
image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
image_conditioning = image_conditioning.to(x.dtype) image_conditioning = image_conditioning.to(x.dtype)
return image_conditioning return image_conditioning
# Dummy zero conditioning if we're not using inpainting or unclip models. # Dummy zero conditioning if we're not using inpainting or unclip models.
# Still takes up a bit of memory, but no encoder call. # Still takes up a bit of memory, but no encoder call.
@ -149,7 +152,6 @@ class StableDiffusionProcessing:
seed_resize_from_w: int = -1 seed_resize_from_w: int = -1
seed_enable_extras: bool = True seed_enable_extras: bool = True
sampler_name: str = None sampler_name: str = None
scheduler: str = None
batch_size: int = 1 batch_size: int = 1
n_iter: int = 1 n_iter: int = 1
steps: int = 50 steps: int = 50
@ -235,6 +237,11 @@ class StableDiffusionProcessing:
self.styles = [] self.styles = []
self.sampler_noise_scheduler_override = None self.sampler_noise_scheduler_override = None
self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond
self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn
self.s_tmin = self.s_tmin if self.s_tmin is not None else opts.s_tmin
self.s_tmax = (self.s_tmax if self.s_tmax is not None else opts.s_tmax) or float('inf')
self.s_noise = self.s_noise if self.s_noise is not None else opts.s_noise
self.extra_generation_params = self.extra_generation_params or {} self.extra_generation_params = self.extra_generation_params or {}
self.override_settings = self.override_settings or {} self.override_settings = self.override_settings or {}
@ -251,13 +258,6 @@ class StableDiffusionProcessing:
self.cached_uc = StableDiffusionProcessing.cached_uc self.cached_uc = StableDiffusionProcessing.cached_uc
self.cached_c = StableDiffusionProcessing.cached_c self.cached_c = StableDiffusionProcessing.cached_c
def fill_fields_from_opts(self):
self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond
self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn
self.s_tmin = self.s_tmin if self.s_tmin is not None else opts.s_tmin
self.s_tmax = (self.s_tmax if self.s_tmax is not None else opts.s_tmax) or float('inf')
self.s_noise = self.s_noise if self.s_noise is not None else opts.s_noise
@property @property
def sd_model(self): def sd_model(self):
return shared.sd_model return shared.sd_model
@ -389,8 +389,11 @@ class StableDiffusionProcessing:
if self.sampler.conditioning_key == "crossattn-adm": if self.sampler.conditioning_key == "crossattn-adm":
return self.unclip_image_conditioning(source_image) return self.unclip_image_conditioning(source_image)
if self.sampler.model_wrap.inner_model.is_sdxl_inpaint: sd = self.sampler.model_wrap.inner_model.model.state_dict()
return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask) diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
if diffusion_model_input is not None:
if diffusion_model_input.shape[1] == 9:
return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
# Dummy zero conditioning if we're not using inpainting or depth model. # Dummy zero conditioning if we're not using inpainting or depth model.
return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1) return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
@ -565,7 +568,7 @@ class Processed:
self.all_negative_prompts = all_negative_prompts or p.all_negative_prompts or [self.negative_prompt] self.all_negative_prompts = all_negative_prompts or p.all_negative_prompts or [self.negative_prompt]
self.all_seeds = all_seeds or p.all_seeds or [self.seed] self.all_seeds = all_seeds or p.all_seeds or [self.seed]
self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed] self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed]
self.infotexts = infotexts or [info] * len(images_list) self.infotexts = infotexts or [info]
self.version = program_version() self.version = program_version()
def js(self): def js(self):
@ -604,7 +607,7 @@ class Processed:
"version": self.version, "version": self.version,
} }
return json.dumps(obj, default=lambda o: None) return json.dumps(obj)
def infotext(self, p: StableDiffusionProcessing, index): def infotext(self, p: StableDiffusionProcessing, index):
return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size) return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)
@ -625,9 +628,6 @@ class DecodedSamples(list):
def decode_latent_batch(model, batch, target_device=None, check_for_nans=False): def decode_latent_batch(model, batch, target_device=None, check_for_nans=False):
samples = DecodedSamples() samples = DecodedSamples()
if check_for_nans:
devices.test_for_nans(batch, "unet")
for i in range(batch.shape[0]): for i in range(batch.shape[0]):
sample = decode_first_stage(model, batch[i:i + 1])[0] sample = decode_first_stage(model, batch[i:i + 1])[0]
@ -703,53 +703,7 @@ def program_version():
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None): def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None):
""" if index is None:
this function is used to generate the infotext that is stored in the generated images; it contains the parameters that are required to generate the image
Args:
p: StableDiffusionProcessing
all_prompts: list[str]
all_seeds: list[int]
all_subseeds: list[int]
comments: list[str]
iteration: int
position_in_batch: int
use_main_prompt: bool
index: int
all_negative_prompts: list[str]
Returns: str
Extra generation params
The p.extra_generation_params dictionary allows additional parameters to be added to the infotext;
this can be used by the base webui or by extensions.
To add a new entry, add a new key-value pair; the dictionary key will be used as the key of the parameter in the infotext.
The value can be defined as:
- str | None
- List[str|None]
- callable func(**kwargs) -> str | None
When defined as a string, it will be used as is without extra processing; this is the most common use case.
Defining it as a list allows for a parameter that changes across images in the job, for example the 'Seed' parameter.
The list should have the same length as the total number of images in the entire job.
Defining it as a callable function allows for a parameter that cannot be generated earlier, or when extra logic is required.
For example 'Hires prompt': the hr_prompt might be changed by processes in the pipeline or by extensions
and may vary across different images, so defining it as a static string or list would not work.
The function takes locals() as **kwargs and as such has access to variables like 'p' and 'index'.
the base signature of the function should be:
func(**kwargs) -> str | None
optionally it can have additional arguments that will be used in the function:
func(p, index, **kwargs) -> str | None
note: for better future compatibility, even though this function has access to all variables in locals(),
it is recommended to only use the arguments present in the function signature of create_infotext.
For actual implementation examples, see StableDiffusionProcessingTxt2Img.init > get_hr_prompt.
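An illustrative example (the names below are made up):
p.extra_generation_params['My param'] = [str(v) for v in per_image_values] # list: one value per image in the job
p.extra_generation_params['My param'] = lambda p, index, **kwargs: compute_my_param(p, index) # callable: resolved when the infotext is built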
"""
if use_main_prompt:
index = 0
elif index is None:
index = position_in_batch + iteration * p.batch_size index = position_in_batch + iteration * p.batch_size
if all_negative_prompts is None: if all_negative_prompts is None:
@ -760,9 +714,6 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
token_merging_ratio = p.get_token_merging_ratio() token_merging_ratio = p.get_token_merging_ratio()
token_merging_ratio_hr = p.get_token_merging_ratio(for_hr=True) token_merging_ratio_hr = p.get_token_merging_ratio(for_hr=True)
prompt_text = p.main_prompt if use_main_prompt else all_prompts[index]
negative_prompt = p.main_negative_prompt if use_main_prompt else all_negative_prompts[index]
uses_ensd = opts.eta_noise_seed_delta != 0 uses_ensd = opts.eta_noise_seed_delta != 0
if uses_ensd: if uses_ensd:
uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p) uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p)
@ -770,7 +721,6 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
generation_params = { generation_params = {
"Steps": p.steps, "Steps": p.steps,
"Sampler": p.sampler_name, "Sampler": p.sampler_name,
"Schedule type": p.scheduler,
"CFG scale": p.cfg_scale, "CFG scale": p.cfg_scale,
"Image CFG scale": getattr(p, 'image_cfg_scale', None), "Image CFG scale": getattr(p, 'image_cfg_scale', None),
"Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index], "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index],
@ -793,25 +743,17 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
"Token merging ratio hr": None if not enable_hr or token_merging_ratio_hr == 0 else token_merging_ratio_hr, "Token merging ratio hr": None if not enable_hr or token_merging_ratio_hr == 0 else token_merging_ratio_hr,
"Init image hash": getattr(p, 'init_img_hash', None), "Init image hash": getattr(p, 'init_img_hash', None),
"RNG": opts.randn_source if opts.randn_source != "GPU" else None, "RNG": opts.randn_source if opts.randn_source != "GPU" else None,
"NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond,
"Tiling": "True" if p.tiling else None, "Tiling": "True" if p.tiling else None,
**p.extra_generation_params, **p.extra_generation_params,
"Version": program_version() if opts.add_version_to_infotext else None, "Version": program_version() if opts.add_version_to_infotext else None,
"User": p.user if opts.add_user_name_to_info else None, "User": p.user if opts.add_user_name_to_info else None,
} }
for key, value in generation_params.items():
try:
if isinstance(value, list):
generation_params[key] = value[index]
elif callable(value):
generation_params[key] = value(**locals())
except Exception:
errors.report(f'Error creating infotext for key "{key}"', exc_info=True)
generation_params[key] = None
generation_params_text = ", ".join([k if k == v else f'{k}: {infotext_utils.quote(v)}' for k, v in generation_params.items() if v is not None]) generation_params_text = ", ".join([k if k == v else f'{k}: {infotext_utils.quote(v)}' for k, v in generation_params.items() if v is not None])
negative_prompt_text = f"\nNegative prompt: {negative_prompt}" if negative_prompt else "" prompt_text = p.main_prompt if use_main_prompt else all_prompts[index]
negative_prompt_text = f"\nNegative prompt: {p.main_negative_prompt if use_main_prompt else all_negative_prompts[index]}" if all_negative_prompts[index] else ""
return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip() return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip()
@ -840,11 +782,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
sd_models.apply_token_merging(p.sd_model, p.get_token_merging_ratio()) sd_models.apply_token_merging(p.sd_model, p.get_token_merging_ratio())
# backwards compatibility, fix sampler and scheduler if invalid res = process_images_inner(p)
sd_samplers.fix_p_invalid_sampler_and_scheduler(p)
with profiling.Profiler():
res = process_images_inner(p)
finally: finally:
sd_models.apply_token_merging(p.sd_model, 0) sd_models.apply_token_merging(p.sd_model, 0)
@ -884,9 +822,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if p.refiner_checkpoint_info is None: if p.refiner_checkpoint_info is None:
raise Exception(f'Could not find checkpoint with name {p.refiner_checkpoint}') raise Exception(f'Could not find checkpoint with name {p.refiner_checkpoint}')
if hasattr(shared.sd_model, 'fix_dimensions'):
p.width, p.height = shared.sd_model.fix_dimensions(p.width, p.height)
p.sd_model_name = shared.sd_model.sd_checkpoint_info.name_for_extra p.sd_model_name = shared.sd_model.sd_checkpoint_info.name_for_extra
p.sd_model_hash = shared.sd_model.sd_model_hash p.sd_model_hash = shared.sd_model.sd_model_hash
p.sd_vae_name = sd_vae.get_loaded_vae_name() p.sd_vae_name = sd_vae.get_loaded_vae_name()
@ -895,7 +830,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
modules.sd_hijack.model_hijack.apply_circular(p.tiling) modules.sd_hijack.model_hijack.apply_circular(p.tiling)
modules.sd_hijack.model_hijack.clear_comments() modules.sd_hijack.model_hijack.clear_comments()
p.fill_fields_from_opts()
p.setup_prompts() p.setup_prompts()
if isinstance(seed, list): if isinstance(seed, list):
@ -945,8 +879,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
p.seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size] p.seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
p.subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] p.subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
latent_channels = getattr(shared.sd_model, 'latent_channels', opt_C) p.rng = rng.ImageRNG((opt_C, p.height // opt_f, p.width // opt_f), p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w)
p.rng = rng.ImageRNG((latent_channels, p.height // opt_f, p.width // opt_f), p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w)
if p.scripts is not None: if p.scripts is not None:
p.scripts.before_process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds) p.scripts.before_process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds)
@ -963,22 +896,22 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if p.scripts is not None: if p.scripts is not None:
p.scripts.process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds) p.scripts.process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds)
p.setup_conds()
p.extra_generation_params.update(model_hijack.extra_generation_params)
# params.txt should be saved after scripts.process_batch, since the # params.txt should be saved after scripts.process_batch, since the
# infotext could be modified by that callback # infotext could be modified by that callback
# Example: a wildcard processed by process_batch sets an extra model # Example: a wildcard processed by process_batch sets an extra model
# strength, which is saved as "Model Strength: 1.0" in the infotext # strength, which is saved as "Model Strength: 1.0" in the infotext
if n == 0 and not cmd_opts.no_prompt_history: if n == 0:
with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file: with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
processed = Processed(p, []) processed = Processed(p, [])
file.write(processed.infotext(p, 0)) file.write(processed.infotext(p, 0))
p.setup_conds()
for comment in model_hijack.comments: for comment in model_hijack.comments:
p.comment(comment) p.comment(comment)
p.extra_generation_params.update(model_hijack.extra_generation_params)
if p.n_iter > 1: if p.n_iter > 1:
shared.state.job = f"Batch {n+1} out of {p.n_iter}" shared.state.job = f"Batch {n+1} out of {p.n_iter}"
@ -995,8 +928,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if getattr(samples_ddim, 'already_decoded', False): if getattr(samples_ddim, 'already_decoded', False):
x_samples_ddim = samples_ddim x_samples_ddim = samples_ddim
else: else:
devices.test_for_nans(samples_ddim, "unet")
if opts.sd_vae_decode_method != 'Full': if opts.sd_vae_decode_method != 'Full':
p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method
x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True) x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
@ -1175,7 +1106,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
hr_resize_y: int = 0 hr_resize_y: int = 0
hr_checkpoint_name: str = None hr_checkpoint_name: str = None
hr_sampler_name: str = None hr_sampler_name: str = None
hr_scheduler: str = None
hr_prompt: str = '' hr_prompt: str = ''
hr_negative_prompt: str = '' hr_negative_prompt: str = ''
force_task_id: str = None force_task_id: str = None
@ -1264,21 +1194,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name: if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name:
self.extra_generation_params["Hires sampler"] = self.hr_sampler_name self.extra_generation_params["Hires sampler"] = self.hr_sampler_name
def get_hr_prompt(p, index, prompt_text, **kwargs): if tuple(self.hr_prompt) != tuple(self.prompt):
hr_prompt = p.all_hr_prompts[index] self.extra_generation_params["Hires prompt"] = self.hr_prompt
return hr_prompt if hr_prompt != prompt_text else None
def get_hr_negative_prompt(p, index, negative_prompt, **kwargs): if tuple(self.hr_negative_prompt) != tuple(self.negative_prompt):
hr_negative_prompt = p.all_hr_negative_prompts[index] self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt
return hr_negative_prompt if hr_negative_prompt != negative_prompt else None
self.extra_generation_params["Hires prompt"] = get_hr_prompt
self.extra_generation_params["Hires negative prompt"] = get_hr_negative_prompt
self.extra_generation_params["Hires schedule type"] = None # to be set in sd_samplers_kdiffusion.py
if self.hr_scheduler is None:
self.hr_scheduler = self.scheduler
self.latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") self.latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
if self.enable_hr and self.latent_scale_mode is None: if self.enable_hr and self.latent_scale_mode is None:
@ -1334,15 +1254,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
# here we generate an image normally # here we generate an image normally
x = self.rng.next() x = self.rng.next()
if self.scripts is not None:
self.scripts.process_before_every_sampling(
p=self,
x=x,
noise=x,
c=conditioning,
uc=unconditional_conditioning
)
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
del x del x
@ -1443,13 +1354,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
if self.scripts is not None: if self.scripts is not None:
self.scripts.before_hr(self) self.scripts.before_hr(self)
self.scripts.process_before_every_sampling(
p=self,
x=samples,
noise=noise,
c=self.hr_c,
uc=self.hr_uc,
)
samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)
@ -1636,23 +1540,16 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
if self.inpaint_full_res: if self.inpaint_full_res:
self.mask_for_overlay = image_mask self.mask_for_overlay = image_mask
mask = image_mask.convert('L') mask = image_mask.convert('L')
crop_region = masking.get_crop_region_v2(mask, self.inpaint_full_res_padding) crop_region = masking.get_crop_region(mask, self.inpaint_full_res_padding)
if crop_region: crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height) x1, y1, x2, y2 = crop_region
x1, y1, x2, y2 = crop_region
mask = mask.crop(crop_region) mask = mask.crop(crop_region)
image_mask = images.resize_image(2, mask, self.width, self.height) image_mask = images.resize_image(2, mask, self.width, self.height)
self.paste_to = (x1, y1, x2-x1, y2-y1) self.paste_to = (x1, y1, x2-x1, y2-y1)
self.extra_generation_params["Inpaint area"] = "Only masked"
self.extra_generation_params["Masked area padding"] = self.inpaint_full_res_padding self.extra_generation_params["Inpaint area"] = "Only masked"
else: self.extra_generation_params["Masked area padding"] = self.inpaint_full_res_padding
crop_region = None
image_mask = None
self.mask_for_overlay = None
self.inpaint_full_res = False
message = 'Unable to perform "Inpaint Only mask" because mask is blank, switch to img2img mode.'
model_hijack.comments.append(message)
logging.info(message)
else: else:
image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height) image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
np_mask = np.array(image_mask) np_mask = np.array(image_mask)
@ -1680,8 +1577,6 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
image = images.resize_image(self.resize_mode, image, self.width, self.height) image = images.resize_image(self.resize_mode, image, self.width, self.height)
if image_mask is not None: if image_mask is not None:
if self.mask_for_overlay.size != (image.width, image.height):
self.mask_for_overlay = images.resize_image(self.resize_mode, self.mask_for_overlay, image.width, image.height)
image_masked = Image.new('RGBa', (image.width, image.height)) image_masked = Image.new('RGBa', (image.width, image.height))
image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L'))) image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L')))
@ -1740,10 +1635,10 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
latmask = latmask[0] latmask = latmask[0]
if self.mask_round: if self.mask_round:
latmask = np.around(latmask) latmask = np.around(latmask)
latmask = np.tile(latmask[None], (self.init_latent.shape[1], 1, 1)) latmask = np.tile(latmask[None], (4, 1, 1))
self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(devices.dtype) self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype)
self.nmask = torch.asarray(latmask).to(shared.device).type(devices.dtype) self.nmask = torch.asarray(latmask).to(shared.device).type(self.sd_model.dtype)
# this needs to be fixed to be done in sample() using actual seeds for batches # this needs to be fixed to be done in sample() using actual seeds for batches
if self.inpainting_fill == 2: if self.inpainting_fill == 2:
@ -1763,14 +1658,6 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier
x *= self.initial_noise_multiplier x *= self.initial_noise_multiplier
if self.scripts is not None:
self.scripts.process_before_every_sampling(
p=self,
x=self.init_latent,
noise=x,
c=conditioning,
uc=unconditional_conditioning
)
samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)
if self.mask is not None: if self.mask is not None:

View File

@ -26,13 +26,6 @@ class ScriptStripComments(scripts.Script):
p.main_prompt = strip_comments(p.main_prompt) p.main_prompt = strip_comments(p.main_prompt)
p.main_negative_prompt = strip_comments(p.main_negative_prompt) p.main_negative_prompt = strip_comments(p.main_negative_prompt)
if getattr(p, 'enable_hr', False):
p.all_hr_prompts = [strip_comments(x) for x in p.all_hr_prompts]
p.all_hr_negative_prompts = [strip_comments(x) for x in p.all_hr_negative_prompts]
p.hr_prompt = strip_comments(p.hr_prompt)
p.hr_negative_prompt = strip_comments(p.hr_negative_prompt)
def before_token_counter(params: script_callbacks.BeforeTokenCounterParams): def before_token_counter(params: script_callbacks.BeforeTokenCounterParams):
if not shared.opts.enable_prompt_comments: if not shared.opts.enable_prompt_comments:

View File

@ -1,45 +0,0 @@
import gradio as gr
from modules import scripts, sd_samplers, sd_schedulers, shared
from modules.infotext_utils import PasteField
from modules.ui_components import FormRow, FormGroup
class ScriptSampler(scripts.ScriptBuiltinUI):
section = "sampler"
def __init__(self):
self.steps = None
self.sampler_name = None
self.scheduler = None
def title(self):
return "Sampler"
def ui(self, is_img2img):
sampler_names = [x.name for x in sd_samplers.visible_samplers()]
scheduler_names = [x.label for x in sd_schedulers.schedulers]
if shared.opts.samplers_in_dropdown:
with FormRow(elem_id=f"sampler_selection_{self.tabname}"):
self.sampler_name = gr.Dropdown(label='Sampling method', elem_id=f"{self.tabname}_sampling", choices=sampler_names, value=sampler_names[0])
self.scheduler = gr.Dropdown(label='Schedule type', elem_id=f"{self.tabname}_scheduler", choices=scheduler_names, value=scheduler_names[0])
self.steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{self.tabname}_steps", label="Sampling steps", value=20)
else:
with FormGroup(elem_id=f"sampler_selection_{self.tabname}"):
self.steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{self.tabname}_steps", label="Sampling steps", value=20)
self.sampler_name = gr.Radio(label='Sampling method', elem_id=f"{self.tabname}_sampling", choices=sampler_names, value=sampler_names[0])
self.scheduler = gr.Dropdown(label='Schedule type', elem_id=f"{self.tabname}_scheduler", choices=scheduler_names, value=scheduler_names[0])
self.infotext_fields = [
PasteField(self.steps, "Steps", api="steps"),
PasteField(self.sampler_name, sd_samplers.get_sampler_from_infotext, api="sampler_name"),
PasteField(self.scheduler, sd_samplers.get_scheduler_from_infotext, api="scheduler"),
]
return self.steps, self.sampler_name, self.scheduler
def setup(self, p, steps, sampler_name, scheduler):
p.steps = steps
p.sampler_name = sampler_name
p.scheduler = scheduler

View File

@ -1,46 +0,0 @@
import torch
from modules import shared, ui_gradio_extensions
class Profiler:
def __init__(self):
if not shared.opts.profiling_enable:
self.profiler = None
return
activities = []
if "CPU" in shared.opts.profiling_activities:
activities.append(torch.profiler.ProfilerActivity.CPU)
if "CUDA" in shared.opts.profiling_activities:
activities.append(torch.profiler.ProfilerActivity.CUDA)
if not activities:
self.profiler = None
return
self.profiler = torch.profiler.profile(
activities=activities,
record_shapes=shared.opts.profiling_record_shapes,
profile_memory=shared.opts.profiling_profile_memory,
with_stack=shared.opts.profiling_with_stack
)
def __enter__(self):
if self.profiler:
self.profiler.__enter__()
return self
def __exit__(self, exc_type, exc, exc_tb):
if self.profiler:
shared.state.textinfo = "Finishing profile..."
self.profiler.__exit__(exc_type, exc, exc_tb)
self.profiler.export_chrome_trace(shared.opts.profiling_filename)
def webpath():
return ui_gradio_extensions.webpath(shared.opts.profiling_filename)
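
A minimal usage sketch for the Profiler context manager above. The file path is not visible in this compare view, so the import path modules.profiling is an assumption; when shared.opts.profiling_enable is off, entering the context is a no-op.

from modules.profiling import Profiler  # assumed module path; not shown in this view

def run_with_profile(fn, *args, **kwargs):
    # With profiling enabled, a chrome trace is exported to
    # shared.opts.profiling_filename on exit; otherwise this is a plain call.
    with Profiler():
        return fn(*args, **kwargs)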

View File

@ -268,7 +268,7 @@ def get_multicond_learned_conditioning(model, prompts, steps, hires_steps=None,
class DictWithShape(dict): class DictWithShape(dict):
def __init__(self, x, shape=None): def __init__(self, x, shape):
super().__init__() super().__init__()
self.update(x) self.update(x)

View File

@ -34,7 +34,7 @@ def randn_local(seed, shape):
def randn_like(x): def randn_like(x):
"""Generate a tensor with random numbers from a normal distribution using the previously initialized generator. """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator.
Use either randn() or manual_seed() to initialize the generator.""" Use either randn() or manual_seed() to initialize the generator."""
@ -48,7 +48,7 @@ def randn_like(x):
def randn_without_seed(shape, generator=None): def randn_without_seed(shape, generator=None):
"""Generate a tensor with random numbers from a normal distribution using the previously initialized generator. """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator.
Use either randn() or manual_seed() to initialize the generator.""" Use either randn() or manual_seed() to initialize the generator."""

View File

@ -64,8 +64,8 @@ class RestrictedUnpickler(pickle.Unpickler):
raise Exception(f"global '{module}/{name}' is forbidden") raise Exception(f"global '{module}/{name}' is forbidden")
# Regular expression that accepts 'dirname/version', 'dirname/byteorder', 'dirname/data.pkl', '.data/serialization_id', and 'dirname/data/<number>' # Regular expression that accepts 'dirname/version', 'dirname/data.pkl', and 'dirname/data/<number>'
allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|version|byteorder|.data/serialization_id|(data\.pkl))$") allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|version|(data\.pkl))$")
data_pkl_re = re.compile(r"^([^/]+)/data\.pkl$") data_pkl_re = re.compile(r"^([^/]+)/data\.pkl$")
def check_zip_filenames(filename, names): def check_zip_filenames(filename, names):
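
The master-side pattern above additionally accepts 'byteorder' and '.data/serialization_id' entries, which newer torch checkpoint archives may contain. A quick self-contained check of which archive member names pass it ('archive' is just an illustrative top-level directory name):

import re

# master-side pattern from the hunk above
allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|version|byteorder|.data/serialization_id|(data\.pkl))$")

candidates = [
    "archive/data.pkl",                # pickled tensor metadata - allowed by both patterns
    "archive/version",                 # allowed by both patterns
    "archive/data/0",                  # raw tensor storage - allowed by both patterns
    "archive/byteorder",               # allowed only by the master-side pattern
    "archive/.data/serialization_id",  # allowed only by the master-side pattern
    "archive/code/payload.py",         # rejected
]
for name in candidates:
    print(name, bool(allowed_zip_names_re.match(name)))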

View File

@ -1,14 +1,13 @@
from __future__ import annotations
import dataclasses import dataclasses
import inspect import inspect
import os import os
from collections import namedtuple
from typing import Optional, Any from typing import Optional, Any
from fastapi import FastAPI from fastapi import FastAPI
from gradio import Blocks from gradio import Blocks
from modules import errors, timer, extensions, shared, util from modules import errors, timer
def report_exception(c, job): def report_exception(c, job):
@ -117,105 +116,7 @@ class BeforeTokenCounterParams:
is_positive: bool = True is_positive: bool = True
@dataclasses.dataclass ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
class ScriptCallback:
script: str
callback: any
name: str = "unnamed"
def add_callback(callbacks, fun, *, name=None, category='unknown', filename=None):
if filename is None:
stack = [x for x in inspect.stack() if x.filename != __file__]
filename = stack[0].filename if stack else 'unknown file'
extension = extensions.find_extension(filename)
extension_name = extension.canonical_name if extension else 'base'
callback_name = f"{extension_name}/{os.path.basename(filename)}/{category}"
if name is not None:
callback_name += f'/{name}'
unique_callback_name = callback_name
for index in range(1000):
existing = any(x.name == unique_callback_name for x in callbacks)
if not existing:
break
unique_callback_name = f'{callback_name}-{index+1}'
callbacks.append(ScriptCallback(filename, fun, unique_callback_name))
def sort_callbacks(category, unordered_callbacks, *, enable_user_sort=True):
callbacks = unordered_callbacks.copy()
callback_lookup = {x.name: x for x in callbacks}
dependencies = {}
order_instructions = {}
for extension in extensions.extensions:
for order_instruction in extension.metadata.list_callback_order_instructions():
if order_instruction.name in callback_lookup:
if order_instruction.name not in order_instructions:
order_instructions[order_instruction.name] = []
order_instructions[order_instruction.name].append(order_instruction)
if order_instructions:
for callback in callbacks:
dependencies[callback.name] = []
for callback in callbacks:
for order_instruction in order_instructions.get(callback.name, []):
for after in order_instruction.after:
if after not in callback_lookup:
continue
dependencies[callback.name].append(after)
for before in order_instruction.before:
if before not in callback_lookup:
continue
dependencies[before].append(callback.name)
sorted_names = util.topological_sort(dependencies)
callbacks = [callback_lookup[x] for x in sorted_names]
if enable_user_sort:
for name in reversed(getattr(shared.opts, 'prioritized_callbacks_' + category, [])):
index = next((i for i, callback in enumerate(callbacks) if callback.name == name), None)
if index is not None:
callbacks.insert(0, callbacks.pop(index))
return callbacks
def ordered_callbacks(category, unordered_callbacks=None, *, enable_user_sort=True):
if unordered_callbacks is None:
unordered_callbacks = callback_map.get('callbacks_' + category, [])
if not enable_user_sort:
return sort_callbacks(category, unordered_callbacks, enable_user_sort=False)
callbacks = ordered_callbacks_map.get(category)
if callbacks is not None and len(callbacks) == len(unordered_callbacks):
return callbacks
callbacks = sort_callbacks(category, unordered_callbacks)
ordered_callbacks_map[category] = callbacks
return callbacks
def enumerate_callbacks():
for category, callbacks in callback_map.items():
if category.startswith('callbacks_'):
category = category[10:]
yield category, callbacks
callback_map = dict( callback_map = dict(
callbacks_app_started=[], callbacks_app_started=[],
callbacks_model_loaded=[], callbacks_model_loaded=[],
@ -240,18 +141,14 @@ callback_map = dict(
callbacks_before_token_counter=[], callbacks_before_token_counter=[],
) )
ordered_callbacks_map = {}
def clear_callbacks(): def clear_callbacks():
for callback_list in callback_map.values(): for callback_list in callback_map.values():
callback_list.clear() callback_list.clear()
ordered_callbacks_map.clear()
def app_started_callback(demo: Optional[Blocks], app: FastAPI): def app_started_callback(demo: Optional[Blocks], app: FastAPI):
for c in ordered_callbacks('app_started'): for c in callback_map['callbacks_app_started']:
try: try:
c.callback(demo, app) c.callback(demo, app)
timer.startup_timer.record(os.path.basename(c.script)) timer.startup_timer.record(os.path.basename(c.script))
@ -260,7 +157,7 @@ def app_started_callback(demo: Optional[Blocks], app: FastAPI):
def app_reload_callback(): def app_reload_callback():
for c in ordered_callbacks('on_reload'): for c in callback_map['callbacks_on_reload']:
try: try:
c.callback() c.callback()
except Exception: except Exception:
@ -268,7 +165,7 @@ def app_reload_callback():
def model_loaded_callback(sd_model): def model_loaded_callback(sd_model):
for c in ordered_callbacks('model_loaded'): for c in callback_map['callbacks_model_loaded']:
try: try:
c.callback(sd_model) c.callback(sd_model)
except Exception: except Exception:
@ -278,7 +175,7 @@ def model_loaded_callback(sd_model):
def ui_tabs_callback(): def ui_tabs_callback():
res = [] res = []
for c in ordered_callbacks('ui_tabs'): for c in callback_map['callbacks_ui_tabs']:
try: try:
res += c.callback() or [] res += c.callback() or []
except Exception: except Exception:
@ -288,7 +185,7 @@ def ui_tabs_callback():
def ui_train_tabs_callback(params: UiTrainTabParams): def ui_train_tabs_callback(params: UiTrainTabParams):
for c in ordered_callbacks('ui_train_tabs'): for c in callback_map['callbacks_ui_train_tabs']:
try: try:
c.callback(params) c.callback(params)
except Exception: except Exception:
@ -296,7 +193,7 @@ def ui_train_tabs_callback(params: UiTrainTabParams):
def ui_settings_callback(): def ui_settings_callback():
for c in ordered_callbacks('ui_settings'): for c in callback_map['callbacks_ui_settings']:
try: try:
c.callback() c.callback()
except Exception: except Exception:
@ -304,7 +201,7 @@ def ui_settings_callback():
def before_image_saved_callback(params: ImageSaveParams): def before_image_saved_callback(params: ImageSaveParams):
for c in ordered_callbacks('before_image_saved'): for c in callback_map['callbacks_before_image_saved']:
try: try:
c.callback(params) c.callback(params)
except Exception: except Exception:
@ -312,7 +209,7 @@ def before_image_saved_callback(params: ImageSaveParams):
def image_saved_callback(params: ImageSaveParams): def image_saved_callback(params: ImageSaveParams):
for c in ordered_callbacks('image_saved'): for c in callback_map['callbacks_image_saved']:
try: try:
c.callback(params) c.callback(params)
except Exception: except Exception:
@ -320,7 +217,7 @@ def image_saved_callback(params: ImageSaveParams):
def extra_noise_callback(params: ExtraNoiseParams): def extra_noise_callback(params: ExtraNoiseParams):
for c in ordered_callbacks('extra_noise'): for c in callback_map['callbacks_extra_noise']:
try: try:
c.callback(params) c.callback(params)
except Exception: except Exception:
@ -328,7 +225,7 @@ def extra_noise_callback(params: ExtraNoiseParams):
def cfg_denoiser_callback(params: CFGDenoiserParams): def cfg_denoiser_callback(params: CFGDenoiserParams):
for c in ordered_callbacks('cfg_denoiser'): for c in callback_map['callbacks_cfg_denoiser']:
try: try:
c.callback(params) c.callback(params)
except Exception: except Exception:
@ -336,7 +233,7 @@ def cfg_denoiser_callback(params: CFGDenoiserParams):
def cfg_denoised_callback(params: CFGDenoisedParams): def cfg_denoised_callback(params: CFGDenoisedParams):
for c in ordered_callbacks('cfg_denoised'): for c in callback_map['callbacks_cfg_denoised']:
try: try:
c.callback(params) c.callback(params)
except Exception: except Exception:
@ -344,7 +241,7 @@ def cfg_denoised_callback(params: CFGDenoisedParams):
def cfg_after_cfg_callback(params: AfterCFGCallbackParams): def cfg_after_cfg_callback(params: AfterCFGCallbackParams):
for c in ordered_callbacks('cfg_after_cfg'): for c in callback_map['callbacks_cfg_after_cfg']:
try: try:
c.callback(params) c.callback(params)
except Exception: except Exception:
@ -352,7 +249,7 @@ def cfg_after_cfg_callback(params: AfterCFGCallbackParams):
def before_component_callback(component, **kwargs): def before_component_callback(component, **kwargs):
for c in ordered_callbacks('before_component'): for c in callback_map['callbacks_before_component']:
try: try:
c.callback(component, **kwargs) c.callback(component, **kwargs)
except Exception: except Exception:
@ -360,7 +257,7 @@ def before_component_callback(component, **kwargs):
def after_component_callback(component, **kwargs): def after_component_callback(component, **kwargs):
for c in ordered_callbacks('after_component'): for c in callback_map['callbacks_after_component']:
try: try:
c.callback(component, **kwargs) c.callback(component, **kwargs)
except Exception: except Exception:
@ -368,7 +265,7 @@ def after_component_callback(component, **kwargs):
def image_grid_callback(params: ImageGridLoopParams): def image_grid_callback(params: ImageGridLoopParams):
for c in ordered_callbacks('image_grid'): for c in callback_map['callbacks_image_grid']:
try: try:
c.callback(params) c.callback(params)
except Exception: except Exception:
@ -376,7 +273,7 @@ def image_grid_callback(params: ImageGridLoopParams):
def infotext_pasted_callback(infotext: str, params: dict[str, Any]): def infotext_pasted_callback(infotext: str, params: dict[str, Any]):
for c in ordered_callbacks('infotext_pasted'): for c in callback_map['callbacks_infotext_pasted']:
try: try:
c.callback(infotext, params) c.callback(infotext, params)
except Exception: except Exception:
@ -384,7 +281,7 @@ def infotext_pasted_callback(infotext: str, params: dict[str, Any]):
def script_unloaded_callback(): def script_unloaded_callback():
for c in reversed(ordered_callbacks('script_unloaded')): for c in reversed(callback_map['callbacks_script_unloaded']):
try: try:
c.callback() c.callback()
except Exception: except Exception:
@ -392,7 +289,7 @@ def script_unloaded_callback():
def before_ui_callback(): def before_ui_callback():
for c in reversed(ordered_callbacks('before_ui')): for c in reversed(callback_map['callbacks_before_ui']):
try: try:
c.callback() c.callback()
except Exception: except Exception:
@ -402,7 +299,7 @@ def before_ui_callback():
def list_optimizers_callback(): def list_optimizers_callback():
res = [] res = []
for c in ordered_callbacks('list_optimizers'): for c in callback_map['callbacks_list_optimizers']:
try: try:
c.callback(res) c.callback(res)
except Exception: except Exception:
@ -414,7 +311,7 @@ def list_optimizers_callback():
def list_unets_callback(): def list_unets_callback():
res = [] res = []
for c in ordered_callbacks('list_unets'): for c in callback_map['callbacks_list_unets']:
try: try:
c.callback(res) c.callback(res)
except Exception: except Exception:
@ -424,13 +321,20 @@ def list_unets_callback():
def before_token_counter_callback(params: BeforeTokenCounterParams): def before_token_counter_callback(params: BeforeTokenCounterParams):
for c in ordered_callbacks('before_token_counter'): for c in callback_map['callbacks_before_token_counter']:
try: try:
c.callback(params) c.callback(params)
except Exception: except Exception:
report_exception(c, 'before_token_counter') report_exception(c, 'before_token_counter')
def add_callback(callbacks, fun):
stack = [x for x in inspect.stack() if x.filename != __file__]
filename = stack[0].filename if stack else 'unknown file'
callbacks.append(ScriptCallback(filename, fun))
def remove_current_script_callbacks(): def remove_current_script_callbacks():
stack = [x for x in inspect.stack() if x.filename != __file__] stack = [x for x in inspect.stack() if x.filename != __file__]
filename = stack[0].filename if stack else 'unknown file' filename = stack[0].filename if stack else 'unknown file'
@ -439,38 +343,32 @@ def remove_current_script_callbacks():
for callback_list in callback_map.values(): for callback_list in callback_map.values():
for callback_to_remove in [cb for cb in callback_list if cb.script == filename]: for callback_to_remove in [cb for cb in callback_list if cb.script == filename]:
callback_list.remove(callback_to_remove) callback_list.remove(callback_to_remove)
for ordered_callbacks_list in ordered_callbacks_map.values():
for callback_to_remove in [cb for cb in ordered_callbacks_list if cb.script == filename]:
ordered_callbacks_list.remove(callback_to_remove)
def remove_callbacks_for_function(callback_func): def remove_callbacks_for_function(callback_func):
for callback_list in callback_map.values(): for callback_list in callback_map.values():
for callback_to_remove in [cb for cb in callback_list if cb.callback == callback_func]: for callback_to_remove in [cb for cb in callback_list if cb.callback == callback_func]:
callback_list.remove(callback_to_remove) callback_list.remove(callback_to_remove)
for ordered_callback_list in ordered_callbacks_map.values():
for callback_to_remove in [cb for cb in ordered_callback_list if cb.callback == callback_func]:
ordered_callback_list.remove(callback_to_remove)
def on_app_started(callback, *, name=None): def on_app_started(callback):
"""register a function to be called when the webui started, the gradio `Block` component and """register a function to be called when the webui started, the gradio `Block` component and
fastapi `FastAPI` object are passed as the arguments""" fastapi `FastAPI` object are passed as the arguments"""
add_callback(callback_map['callbacks_app_started'], callback, name=name, category='app_started') add_callback(callback_map['callbacks_app_started'], callback)
def on_before_reload(callback, *, name=None): def on_before_reload(callback):
"""register a function to be called just before the server reloads.""" """register a function to be called just before the server reloads."""
add_callback(callback_map['callbacks_on_reload'], callback, name=name, category='on_reload') add_callback(callback_map['callbacks_on_reload'], callback)
def on_model_loaded(callback, *, name=None): def on_model_loaded(callback):
"""register a function to be called when the stable diffusion model is created; the model is """register a function to be called when the stable diffusion model is created; the model is
passed as an argument; this function is also called when the script is reloaded. """ passed as an argument; this function is also called when the script is reloaded. """
add_callback(callback_map['callbacks_model_loaded'], callback, name=name, category='model_loaded') add_callback(callback_map['callbacks_model_loaded'], callback)
def on_ui_tabs(callback, *, name=None): def on_ui_tabs(callback):
"""register a function to be called when the UI is creating new tabs. """register a function to be called when the UI is creating new tabs.
The function must either return a None, which means no new tabs to be added, or a list, where The function must either return a None, which means no new tabs to be added, or a list, where
each element is a tuple: each element is a tuple:
@ -480,71 +378,71 @@ def on_ui_tabs(callback, *, name=None):
title is tab text displayed to user in the UI title is tab text displayed to user in the UI
elem_id is HTML id for the tab elem_id is HTML id for the tab
""" """
add_callback(callback_map['callbacks_ui_tabs'], callback, name=name, category='ui_tabs') add_callback(callback_map['callbacks_ui_tabs'], callback)
def on_ui_train_tabs(callback, *, name=None): def on_ui_train_tabs(callback):
"""register a function to be called when the UI is creating new tabs for the train tab. """register a function to be called when the UI is creating new tabs for the train tab.
Create your new tabs with gr.Tab. Create your new tabs with gr.Tab.
""" """
add_callback(callback_map['callbacks_ui_train_tabs'], callback, name=name, category='ui_train_tabs') add_callback(callback_map['callbacks_ui_train_tabs'], callback)
def on_ui_settings(callback, *, name=None): def on_ui_settings(callback):
"""register a function to be called before UI settings are populated; add your settings """register a function to be called before UI settings are populated; add your settings
by using shared.opts.add_option(shared.OptionInfo(...)) """ by using shared.opts.add_option(shared.OptionInfo(...)) """
add_callback(callback_map['callbacks_ui_settings'], callback, name=name, category='ui_settings') add_callback(callback_map['callbacks_ui_settings'], callback)
def on_before_image_saved(callback, *, name=None): def on_before_image_saved(callback):
"""register a function to be called before an image is saved to a file. """register a function to be called before an image is saved to a file.
The callback is called with one argument: The callback is called with one argument:
- params: ImageSaveParams - parameters the image is to be saved with. You can change fields in this object. - params: ImageSaveParams - parameters the image is to be saved with. You can change fields in this object.
""" """
add_callback(callback_map['callbacks_before_image_saved'], callback, name=name, category='before_image_saved') add_callback(callback_map['callbacks_before_image_saved'], callback)
def on_image_saved(callback, *, name=None): def on_image_saved(callback):
"""register a function to be called after an image is saved to a file. """register a function to be called after an image is saved to a file.
The callback is called with one argument: The callback is called with one argument:
- params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing. - params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing.
""" """
add_callback(callback_map['callbacks_image_saved'], callback, name=name, category='image_saved') add_callback(callback_map['callbacks_image_saved'], callback)
def on_extra_noise(callback, *, name=None): def on_extra_noise(callback):
"""register a function to be called before adding extra noise in img2img or hires fix; """register a function to be called before adding extra noise in img2img or hires fix;
The callback is called with one argument: The callback is called with one argument:
- params: ExtraNoiseParams - contains noise determined by seed and latent representation of image - params: ExtraNoiseParams - contains noise determined by seed and latent representation of image
""" """
add_callback(callback_map['callbacks_extra_noise'], callback, name=name, category='extra_noise') add_callback(callback_map['callbacks_extra_noise'], callback)
def on_cfg_denoiser(callback, *, name=None): def on_cfg_denoiser(callback):
"""register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs. """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs.
The callback is called with one argument: The callback is called with one argument:
- params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details. - params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details.
""" """
add_callback(callback_map['callbacks_cfg_denoiser'], callback, name=name, category='cfg_denoiser') add_callback(callback_map['callbacks_cfg_denoiser'], callback)
def on_cfg_denoised(callback, *, name=None): def on_cfg_denoised(callback):
"""register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs. """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs.
The callback is called with one argument: The callback is called with one argument:
- params: CFGDenoisedParams - parameters to be passed to the inner model and sampling state details. - params: CFGDenoisedParams - parameters to be passed to the inner model and sampling state details.
""" """
add_callback(callback_map['callbacks_cfg_denoised'], callback, name=name, category='cfg_denoised') add_callback(callback_map['callbacks_cfg_denoised'], callback)
def on_cfg_after_cfg(callback, *, name=None): def on_cfg_after_cfg(callback):
"""register a function to be called in the kdiffussion cfg_denoiser method after cfg calculations are completed. """register a function to be called in the kdiffussion cfg_denoiser method after cfg calculations are completed.
The callback is called with one argument: The callback is called with one argument:
- params: AfterCFGCallbackParams - parameters to be passed to the script for post-processing after cfg calculation. - params: AfterCFGCallbackParams - parameters to be passed to the script for post-processing after cfg calculation.
""" """
add_callback(callback_map['callbacks_cfg_after_cfg'], callback, name=name, category='cfg_after_cfg') add_callback(callback_map['callbacks_cfg_after_cfg'], callback)
def on_before_component(callback, *, name=None): def on_before_component(callback):
"""register a function to be called before a component is created. """register a function to be called before a component is created.
The callback is called with arguments: The callback is called with arguments:
- component - gradio component that is about to be created. - component - gradio component that is about to be created.
@ -553,61 +451,61 @@ def on_before_component(callback, *, name=None):
Use elem_id/label fields of kwargs to figure out which component it is. Use elem_id/label fields of kwargs to figure out which component it is.
This can be useful to inject your own components somewhere in the middle of vanilla UI. This can be useful to inject your own components somewhere in the middle of vanilla UI.
""" """
add_callback(callback_map['callbacks_before_component'], callback, name=name, category='before_component') add_callback(callback_map['callbacks_before_component'], callback)
def on_after_component(callback, *, name=None): def on_after_component(callback):
"""register a function to be called after a component is created. See on_before_component for more.""" """register a function to be called after a component is created. See on_before_component for more."""
add_callback(callback_map['callbacks_after_component'], callback, name=name, category='after_component') add_callback(callback_map['callbacks_after_component'], callback)
def on_image_grid(callback, *, name=None): def on_image_grid(callback):
"""register a function to be called before making an image grid. """register a function to be called before making an image grid.
The callback is called with one argument: The callback is called with one argument:
- params: ImageGridLoopParams - parameters to be used for grid creation. Can be modified. - params: ImageGridLoopParams - parameters to be used for grid creation. Can be modified.
""" """
add_callback(callback_map['callbacks_image_grid'], callback, name=name, category='image_grid') add_callback(callback_map['callbacks_image_grid'], callback)
def on_infotext_pasted(callback, *, name=None): def on_infotext_pasted(callback):
"""register a function to be called before applying an infotext. """register a function to be called before applying an infotext.
The callback is called with two arguments: The callback is called with two arguments:
- infotext: str - raw infotext. - infotext: str - raw infotext.
- result: dict[str, any] - parsed infotext parameters. - result: dict[str, any] - parsed infotext parameters.
""" """
add_callback(callback_map['callbacks_infotext_pasted'], callback, name=name, category='infotext_pasted') add_callback(callback_map['callbacks_infotext_pasted'], callback)
def on_script_unloaded(callback, *, name=None): def on_script_unloaded(callback):
"""register a function to be called before the script is unloaded. Any hooks/hijacks/monkeying about that """register a function to be called before the script is unloaded. Any hooks/hijacks/monkeying about that
the script did should be reverted here""" the script did should be reverted here"""
add_callback(callback_map['callbacks_script_unloaded'], callback, name=name, category='script_unloaded') add_callback(callback_map['callbacks_script_unloaded'], callback)
def on_before_ui(callback, *, name=None): def on_before_ui(callback):
"""register a function to be called before the UI is created.""" """register a function to be called before the UI is created."""
add_callback(callback_map['callbacks_before_ui'], callback, name=name, category='before_ui') add_callback(callback_map['callbacks_before_ui'], callback)
def on_list_optimizers(callback, *, name=None): def on_list_optimizers(callback):
"""register a function to be called when UI is making a list of cross attention optimization options. """register a function to be called when UI is making a list of cross attention optimization options.
The function will be called with one argument, a list, and shall add objects of type modules.sd_hijack_optimizations.SdOptimization The function will be called with one argument, a list, and shall add objects of type modules.sd_hijack_optimizations.SdOptimization
to it.""" to it."""
add_callback(callback_map['callbacks_list_optimizers'], callback, name=name, category='list_optimizers') add_callback(callback_map['callbacks_list_optimizers'], callback)
def on_list_unets(callback, *, name=None): def on_list_unets(callback):
"""register a function to be called when UI is making a list of alternative options for unet. """register a function to be called when UI is making a list of alternative options for unet.
The function will be called with one argument, a list, and shall add objects of type modules.sd_unet.SdUnetOption to it.""" The function will be called with one argument, a list, and shall add objects of type modules.sd_unet.SdUnetOption to it."""
add_callback(callback_map['callbacks_list_unets'], callback, name=name, category='list_unets') add_callback(callback_map['callbacks_list_unets'], callback)
def on_before_token_counter(callback, *, name=None): def on_before_token_counter(callback):
"""register a function to be called when UI is counting tokens for a prompt. """register a function to be called when UI is counting tokens for a prompt.
The function will be called with one argument of type BeforeTokenCounterParams, and should modify its fields if necessary.""" The function will be called with one argument of type BeforeTokenCounterParams, and should modify its fields if necessary."""
add_callback(callback_map['callbacks_before_token_counter'], callback, name=name, category='before_token_counter') add_callback(callback_map['callbacks_before_token_counter'], callback)
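
A minimal sketch of registering a callback against the API above, for example from an extension script. The name= keyword and per-category ordering exist only in the master-side signatures; on v1.8.0 the same registration takes just the callback.

from modules import script_callbacks

def on_started(demo, app):
    # demo is the gradio Blocks instance (may be None), app is the FastAPI app
    print("webui started")

# on v1.8.0 this would be script_callbacks.on_app_started(on_started)
script_callbacks.on_app_started(on_started, name="my-extension-startup")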

View File

@ -4,15 +4,11 @@ import importlib.util
from modules import errors from modules import errors
loaded_scripts = {}
def load_module(path): def load_module(path):
module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path) module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path)
module = importlib.util.module_from_spec(module_spec) module = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(module) module_spec.loader.exec_module(module)
loaded_scripts[path] = module
return module return module

View File

@ -7,9 +7,7 @@ from dataclasses import dataclass
import gradio as gr import gradio as gr
from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing, errors, timer, util from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing, errors, timer
topological_sort = util.topological_sort
AlwaysVisible = object() AlwaysVisible = object()
@ -94,7 +92,7 @@ class Script:
"""If true, the script setup will only be run in Gradio UI, not in API""" """If true, the script setup will only be run in Gradio UI, not in API"""
controls = None controls = None
"""A list of controls returned by the ui().""" """A list of controls retured by the ui()."""
def title(self): def title(self):
"""this function should return the title of the script. This is what will be displayed in the dropdown menu.""" """this function should return the title of the script. This is what will be displayed in the dropdown menu."""
@ -111,7 +109,7 @@ class Script:
def show(self, is_img2img): def show(self, is_img2img):
""" """
is_img2img is True if this function is called for the img2img interface, and False otherwise is_img2img is True if this function is called for the img2img interface, and Fasle otherwise
This function should return: This function should return:
- False if the script should not be shown in UI at all - False if the script should not be shown in UI at all
@ -140,6 +138,7 @@ class Script:
""" """
pass pass
def before_process(self, p, *args): def before_process(self, p, *args):
""" """
This function is called very early during processing begins for AlwaysVisible scripts. This function is called very early during processing begins for AlwaysVisible scripts.
@ -187,13 +186,6 @@ class Script:
""" """
pass pass
def process_before_every_sampling(self, p, *args, **kwargs):
"""
Similar to process(), called before every sampling.
If you use high-res fix, this will be called two times.
"""
pass
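
A minimal sketch of an always-on script using the master-side process_before_every_sampling hook documented above; the x/noise/c/uc keyword arguments match the call sites shown earlier in this compare (txt2img, the hires pass and img2img all pass them).

import modules.scripts as scripts

class LogSamplingInputs(scripts.Script):
    def title(self):
        return "Log sampling inputs"

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def ui(self, is_img2img):
        return []

    def process_before_every_sampling(self, p, *args, **kwargs):
        # called once per sampling pass, so twice when hires fix is enabled
        x = kwargs.get("x")
        if x is not None:
            print(f"sampling starts from a latent of shape {tuple(x.shape)}")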
def process_batch(self, p, *args, **kwargs): def process_batch(self, p, *args, **kwargs):
""" """
Same as process(), but called for every batch. Same as process(), but called for every batch.
@ -359,9 +351,6 @@ class ScriptBuiltinUI(Script):
return f'{tabname}{item_id}' return f'{tabname}{item_id}'
def show(self, is_img2img):
return AlwaysVisible
current_basedir = paths.script_path current_basedir = paths.script_path
@ -380,6 +369,29 @@ scripts_data = []
postprocessing_scripts_data = [] postprocessing_scripts_data = []
ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedir", "module"]) ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedir", "module"])
def topological_sort(dependencies):
"""Accepts a dictionary mapping name to its dependencies, returns a list of names ordered according to dependencies.
Ignores errors relating to missing dependencies or circular dependencies
"""
visited = {}
result = []
def inner(name):
visited[name] = True
for dep in dependencies.get(name, []):
if dep in dependencies and dep not in visited:
inner(dep)
result.append(name)
for depname in dependencies:
if depname not in visited:
inner(depname)
return result
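
A quick illustration of the topological_sort helper above: each value lists the names that must come before the key, and the result puts dependencies ahead of their dependents.

# assumes the topological_sort defined above is in scope
# (modules.scripts on v1.8.0; the master side re-exports it from modules.util)
deps = {
    "c": ["b"],          # c wants b before it
    "b": ["a"],
    "a": [],
    "d": ["missing"],    # unknown names are silently ignored
}
print(topological_sort(deps))  # ['a', 'b', 'c', 'd']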
@dataclass @dataclass
class ScriptWithDependencies: class ScriptWithDependencies:
@ -550,25 +562,6 @@ class ScriptRunner:
self.paste_field_names = [] self.paste_field_names = []
self.inputs = [None] self.inputs = [None]
self.callback_map = {}
self.callback_names = [
'before_process',
'process',
'before_process_batch',
'after_extra_networks_activate',
'process_batch',
'postprocess',
'postprocess_batch',
'postprocess_batch_list',
'post_sample',
'on_mask_blend',
'postprocess_image',
'postprocess_maskoverlay',
'postprocess_image_after_composite',
'before_component',
'after_component',
]
self.on_before_component_elem_id = {} self.on_before_component_elem_id = {}
"""dict of callbacks to be called before an element is created; key=elem_id, value=list of callbacks""" """dict of callbacks to be called before an element is created; key=elem_id, value=list of callbacks"""
@ -607,8 +600,6 @@ class ScriptRunner:
self.scripts.append(script) self.scripts.append(script)
self.selectable_scripts.append(script) self.selectable_scripts.append(script)
self.callback_map.clear()
self.apply_on_before_component_callbacks() self.apply_on_before_component_callbacks()
def apply_on_before_component_callbacks(self): def apply_on_before_component_callbacks(self):
@ -746,17 +737,12 @@ class ScriptRunner:
def onload_script_visibility(params): def onload_script_visibility(params):
title = params.get('Script', None) title = params.get('Script', None)
if title: if title:
try: title_index = self.titles.index(title)
title_index = self.titles.index(title) visibility = title_index == self.script_load_ctr
visibility = title_index == self.script_load_ctr self.script_load_ctr = (self.script_load_ctr + 1) % len(self.titles)
self.script_load_ctr = (self.script_load_ctr + 1) % len(self.titles) return gr.update(visible=visibility)
return gr.update(visible=visibility) else:
except ValueError: return gr.update(visible=False)
params['Script'] = None
message = f'Cannot find Script: "{title}"'
print(message)
gr.Warning(message)
return gr.update(visible=False)
self.infotext_fields.append((dropdown, lambda x: gr.update(value=x.get('Script', 'None')))) self.infotext_fields.append((dropdown, lambda x: gr.update(value=x.get('Script', 'None'))))
self.infotext_fields.extend([(script.group, onload_script_visibility) for script in self.selectable_scripts]) self.infotext_fields.extend([(script.group, onload_script_visibility) for script in self.selectable_scripts])
@ -783,42 +769,8 @@ class ScriptRunner:
return processed return processed
def list_scripts_for_method(self, method_name):
if method_name in ('before_component', 'after_component'):
return self.scripts
else:
return self.alwayson_scripts
def create_ordered_callbacks_list(self, method_name, *, enable_user_sort=True):
script_list = self.list_scripts_for_method(method_name)
category = f'script_{method_name}'
callbacks = []
for script in script_list:
if getattr(script.__class__, method_name, None) == getattr(Script, method_name, None):
continue
script_callbacks.add_callback(callbacks, script, category=category, name=script.__class__.__name__, filename=script.filename)
return script_callbacks.sort_callbacks(category, callbacks, enable_user_sort=enable_user_sort)
def ordered_callbacks(self, method_name, *, enable_user_sort=True):
script_list = self.list_scripts_for_method(method_name)
category = f'script_{method_name}'
scripts_len, callbacks = self.callback_map.get(category, (-1, None))
if callbacks is None or scripts_len != len(script_list):
callbacks = self.create_ordered_callbacks_list(method_name, enable_user_sort=enable_user_sort)
self.callback_map[category] = len(script_list), callbacks
return callbacks
def ordered_scripts(self, method_name):
return [x.callback for x in self.ordered_callbacks(method_name)]
def before_process(self, p): def before_process(self, p):
for script in self.ordered_scripts('before_process'): for script in self.alwayson_scripts:
try: try:
script_args = p.script_args[script.args_from:script.args_to] script_args = p.script_args[script.args_from:script.args_to]
script.before_process(p, *script_args) script.before_process(p, *script_args)
@ -826,23 +778,15 @@ class ScriptRunner:
errors.report(f"Error running before_process: {script.filename}", exc_info=True) errors.report(f"Error running before_process: {script.filename}", exc_info=True)
def process(self, p): def process(self, p):
for script in self.ordered_scripts('process'): for script in self.alwayson_scripts:
try: try:
script_args = p.script_args[script.args_from:script.args_to] script_args = p.script_args[script.args_from:script.args_to]
script.process(p, *script_args) script.process(p, *script_args)
except Exception: except Exception:
errors.report(f"Error running process: {script.filename}", exc_info=True) errors.report(f"Error running process: {script.filename}", exc_info=True)
def process_before_every_sampling(self, p, **kwargs):
for script in self.ordered_scripts('process_before_every_sampling'):
try:
script_args = p.script_args[script.args_from:script.args_to]
script.process_before_every_sampling(p, *script_args, **kwargs)
except Exception:
errors.report(f"Error running process_before_every_sampling: {script.filename}", exc_info=True)
def before_process_batch(self, p, **kwargs): def before_process_batch(self, p, **kwargs):
for script in self.ordered_scripts('before_process_batch'): for script in self.alwayson_scripts:
try: try:
script_args = p.script_args[script.args_from:script.args_to] script_args = p.script_args[script.args_from:script.args_to]
script.before_process_batch(p, *script_args, **kwargs) script.before_process_batch(p, *script_args, **kwargs)
@ -850,7 +794,7 @@ class ScriptRunner:
errors.report(f"Error running before_process_batch: {script.filename}", exc_info=True) errors.report(f"Error running before_process_batch: {script.filename}", exc_info=True)
def after_extra_networks_activate(self, p, **kwargs): def after_extra_networks_activate(self, p, **kwargs):
for script in self.ordered_scripts('after_extra_networks_activate'): for script in self.alwayson_scripts:
try: try:
script_args = p.script_args[script.args_from:script.args_to] script_args = p.script_args[script.args_from:script.args_to]
script.after_extra_networks_activate(p, *script_args, **kwargs) script.after_extra_networks_activate(p, *script_args, **kwargs)
@ -858,7 +802,7 @@ class ScriptRunner:
errors.report(f"Error running after_extra_networks_activate: {script.filename}", exc_info=True) errors.report(f"Error running after_extra_networks_activate: {script.filename}", exc_info=True)
def process_batch(self, p, **kwargs): def process_batch(self, p, **kwargs):
for script in self.ordered_scripts('process_batch'): for script in self.alwayson_scripts:
try: try:
script_args = p.script_args[script.args_from:script.args_to] script_args = p.script_args[script.args_from:script.args_to]
script.process_batch(p, *script_args, **kwargs) script.process_batch(p, *script_args, **kwargs)
@ -866,7 +810,7 @@ class ScriptRunner:
errors.report(f"Error running process_batch: {script.filename}", exc_info=True) errors.report(f"Error running process_batch: {script.filename}", exc_info=True)
def postprocess(self, p, processed): def postprocess(self, p, processed):
for script in self.ordered_scripts('postprocess'): for script in self.alwayson_scripts:
try: try:
script_args = p.script_args[script.args_from:script.args_to] script_args = p.script_args[script.args_from:script.args_to]
script.postprocess(p, processed, *script_args) script.postprocess(p, processed, *script_args)
@ -874,7 +818,7 @@ class ScriptRunner:
errors.report(f"Error running postprocess: {script.filename}", exc_info=True) errors.report(f"Error running postprocess: {script.filename}", exc_info=True)
def postprocess_batch(self, p, images, **kwargs): def postprocess_batch(self, p, images, **kwargs):
for script in self.ordered_scripts('postprocess_batch'): for script in self.alwayson_scripts:
try: try:
script_args = p.script_args[script.args_from:script.args_to] script_args = p.script_args[script.args_from:script.args_to]
script.postprocess_batch(p, *script_args, images=images, **kwargs) script.postprocess_batch(p, *script_args, images=images, **kwargs)
@ -882,7 +826,7 @@ class ScriptRunner:
errors.report(f"Error running postprocess_batch: {script.filename}", exc_info=True) errors.report(f"Error running postprocess_batch: {script.filename}", exc_info=True)
def postprocess_batch_list(self, p, pp: PostprocessBatchListArgs, **kwargs): def postprocess_batch_list(self, p, pp: PostprocessBatchListArgs, **kwargs):
for script in self.ordered_scripts('postprocess_batch_list'): for script in self.alwayson_scripts:
try: try:
script_args = p.script_args[script.args_from:script.args_to] script_args = p.script_args[script.args_from:script.args_to]
script.postprocess_batch_list(p, pp, *script_args, **kwargs) script.postprocess_batch_list(p, pp, *script_args, **kwargs)
@ -890,7 +834,7 @@ class ScriptRunner:
errors.report(f"Error running postprocess_batch_list: {script.filename}", exc_info=True) errors.report(f"Error running postprocess_batch_list: {script.filename}", exc_info=True)
def post_sample(self, p, ps: PostSampleArgs): def post_sample(self, p, ps: PostSampleArgs):
for script in self.ordered_scripts('post_sample'): for script in self.alwayson_scripts:
try: try:
script_args = p.script_args[script.args_from:script.args_to] script_args = p.script_args[script.args_from:script.args_to]
script.post_sample(p, ps, *script_args) script.post_sample(p, ps, *script_args)
@ -898,7 +842,7 @@ class ScriptRunner:
errors.report(f"Error running post_sample: {script.filename}", exc_info=True) errors.report(f"Error running post_sample: {script.filename}", exc_info=True)
def on_mask_blend(self, p, mba: MaskBlendArgs): def on_mask_blend(self, p, mba: MaskBlendArgs):
for script in self.ordered_scripts('on_mask_blend'): for script in self.alwayson_scripts:
try: try:
script_args = p.script_args[script.args_from:script.args_to] script_args = p.script_args[script.args_from:script.args_to]
script.on_mask_blend(p, mba, *script_args) script.on_mask_blend(p, mba, *script_args)
@ -906,7 +850,7 @@ class ScriptRunner:
errors.report(f"Error running post_sample: {script.filename}", exc_info=True) errors.report(f"Error running post_sample: {script.filename}", exc_info=True)
def postprocess_image(self, p, pp: PostprocessImageArgs): def postprocess_image(self, p, pp: PostprocessImageArgs):
for script in self.ordered_scripts('postprocess_image'): for script in self.alwayson_scripts:
try: try:
script_args = p.script_args[script.args_from:script.args_to] script_args = p.script_args[script.args_from:script.args_to]
script.postprocess_image(p, pp, *script_args) script.postprocess_image(p, pp, *script_args)
@ -914,7 +858,7 @@ class ScriptRunner:
errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True) errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True)
def postprocess_maskoverlay(self, p, ppmo: PostProcessMaskOverlayArgs): def postprocess_maskoverlay(self, p, ppmo: PostProcessMaskOverlayArgs):
for script in self.ordered_scripts('postprocess_maskoverlay'): for script in self.alwayson_scripts:
try: try:
script_args = p.script_args[script.args_from:script.args_to] script_args = p.script_args[script.args_from:script.args_to]
script.postprocess_maskoverlay(p, ppmo, *script_args) script.postprocess_maskoverlay(p, ppmo, *script_args)
@ -922,7 +866,7 @@ class ScriptRunner:
errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True) errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True)
def postprocess_image_after_composite(self, p, pp: PostprocessImageArgs): def postprocess_image_after_composite(self, p, pp: PostprocessImageArgs):
for script in self.ordered_scripts('postprocess_image_after_composite'): for script in self.alwayson_scripts:
try: try:
script_args = p.script_args[script.args_from:script.args_to] script_args = p.script_args[script.args_from:script.args_to]
script.postprocess_image_after_composite(p, pp, *script_args) script.postprocess_image_after_composite(p, pp, *script_args)
@ -936,7 +880,7 @@ class ScriptRunner:
except Exception: except Exception:
errors.report(f"Error running on_before_component: {script.filename}", exc_info=True) errors.report(f"Error running on_before_component: {script.filename}", exc_info=True)
for script in self.ordered_scripts('before_component'): for script in self.scripts:
try: try:
script.before_component(component, **kwargs) script.before_component(component, **kwargs)
except Exception: except Exception:
@ -949,7 +893,7 @@ class ScriptRunner:
except Exception: except Exception:
errors.report(f"Error running on_after_component: {script.filename}", exc_info=True) errors.report(f"Error running on_after_component: {script.filename}", exc_info=True)
for script in self.ordered_scripts('after_component'): for script in self.scripts:
try: try:
script.after_component(component, **kwargs) script.after_component(component, **kwargs)
except Exception: except Exception:
@ -977,7 +921,7 @@ class ScriptRunner:
self.scripts[si].args_to = args_to self.scripts[si].args_to = args_to
def before_hr(self, p): def before_hr(self, p):
for script in self.ordered_scripts('before_hr'): for script in self.alwayson_scripts:
try: try:
script_args = p.script_args[script.args_from:script.args_to] script_args = p.script_args[script.args_from:script.args_to]
script.before_hr(p, *script_args) script.before_hr(p, *script_args)
@ -985,7 +929,7 @@ class ScriptRunner:
errors.report(f"Error running before_hr: {script.filename}", exc_info=True) errors.report(f"Error running before_hr: {script.filename}", exc_info=True)
def setup_scrips(self, p, *, is_ui=True): def setup_scrips(self, p, *, is_ui=True):
for script in self.ordered_scripts('setup'): for script in self.alwayson_scripts:
if not is_ui and script.setup_for_ui_only: if not is_ui and script.setup_for_ui_only:
continue continue

View File

@ -143,7 +143,6 @@ class ScriptPostprocessingRunner:
self.initialize_scripts(modules.scripts.postprocessing_scripts_data) self.initialize_scripts(modules.scripts.postprocessing_scripts_data)
scripts_order = shared.opts.postprocessing_operation_order scripts_order = shared.opts.postprocessing_operation_order
scripts_filter_out = set(shared.opts.postprocessing_disable_in_extras)
def script_score(name): def script_score(name):
for i, possible_match in enumerate(scripts_order): for i, possible_match in enumerate(scripts_order):
@ -152,10 +151,9 @@ class ScriptPostprocessingRunner:
return len(self.scripts) return len(self.scripts)
filtered_scripts = [script for script in self.scripts if script.name not in scripts_filter_out] script_scores = {script.name: (script_score(script.name), script.order, script.name, original_index) for original_index, script in enumerate(self.scripts)}
script_scores = {script.name: (script_score(script.name), script.order, script.name, original_index) for original_index, script in enumerate(filtered_scripts)}
return sorted(filtered_scripts, key=lambda x: script_scores[x.name]) return sorted(self.scripts, key=lambda x: script_scores[x.name])
def setup_ui(self): def setup_ui(self):
inputs = [] inputs = []

View File

@ -35,7 +35,7 @@ class EmphasisIgnore(Emphasis):
class EmphasisOriginal(Emphasis): class EmphasisOriginal(Emphasis):
name = "Original" name = "Original"
description = "the original emphasis implementation" description = "the orginal emphasis implementation"
def after_transformers(self): def after_transformers(self):
original_mean = self.z.mean() original_mean = self.z.mean()
@ -48,7 +48,7 @@ class EmphasisOriginal(Emphasis):
class EmphasisOriginalNoNorm(EmphasisOriginal): class EmphasisOriginalNoNorm(EmphasisOriginal):
name = "No norm" name = "No norm"
description = "same as original, but without normalization (seems to work better for SDXL)" description = "same as orginal, but without normalization (seems to work better for SDXL)"
def after_transformers(self): def after_transformers(self):
self.z = self.z * self.multipliers.reshape(self.multipliers.shape + (1,)).expand(self.z.shape) self.z = self.z * self.multipliers.reshape(self.multipliers.shape + (1,)).expand(self.z.shape)

View File

@ -325,10 +325,7 @@ class StableDiffusionModelHijack:
if self.clip is None: if self.clip is None:
return "-", "-" return "-", "-"
if hasattr(self.clip, 'get_token_count'): _, token_count = self.clip.process_texts([text])
token_count = self.clip.get_token_count(text)
else:
_, token_count = self.clip.process_texts([text])
return token_count, self.clip.get_target_prompt_token_count(token_count) return token_count, self.clip.get_target_prompt_token_count(token_count)
@ -359,28 +356,13 @@ class EmbeddingsWithFixes(torch.nn.Module):
vec = embedding.vec[self.textual_inversion_key] if isinstance(embedding.vec, dict) else embedding.vec vec = embedding.vec[self.textual_inversion_key] if isinstance(embedding.vec, dict) else embedding.vec
emb = devices.cond_cast_unet(vec) emb = devices.cond_cast_unet(vec)
emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0]) emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]]).to(dtype=inputs_embeds.dtype) tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]])
vecs.append(tensor) vecs.append(tensor)
return torch.stack(vecs) return torch.stack(vecs)
class TextualInversionEmbeddings(torch.nn.Embedding):
def __init__(self, num_embeddings: int, embedding_dim: int, textual_inversion_key='clip_l', **kwargs):
super().__init__(num_embeddings, embedding_dim, **kwargs)
self.embeddings = model_hijack
self.textual_inversion_key = textual_inversion_key
@property
def wrapped(self):
return super().forward
def forward(self, input_ids):
return EmbeddingsWithFixes.forward(self, input_ids)
def add_circular_option_to_conv_2d(): def add_circular_option_to_conv_2d():
conv2d_constructor = torch.nn.Conv2d.__init__ conv2d_constructor = torch.nn.Conv2d.__init__
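
A minimal sketch (dummy tensors, hypothetical offset) of the splice performed by EmbeddingsWithFixes above: the embedding's vectors replace a slice of the chunk's token embeddings starting right after the placeholder token; the new code additionally casts the result to the input embeddings' dtype.

import torch

tensor = torch.randn(77, 768)      # embeddings for one prompt chunk
emb = torch.randn(4, 768)          # 4 vectors of a textual-inversion embedding
offset = 10                        # position of the embedding's first token (hypothetical)

emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
patched = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]])
assert patched.shape == tensor.shape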

View File

@ -23,25 +23,28 @@ class PromptChunk:
PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding']) PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding'])
"""An object of this type is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt """An object of this type is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt
chunk. Those objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally chunk. Thos objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally
are applied by sd_hijack.EmbeddingsWithFixes's forward function.""" are applied by sd_hijack.EmbeddingsWithFixes's forward function."""
class TextConditionalModel(torch.nn.Module): class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
def __init__(self): """A pytorch module that is a wrapper for FrozenCLIPEmbedder module. it enhances FrozenCLIPEmbedder, making it possible to
have unlimited prompt length and assign weights to tokens in prompt.
"""
def __init__(self, wrapped, hijack):
super().__init__() super().__init__()
self.hijack = sd_hijack.model_hijack self.wrapped = wrapped
"""Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation,
depending on model."""
self.hijack: sd_hijack.StableDiffusionModelHijack = hijack
self.chunk_length = 75 self.chunk_length = 75
self.is_trainable = False self.is_trainable = getattr(wrapped, 'is_trainable', False)
self.input_key = 'txt' self.input_key = getattr(wrapped, 'input_key', 'txt')
self.return_pooled = False self.legacy_ucg_val = None
self.comma_token = None
self.id_start = None
self.id_end = None
self.id_pad = None
def empty_chunk(self): def empty_chunk(self):
"""creates an empty PromptChunk and returns it""" """creates an empty PromptChunk and returns it"""
@ -63,7 +66,7 @@ class TextConditionalModel(torch.nn.Module):
def encode_with_transformers(self, tokens): def encode_with_transformers(self, tokens):
""" """
converts a batch of token ids (in python lists) into a single tensor with numeric representation of those tokens; converts a batch of token ids (in python lists) into a single tensor with numeric respresentation of those tokens;
All python lists with tokens are assumed to have same length, usually 77. All python lists with tokens are assumed to have same length, usually 77.
if input is a list with B elements and each element has T tokens, expected output shape is (B, T, C), where C depends on if input is a list with B elements and each element has T tokens, expected output shape is (B, T, C), where C depends on
model - can be 768 and 1024. model - can be 768 and 1024.
@ -133,7 +136,7 @@ class TextConditionalModel(torch.nn.Module):
if token == self.comma_token: if token == self.comma_token:
last_comma = len(chunk.tokens) last_comma = len(chunk.tokens)
# this is when we are at the end of allotted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack # this is when we are at the end of alloted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack
# is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next. # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next.
elif opts.comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= opts.comma_padding_backtrack: elif opts.comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= opts.comma_padding_backtrack:
break_location = last_comma + 1 break_location = last_comma + 1
@ -203,10 +206,14 @@ class TextConditionalModel(torch.nn.Module):
be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, for SD2 it's 1024, and for SDXL it's 1280. be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, for SD2 it's 1024, and for SDXL it's 1280.
An example shape returned by this function can be: (2, 77, 768). An example shape returned by this function can be: (2, 77, 768).
For SDXL, instead of returning one tensor above, it returns a tuple with two: the other one with shape (B, 1280) with pooled values. For SDXL, instead of returning one tensor above, it returns a tuple with two: the other one with shape (B, 1280) with pooled values.
Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one element Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one elemenet
is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream" is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream"
""" """
if opts.use_old_emphasis_implementation:
import modules.sd_hijack_clip_old
return modules.sd_hijack_clip_old.forward_old(self, texts)
batch_chunks, token_count = self.process_texts(texts) batch_chunks, token_count = self.process_texts(texts)
used_embeddings = {} used_embeddings = {}
@ -223,7 +230,7 @@ class TextConditionalModel(torch.nn.Module):
for fixes in self.hijack.fixes: for fixes in self.hijack.fixes:
for _position, embedding in fixes: for _position, embedding in fixes:
used_embeddings[embedding.name] = embedding used_embeddings[embedding.name] = embedding
devices.torch_npu_set_device()
z = self.process_tokens(tokens, multipliers) z = self.process_tokens(tokens, multipliers)
zs.append(z) zs.append(z)
@ -245,7 +252,7 @@ class TextConditionalModel(torch.nn.Module):
if any(x for x in texts if "(" in x or "[" in x) and opts.emphasis != "Original": if any(x for x in texts if "(" in x or "[" in x) and opts.emphasis != "Original":
self.hijack.extra_generation_params["Emphasis"] = opts.emphasis self.hijack.extra_generation_params["Emphasis"] = opts.emphasis
if self.return_pooled: if getattr(self.wrapped, 'return_pooled', False):
return torch.hstack(zs), zs[0].pooled return torch.hstack(zs), zs[0].pooled
else: else:
return torch.hstack(zs) return torch.hstack(zs)
@ -285,34 +292,6 @@ class TextConditionalModel(torch.nn.Module):
return z return z
class FrozenCLIPEmbedderWithCustomWordsBase(TextConditionalModel):
"""A pytorch module that is a wrapper for FrozenCLIPEmbedder module. it enhances FrozenCLIPEmbedder, making it possible to
have unlimited prompt length and assign weights to tokens in prompt.
"""
def __init__(self, wrapped, hijack):
super().__init__()
self.hijack = hijack
self.wrapped = wrapped
"""Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation,
depending on model."""
self.is_trainable = getattr(wrapped, 'is_trainable', False)
self.input_key = getattr(wrapped, 'input_key', 'txt')
self.return_pooled = getattr(self.wrapped, 'return_pooled', False)
self.legacy_ucg_val = None # for sgm codebase
def forward(self, texts):
if opts.use_old_emphasis_implementation:
import modules.sd_hijack_clip_old
return modules.sd_hijack_clip_old.forward_old(self, texts)
return super().forward(texts)
class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase): class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase):
def __init__(self, wrapped, hijack): def __init__(self, wrapped, hijack):
super().__init__(wrapped, hijack) super().__init__(wrapped, hijack)
@ -374,9 +353,7 @@ class FrozenCLIPEmbedderForSDXLWithCustomWords(FrozenCLIPEmbedderWithCustomWords
def encode_with_transformers(self, tokens): def encode_with_transformers(self, tokens):
outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=self.wrapped.layer == "hidden") outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=self.wrapped.layer == "hidden")
if opts.sdxl_clip_l_skip is True: if self.wrapped.layer == "last":
z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers]
elif self.wrapped.layer == "last":
z = outputs.last_hidden_state z = outputs.last_hidden_state
else: else:
z = outputs.hidden_states[self.wrapped.layer_idx] z = outputs.hidden_states[self.wrapped.layer_idx]
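
A hedged sketch of the hidden-state selection in the hunk above, using a dummy transformer output: with "clip skip" the embedding is taken from an earlier hidden state instead of the last layer. The toggle name mirrors opts.sdxl_clip_l_skip but is just a local variable here.

from types import SimpleNamespace
import torch

hidden_states = [torch.randn(1, 77, 768) for _ in range(13)]   # embeddings + 12 layers
outputs = SimpleNamespace(hidden_states=hidden_states, last_hidden_state=hidden_states[-1])

CLIP_stop_at_last_layers = 2      # "clip skip" of 2
sdxl_clip_l_skip = True

if sdxl_clip_l_skip:
    z = outputs.hidden_states[-CLIP_stop_at_last_layers]        # penultimate layer
else:
    z = outputs.last_hidden_state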

View File

@ -486,8 +486,7 @@ def xformers_attention_forward(self, x, context=None, mask=None, **kwargs):
k_in = self.to_k(context_k) k_in = self.to_k(context_k)
v_in = self.to_v(context_v) v_in = self.to_v(context_v)
q, k, v = (t.reshape(t.shape[0], t.shape[1], h, -1) for t in (q_in, k_in, v_in)) q, k, v = (rearrange(t, 'b n (h d) -> b n h d', h=h) for t in (q_in, k_in, v_in))
del q_in, k_in, v_in del q_in, k_in, v_in
dtype = q.dtype dtype = q.dtype
@ -498,8 +497,7 @@ def xformers_attention_forward(self, x, context=None, mask=None, **kwargs):
out = out.to(dtype) out = out.to(dtype)
b, n, h, d = out.shape out = rearrange(out, 'b n h d -> b n (h d)', h=h)
out = out.reshape(b, n, h * d)
return self.to_out(out) return self.to_out(out)
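
The change above swaps einops.rearrange for plain reshapes; a quick sketch showing the two are equivalent for this pattern (shapes are arbitrary):

import torch

b, n, h, d = 2, 64, 8, 40
t = torch.randn(b, n, h * d)

via_reshape = t.reshape(t.shape[0], t.shape[1], h, -1)          # 'b n (h d) -> b n h d'
assert via_reshape.shape == (b, n, h, d)

out = via_reshape.reshape(b, n, h * d)                          # 'b n h d -> b n (h d)'
assert torch.equal(out, t)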

View File

@ -1,7 +1,5 @@
import torch import torch
from packaging import version from packaging import version
from einops import repeat
import math
from modules import devices from modules import devices
from modules.sd_hijack_utils import CondFunc from modules.sd_hijack_utils import CondFunc
@ -38,7 +36,7 @@ th = TorchHijackForUnet()
# Below are monkey patches to enable upcasting a float16 UNet for float32 sampling # Below are monkey patches to enable upcasting a float16 UNet for float32 sampling
def apply_model(orig_func, self, x_noisy, t, cond, **kwargs): def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
"""Always make sure inputs to unet are in correct dtype."""
if isinstance(cond, dict): if isinstance(cond, dict):
for y in cond.keys(): for y in cond.keys():
if isinstance(cond[y], list): if isinstance(cond[y], list):
@ -47,59 +45,7 @@ def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
cond[y] = cond[y].to(devices.dtype_unet) if isinstance(cond[y], torch.Tensor) else cond[y] cond[y] = cond[y].to(devices.dtype_unet) if isinstance(cond[y], torch.Tensor) else cond[y]
with devices.autocast(): with devices.autocast():
result = orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs) return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float()
if devices.unet_needs_upcast:
return result.float()
else:
return result
# Monkey patch to create timestep embed tensor on device, avoiding a block.
def timestep_embedding(_, timesteps, dim, max_period=10000, repeat_only=False):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
if not repeat_only:
half = dim // 2
freqs = torch.exp(
-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=timesteps.device) / half
)
args = timesteps[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
else:
embedding = repeat(timesteps, 'b -> b d', d=dim)
return embedding
# Monkey patch to SpatialTransformer removing unnecessary contiguous calls.
# Prevents a lot of unnecessary aten::copy_ calls
def spatial_transformer_forward(_, self, x: torch.Tensor, context=None):
# note: if no context is given, cross-attention defaults to self-attention
if not isinstance(context, list):
context = [context]
b, c, h, w = x.shape
x_in = x
x = self.norm(x)
if not self.use_linear:
x = self.proj_in(x)
x = x.permute(0, 2, 3, 1).reshape(b, h * w, c)
if self.use_linear:
x = self.proj_in(x)
for i, block in enumerate(self.transformer_blocks):
x = block(x, context=context[i])
if self.use_linear:
x = self.proj_out(x)
x = x.view(b, h, w, c).permute(0, 3, 1, 2)
if not self.use_linear:
x = self.proj_out(x)
return x + x_in
class GELUHijack(torch.nn.GELU, torch.nn.Module): class GELUHijack(torch.nn.GELU, torch.nn.Module):
@ -118,15 +64,12 @@ def hijack_ddpm_edit():
if not ddpm_edit_hijack: if not ddpm_edit_hijack:
CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond) CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond) CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
ddpm_edit_hijack = CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.apply_model', apply_model) ddpm_edit_hijack = CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.apply_model', apply_model, unet_needs_upcast)
unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast) CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast)
CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding)
CondFunc('ldm.modules.attention.SpatialTransformer.forward', spatial_transformer_forward)
CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast) CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)
if version.parse(torch.__version__) <= version.parse("1.13.2") or torch.cuda.is_available(): if version.parse(torch.__version__) <= version.parse("1.13.2") or torch.cuda.is_available():
CondFunc('ldm.modules.diffusionmodules.util.GroupNorm32.forward', lambda orig_func, self, *args, **kwargs: orig_func(self.float(), *args, **kwargs), unet_needs_upcast) CondFunc('ldm.modules.diffusionmodules.util.GroupNorm32.forward', lambda orig_func, self, *args, **kwargs: orig_func(self.float(), *args, **kwargs), unet_needs_upcast)
CondFunc('ldm.modules.attention.GEGLU.forward', lambda orig_func, self, x: orig_func(self.float(), x.float()).to(devices.dtype_unet), unet_needs_upcast) CondFunc('ldm.modules.attention.GEGLU.forward', lambda orig_func, self, x: orig_func(self.float(), x.float()).to(devices.dtype_unet), unet_needs_upcast)
@ -138,17 +81,5 @@ CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.decode_first_stage', first_s
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond) CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond) CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model) CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model, unet_needs_upcast)
CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model) CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)
def timestep_embedding_cast_result(orig_func, timesteps, *args, **kwargs):
if devices.unet_needs_upcast and timesteps.dtype == torch.int64:
dtype = torch.float32
else:
dtype = devices.dtype_unet
return orig_func(timesteps, *args, **kwargs).to(dtype=dtype)
CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding_cast_result)
CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding_cast_result)
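
The hunk above adds a device-aware sinusoidal timestep embedding (plus a dtype-casting wrapper); a minimal standalone sketch of what it computes:

import math
import torch

def timestep_embedding(timesteps, dim, max_period=10000):
    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period) * torch.arange(half, dtype=torch.float32, device=timesteps.device) / half
    )
    args = timesteps[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    return embedding

print(timestep_embedding(torch.tensor([0, 999]), 320).shape)    # torch.Size([2, 320])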

View File

@ -1,11 +1,7 @@
import importlib import importlib
always_true_func = lambda *args, **kwargs: True
class CondFunc: class CondFunc:
def __new__(cls, orig_func, sub_func, cond_func=always_true_func): def __new__(cls, orig_func, sub_func, cond_func):
self = super(CondFunc, cls).__new__(cls) self = super(CondFunc, cls).__new__(cls)
if isinstance(orig_func, str): if isinstance(orig_func, str):
func_path = orig_func.split('.') func_path = orig_func.split('.')
@ -24,13 +20,13 @@ class CondFunc:
print(f"Warning: Failed to resolve {orig_func} for CondFunc hijack") print(f"Warning: Failed to resolve {orig_func} for CondFunc hijack")
pass pass
self.__init__(orig_func, sub_func, cond_func) self.__init__(orig_func, sub_func, cond_func)
return lambda *args, **kwargs: self(*args, **kwargs) return lambda *args, **kwargs: self(*args, **kwargs)
def __init__(self, orig_func, sub_func, cond_func): def __init__(self, orig_func, sub_func, cond_func):
self.__orig_func = orig_func self.__orig_func = orig_func
self.__sub_func = sub_func self.__sub_func = sub_func
self.__cond_func = cond_func self.__cond_func = cond_func
def __call__(self, *args, **kwargs): def __call__(self, *args, **kwargs):
if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs): if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs):
return self.__sub_func(self.__orig_func, *args, **kwargs) return self.__sub_func(self.__orig_func, *args, **kwargs)
else: else:
return self.__orig_func(*args, **kwargs) return self.__orig_func(*args, **kwargs)
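
CondFunc above monkey-patches a function (given by dotted path or directly) so that sub_func runs when cond_func approves and the original runs otherwise; a self-contained sketch of that pattern with hypothetical functions, not using the real class:

import math

def sub_func(orig_func, x):
    return orig_func(float(x))          # substitute behaviour: force float input

def cond_func(orig_func, x):
    return isinstance(x, int)           # only intervene for ints

orig = math.sqrt

def patched(*args, **kwargs):
    if cond_func(orig, *args, **kwargs):
        return sub_func(orig, *args, **kwargs)
    return orig(*args, **kwargs)

math.sqrt = patched
print(math.sqrt(4), math.sqrt(2.0))     # 2.0 1.4142135623730951
math.sqrt = orig                        # undo the patch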

View File

@ -1,17 +1,18 @@
import collections import collections
import importlib import os.path
import os
import sys import sys
import threading import threading
import enum
import torch import torch
import re import re
import safetensors.torch import safetensors.torch
from omegaconf import OmegaConf, ListConfig from omegaconf import OmegaConf, ListConfig
from os import mkdir
from urllib import request from urllib import request
import ldm.modules.midas as midas import ldm.modules.midas as midas
from ldm.util import instantiate_from_config
from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack, patches from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack, patches
from modules.timer import Timer from modules.timer import Timer
from modules.shared import opts from modules.shared import opts
@ -27,14 +28,6 @@ checkpoint_alisases = checkpoint_aliases # for compatibility with old name
checkpoints_loaded = collections.OrderedDict() checkpoints_loaded = collections.OrderedDict()
class ModelType(enum.Enum):
SD1 = 1
SD2 = 2
SDXL = 3
SSD = 4
SD3 = 5
def replace_key(d, key, new_key, value): def replace_key(d, key, new_key, value):
keys = list(d.keys()) keys = list(d.keys())
@ -157,12 +150,10 @@ def list_models():
cmd_ckpt = shared.cmd_opts.ckpt cmd_ckpt = shared.cmd_opts.ckpt
if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt): if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt):
model_url = None model_url = None
expected_sha256 = None
else: else:
model_url = f"{shared.hf_endpoint}/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors" model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"
expected_sha256 = '6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa'
model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"], hash_prefix=expected_sha256) model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"])
if os.path.exists(cmd_ckpt): if os.path.exists(cmd_ckpt):
checkpoint_info = CheckpointInfo(cmd_ckpt) checkpoint_info = CheckpointInfo(cmd_ckpt)
@ -290,21 +281,17 @@ def read_metadata_from_safetensors(filename):
json_start = file.read(2) json_start = file.read(2)
assert metadata_len > 2 and json_start in (b'{"', b"{'"), f"{filename} is not a safetensors file" assert metadata_len > 2 and json_start in (b'{"', b"{'"), f"{filename} is not a safetensors file"
json_data = json_start + file.read(metadata_len-2)
json_obj = json.loads(json_data)
res = {} res = {}
for k, v in json_obj.get("__metadata__", {}).items():
try: res[k] = v
json_data = json_start + file.read(metadata_len-2) if isinstance(v, str) and v[0:1] == '{':
json_obj = json.loads(json_data) try:
for k, v in json_obj.get("__metadata__", {}).items(): res[k] = json.loads(v)
res[k] = v except Exception:
if isinstance(v, str) and v[0:1] == '{': pass
try:
res[k] = json.loads(v)
except Exception:
pass
except Exception:
errors.report(f"Error reading metadata from file: {filename}", exc_info=True)
return res return res
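
For context, a hedged sketch of the whole read path the hunk above hardens: a safetensors file starts with an 8-byte little-endian header length followed by a JSON header, and string values in its "__metadata__" dict may themselves be JSON.

import json
import struct

def read_safetensors_metadata(filename):
    res = {}
    with open(filename, "rb") as file:
        metadata_len = struct.unpack("<Q", file.read(8))[0]     # header length
        json_obj = json.loads(file.read(metadata_len))
        for k, v in json_obj.get("__metadata__", {}).items():
            res[k] = v
            if isinstance(v, str) and v.startswith("{"):
                try:
                    res[k] = json.loads(v)                      # nested JSON string
                except Exception:
                    pass
    return res
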
@ -376,37 +363,6 @@ def check_fp8(model):
return enable_fp8 return enable_fp8
def set_model_type(model, state_dict):
model.is_sd1 = False
model.is_sd2 = False
model.is_sdxl = False
model.is_ssd = False
model.is_sd3 = False
if "model.diffusion_model.x_embedder.proj.weight" in state_dict:
model.is_sd3 = True
model.model_type = ModelType.SD3
elif hasattr(model, 'conditioner'):
model.is_sdxl = True
if 'model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight' not in state_dict.keys():
model.is_ssd = True
model.model_type = ModelType.SSD
else:
model.model_type = ModelType.SDXL
elif hasattr(model.cond_stage_model, 'model'):
model.is_sd2 = True
model.model_type = ModelType.SD2
else:
model.is_sd1 = True
model.model_type = ModelType.SD1
def set_model_fields(model):
if not hasattr(model, 'latent_channels'):
model.latent_channels = 4
def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer): def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer):
sd_model_hash = checkpoint_info.calculate_shorthash() sd_model_hash = checkpoint_info.calculate_shorthash()
timer.record("calculate hash") timer.record("calculate hash")
@ -421,9 +377,10 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
if state_dict is None: if state_dict is None:
state_dict = get_checkpoint_state_dict(checkpoint_info, timer) state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
set_model_type(model, state_dict) model.is_sdxl = hasattr(model, 'conditioner')
set_model_fields(model) model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model')
model.is_sd1 = not model.is_sdxl and not model.is_sd2
model.is_ssd = model.is_sdxl and 'model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight' not in state_dict.keys()
if model.is_sdxl: if model.is_sdxl:
sd_models_xl.extend_sdxl(model) sd_models_xl.extend_sdxl(model)
@ -434,30 +391,11 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
# cache newly loaded model # cache newly loaded model
checkpoints_loaded[checkpoint_info] = state_dict.copy() checkpoints_loaded[checkpoint_info] = state_dict.copy()
if hasattr(model, "before_load_weights"):
model.before_load_weights(state_dict)
model.load_state_dict(state_dict, strict=False) model.load_state_dict(state_dict, strict=False)
timer.record("apply weights to model") timer.record("apply weights to model")
if hasattr(model, "after_load_weights"):
model.after_load_weights(state_dict)
del state_dict del state_dict
# Set is_sdxl_inpaint flag.
# Checks Unet structure to detect inpaint model. The inpaint model's
# checkpoint state_dict does not contain the key
# 'diffusion_model.input_blocks.0.0.weight'.
diffusion_model_input = model.model.state_dict().get(
'diffusion_model.input_blocks.0.0.weight'
)
model.is_sdxl_inpaint = (
model.is_sdxl and
diffusion_model_input is not None and
diffusion_model_input.shape[1] == 9
)
if shared.cmd_opts.opt_channelslast: if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last) model.to(memory_format=torch.channels_last)
timer.record("apply channels_last") timer.record("apply channels_last")
@ -466,7 +404,6 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
model.float() model.float()
model.alphas_cumprod_original = model.alphas_cumprod model.alphas_cumprod_original = model.alphas_cumprod
devices.dtype_unet = torch.float32 devices.dtype_unet = torch.float32
assert shared.cmd_opts.precision != "half", "Cannot use --precision half with --no-half"
timer.record("apply float()") timer.record("apply float()")
else: else:
vae = model.first_stage_model vae = model.first_stage_model
@ -571,7 +508,7 @@ def enable_midas_autodownload():
path = midas.api.ISL_PATHS[model_type] path = midas.api.ISL_PATHS[model_type]
if not os.path.exists(path): if not os.path.exists(path):
if not os.path.exists(midas_path): if not os.path.exists(midas_path):
os.mkdir(midas_path) mkdir(midas_path)
print(f"Downloading midas model weights for {model_type} to {path}") print(f"Downloading midas model weights for {model_type} to {path}")
request.urlretrieve(midas_urls[model_type], path) request.urlretrieve(midas_urls[model_type], path)
@ -596,34 +533,25 @@ def patch_given_betas():
original_register_schedule = patches.patch(__name__, ldm.models.diffusion.ddpm.DDPM, 'register_schedule', patched_register_schedule) original_register_schedule = patches.patch(__name__, ldm.models.diffusion.ddpm.DDPM, 'register_schedule', patched_register_schedule)
def repair_config(sd_config, state_dict=None): def repair_config(sd_config):
if not hasattr(sd_config.model.params, "use_ema"): if not hasattr(sd_config.model.params, "use_ema"):
sd_config.model.params.use_ema = False sd_config.model.params.use_ema = False
if hasattr(sd_config.model.params, 'unet_config'): if hasattr(sd_config.model.params, 'unet_config'):
if shared.cmd_opts.no_half: if shared.cmd_opts.no_half:
sd_config.model.params.unet_config.params.use_fp16 = False sd_config.model.params.unet_config.params.use_fp16 = False
elif shared.cmd_opts.upcast_sampling or shared.cmd_opts.precision == "half": elif shared.cmd_opts.upcast_sampling:
sd_config.model.params.unet_config.params.use_fp16 = True sd_config.model.params.unet_config.params.use_fp16 = True
if hasattr(sd_config.model.params, 'first_stage_config'): if getattr(sd_config.model.params.first_stage_config.params.ddconfig, "attn_type", None) == "vanilla-xformers" and not shared.xformers_available:
if getattr(sd_config.model.params.first_stage_config.params.ddconfig, "attn_type", None) == "vanilla-xformers" and not shared.xformers_available: sd_config.model.params.first_stage_config.params.ddconfig.attn_type = "vanilla"
sd_config.model.params.first_stage_config.params.ddconfig.attn_type = "vanilla"
# For UnCLIP-L, override the hardcoded karlo directory # For UnCLIP-L, override the hardcoded karlo directory
if hasattr(sd_config.model.params, "noise_aug_config") and hasattr(sd_config.model.params.noise_aug_config.params, "clip_stats_path"): if hasattr(sd_config.model.params, "noise_aug_config") and hasattr(sd_config.model.params.noise_aug_config.params, "clip_stats_path"):
karlo_path = os.path.join(paths.models_path, 'karlo') karlo_path = os.path.join(paths.models_path, 'karlo')
sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path) sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path)
# Do not use checkpoint for inference.
# This helps prevent extra performance overhead on checking parameters.
# The perf overhead is about 100ms/it on 4090 for SDXL.
if hasattr(sd_config.model.params, "network_config"):
sd_config.model.params.network_config.params.use_checkpoint = False
if hasattr(sd_config.model.params, "unet_config"):
sd_config.model.params.unet_config.params.use_checkpoint = False
def rescale_zero_terminal_snr_abar(alphas_cumprod): def rescale_zero_terminal_snr_abar(alphas_cumprod):
alphas_bar_sqrt = alphas_cumprod.sqrt() alphas_bar_sqrt = alphas_cumprod.sqrt()
@ -724,23 +652,18 @@ def get_empty_cond(sd_model):
p = processing.StableDiffusionProcessingTxt2Img() p = processing.StableDiffusionProcessingTxt2Img()
extra_networks.activate(p, {}) extra_networks.activate(p, {})
if hasattr(sd_model, 'get_learned_conditioning'): if hasattr(sd_model, 'conditioner'):
d = sd_model.get_learned_conditioning([""]) d = sd_model.get_learned_conditioning([""])
return d['crossattn']
else: else:
d = sd_model.cond_stage_model([""]) return sd_model.cond_stage_model([""])
if isinstance(d, dict):
d = d['crossattn']
return d
def send_model_to_cpu(m): def send_model_to_cpu(m):
if m is not None: if m.lowvram:
if m.lowvram: lowvram.send_everything_to_cpu()
lowvram.send_everything_to_cpu() else:
else: m.to(devices.cpu)
m.to(devices.cpu)
devices.torch_gc() devices.torch_gc()
@ -764,25 +687,6 @@ def send_model_to_trash(m):
devices.torch_gc() devices.torch_gc()
def instantiate_from_config(config, state_dict=None):
constructor = get_obj_from_str(config["target"])
params = {**config.get("params", {})}
if state_dict and "state_dict" in params and params["state_dict"] is None:
params["state_dict"] = state_dict
return constructor(**params)
def get_obj_from_str(string, reload=False):
module, cls = string.rsplit(".", 1)
if reload:
module_imp = importlib.import_module(module)
importlib.reload(module_imp)
return getattr(importlib.import_module(module, package=None), cls)
def load_model(checkpoint_info=None, already_loaded_state_dict=None): def load_model(checkpoint_info=None, already_loaded_state_dict=None):
from modules import sd_hijack from modules import sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint() checkpoint_info = checkpoint_info or select_checkpoint()
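
The instantiate_from_config/get_obj_from_str helpers added in the hunk above build an object from a {"target": dotted.path, "params": {...}} config; a minimal sketch of that mechanism, with a standard-library class standing in for a model:

import importlib

def get_obj_from_str(string):
    module, cls = string.rsplit(".", 1)
    return getattr(importlib.import_module(module), cls)

config = {"target": "datetime.timedelta", "params": {"days": 2}}
obj = get_obj_from_str(config["target"])(**config.get("params", {}))
print(obj)    # 2 days, 0:00:00
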
@ -807,7 +711,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
timer.record("find config") timer.record("find config")
sd_config = OmegaConf.load(checkpoint_config) sd_config = OmegaConf.load(checkpoint_config)
repair_config(sd_config, state_dict) repair_config(sd_config)
timer.record("load config") timer.record("load config")
@ -817,7 +721,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
try: try:
with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd or shared.cmd_opts.do_not_download_clip): with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd or shared.cmd_opts.do_not_download_clip):
with sd_disable_initialization.InitializeOnMeta(): with sd_disable_initialization.InitializeOnMeta():
sd_model = instantiate_from_config(sd_config.model, state_dict) sd_model = instantiate_from_config(sd_config.model)
except Exception as e: except Exception as e:
errors.display(e, "creating model quickly", full_traceback=True) errors.display(e, "creating model quickly", full_traceback=True)
@ -826,7 +730,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
print('Failed to create model quickly; will retry using slow method.', file=sys.stderr) print('Failed to create model quickly; will retry using slow method.', file=sys.stderr)
with sd_disable_initialization.InitializeOnMeta(): with sd_disable_initialization.InitializeOnMeta():
sd_model = instantiate_from_config(sd_config.model, state_dict) sd_model = instantiate_from_config(sd_config.model)
sd_model.used_config = checkpoint_config sd_model.used_config = checkpoint_config
@ -843,7 +747,6 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
with sd_disable_initialization.LoadStateDictOnMeta(state_dict, device=model_target_device(sd_model), weight_dtype_conversion=weight_dtype_conversion): with sd_disable_initialization.LoadStateDictOnMeta(state_dict, device=model_target_device(sd_model), weight_dtype_conversion=weight_dtype_conversion):
load_model_weights(sd_model, checkpoint_info, state_dict, timer) load_model_weights(sd_model, checkpoint_info, state_dict, timer)
timer.record("load weights from state dict") timer.record("load weights from state dict")
send_model_to_device(sd_model) send_model_to_device(sd_model)
@ -881,16 +784,9 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer):
If it is loaded, returns that (moving it to GPU if necessary, and moving the currently loaded model to CPU if necessary). If it is loaded, returns that (moving it to GPU if necessary, and moving the currently loaded model to CPU if necessary).
If not, returns the model that can be used to load weights from checkpoint_info's file. If not, returns the model that can be used to load weights from checkpoint_info's file.
If no such model exists, returns None. If no such model exists, returns None.
Additionally deletes loaded models that are over the limit set in settings (sd_checkpoints_limit). Additionaly deletes loaded models that are over the limit set in settings (sd_checkpoints_limit).
""" """
if sd_model is not None and sd_model.sd_checkpoint_info.filename == checkpoint_info.filename:
return sd_model
if shared.opts.sd_checkpoints_keep_in_cpu:
send_model_to_cpu(sd_model)
timer.record("send model to cpu")
already_loaded = None already_loaded = None
for i in reversed(range(len(model_data.loaded_sd_models))): for i in reversed(range(len(model_data.loaded_sd_models))):
loaded_model = model_data.loaded_sd_models[i] loaded_model = model_data.loaded_sd_models[i]
@ -900,10 +796,14 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer):
if len(model_data.loaded_sd_models) > shared.opts.sd_checkpoints_limit > 0: if len(model_data.loaded_sd_models) > shared.opts.sd_checkpoints_limit > 0:
print(f"Unloading model {len(model_data.loaded_sd_models)} over the limit of {shared.opts.sd_checkpoints_limit}: {loaded_model.sd_checkpoint_info.title}") print(f"Unloading model {len(model_data.loaded_sd_models)} over the limit of {shared.opts.sd_checkpoints_limit}: {loaded_model.sd_checkpoint_info.title}")
del model_data.loaded_sd_models[i] model_data.loaded_sd_models.pop()
send_model_to_trash(loaded_model) send_model_to_trash(loaded_model)
timer.record("send model to trash") timer.record("send model to trash")
if shared.opts.sd_checkpoints_keep_in_cpu:
send_model_to_cpu(sd_model)
timer.record("send model to cpu")
if already_loaded is not None: if already_loaded is not None:
send_model_to_device(already_loaded) send_model_to_device(already_loaded)
timer.record("send model to device") timer.record("send model to device")

View File

@ -23,8 +23,6 @@ config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml"
config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml") config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml") config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
config_alt_diffusion_m18 = os.path.join(sd_configs_path, "alt-diffusion-m18-inference.yaml") config_alt_diffusion_m18 = os.path.join(sd_configs_path, "alt-diffusion-m18-inference.yaml")
config_sd3 = os.path.join(sd_configs_path, "sd3-inference.yaml")
def is_using_v_parameterization_for_sd2(state_dict): def is_using_v_parameterization_for_sd2(state_dict):
""" """
@ -33,11 +31,11 @@ def is_using_v_parameterization_for_sd2(state_dict):
import ldm.modules.diffusionmodules.openaimodel import ldm.modules.diffusionmodules.openaimodel
device = devices.device device = devices.cpu
with sd_disable_initialization.DisableInitialization(): with sd_disable_initialization.DisableInitialization():
unet = ldm.modules.diffusionmodules.openaimodel.UNetModel( unet = ldm.modules.diffusionmodules.openaimodel.UNetModel(
use_checkpoint=False, use_checkpoint=True,
use_fp16=False, use_fp16=False,
image_size=32, image_size=32,
in_channels=4, in_channels=4,
@ -58,13 +56,12 @@ def is_using_v_parameterization_for_sd2(state_dict):
with torch.no_grad(): with torch.no_grad():
unet_sd = {k.replace("model.diffusion_model.", ""): v for k, v in state_dict.items() if "model.diffusion_model." in k} unet_sd = {k.replace("model.diffusion_model.", ""): v for k, v in state_dict.items() if "model.diffusion_model." in k}
unet.load_state_dict(unet_sd, strict=True) unet.load_state_dict(unet_sd, strict=True)
unet.to(device=device, dtype=devices.dtype_unet) unet.to(device=device, dtype=torch.float)
test_cond = torch.ones((1, 2, 1024), device=device) * 0.5 test_cond = torch.ones((1, 2, 1024), device=device) * 0.5
x_test = torch.ones((1, 4, 8, 8), device=device) * 0.5 x_test = torch.ones((1, 4, 8, 8), device=device) * 0.5
with devices.autocast(): out = (unet(x_test, torch.asarray([999], device=device), context=test_cond) - x_test).mean().item()
out = (unet(x_test, torch.asarray([999], device=device), context=test_cond) - x_test).mean().cpu().item()
return out < -1 return out < -1
@ -74,15 +71,11 @@ def guess_model_config_from_state_dict(sd, filename):
diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None) diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None)
sd2_variations_weight = sd.get('embedder.model.ln_final.weight', None) sd2_variations_weight = sd.get('embedder.model.ln_final.weight', None)
if "model.diffusion_model.x_embedder.proj.weight" in sd:
return config_sd3
if sd.get('conditioner.embedders.1.model.ln_final.weight', None) is not None: if sd.get('conditioner.embedders.1.model.ln_final.weight', None) is not None:
if diffusion_model_input.shape[1] == 9: if diffusion_model_input.shape[1] == 9:
return config_sdxl_inpainting return config_sdxl_inpainting
else: else:
return config_sdxl return config_sdxl
if sd.get('conditioner.embedders.0.model.ln_final.weight', None) is not None: if sd.get('conditioner.embedders.0.model.ln_final.weight', None) is not None:
return config_sdxl_refiner return config_sdxl_refiner
elif sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None: elif sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None:
@ -106,6 +99,7 @@ def guess_model_config_from_state_dict(sd, filename):
if diffusion_model_input.shape[1] == 8: if diffusion_model_input.shape[1] == 8:
return config_instruct_pix2pix return config_instruct_pix2pix
if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None: if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None:
if sd.get('cond_stage_model.transformation.weight').size()[0] == 1024: if sd.get('cond_stage_model.transformation.weight').size()[0] == 1024:
return config_alt_diffusion_m18 return config_alt_diffusion_m18
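
A hedged sketch of the shape check used above to pick the inpainting config: an SD inpainting UNet's first convolution takes 9 input channels (4 noisy latent + 4 masked-image latent + 1 mask) instead of 4. The state_dict below is a dummy stand-in.

import torch

state_dict = {"model.diffusion_model.input_blocks.0.0.weight": torch.zeros(320, 9, 3, 3)}

diffusion_model_input = state_dict.get("model.diffusion_model.input_blocks.0.0.weight")
is_inpainting = diffusion_model_input is not None and diffusion_model_input.shape[1] == 9
print(is_inpainting)    # True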

View File

@ -32,9 +32,3 @@ class WebuiSdModel(LatentDiffusion):
is_sd1: bool is_sd1: bool
"""True if the model's architecture is SD 1.x""" """True if the model's architecture is SD 1.x"""
is_sd3: bool
"""True if the model's architecture is SD 3"""
latent_channels: int
"""number of layer in latent image representation; will be 16 in SD3 and 4 in other version"""

Some files were not shown because too many files have changed in this diff