Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git, synced 2025-04-06 04:39:01 +08:00
Compare commits
No commits in common. "master" and "v1.3.2-RC" have entirely different histories.
.eslintrc.js (16 changes)

@@ -50,14 +50,13 @@ module.exports = {
globals: {
//script.js
gradioApp: "readonly",
executeCallbacks: "readonly",
onAfterUiUpdate: "readonly",
onOptionsChanged: "readonly",
onUiLoaded: "readonly",
onUiUpdate: "readonly",
onOptionsChanged: "readonly",
uiCurrentTab: "writable",
uiElementInSight: "readonly",
uiElementIsVisible: "readonly",
uiElementInSight: "readonly",
executeCallbacks: "readonly",
//ui.js
opts: "writable",
all_gallery_buttons: "readonly",
@@ -74,12 +73,9 @@ module.exports = {
create_submit_args: "readonly",
restart_reload: "readonly",
updateInput: "readonly",
onEdit: "readonly",
//extraNetworks.js
requestGet: "readonly",
popup: "readonly",
// profilerVisualization.js
createVisualizationTable: "readonly",
// from python
localization: "readonly",
// progrssbar.js
@@ -88,11 +84,5 @@ module.exports = {
// imageviewer.js
modalPrevImage: "readonly",
modalNextImage: "readonly",
// localStorage.js
localSet: "readonly",
localGet: "readonly",
localRemove: "readonly",
// resizeHandle.js
setupResizeHandle: "writable"
}
};
.github/ISSUE_TEMPLATE/bug_report.yml (112 changes, vendored)

@@ -1,55 +1,35 @@
name: Bug Report
description: You think something is broken in the UI
description: You think somethings is broken in the UI
title: "[Bug]: "
labels: ["bug-report"]

body:
- type: markdown
attributes:
value: |
> The title of the bug report should be short and descriptive.
> Use relevant keywords for searchability.
> Do not leave it blank, but also do not put an entire error log in it.
- type: checkboxes
attributes:
label: Checklist
description: |
Please perform basic debugging to see if extensions or configuration is the cause of the issue.
Basic debug procedure
1. Disable all third-party extensions - check if extension is the cause
2. Update extensions and webui - sometimes things just need to be updated
3. Backup and remove your config.json and ui-config.json - check if the issue is caused by bad configuration
4. Delete venv with third-party extensions disabled - sometimes extensions might cause wrong libraries to be installed
5. Try a fresh installation webui in a different directory - see if a clean installation solves the issue
Before making a issue report please, check that the issue hasn't been reported recently.
label: Is there an existing issue for this?
description: Please search to see if an issue already exists for the bug you encountered, and that it hasn't been fixed in a recent build/commit.
options:
- label: The issue exists after disabling all extensions
- label: The issue exists on a clean installation of webui
- label: The issue is caused by an extension, but I believe it is caused by a bug in the webui
- label: The issue exists in the current version of the webui
- label: The issue has not been reported before recently
- label: The issue has been reported before but has not been fixed yet
- label: I have searched the existing issues and checked the recent builds/commits
required: true
- type: markdown
attributes:
value: |
> Please fill this form with as much information as possible. Don't forget to "Upload Sysinfo" and "What browsers" and provide screenshots if possible
*Please fill this form with as much information as possible, don't forget to fill "What OS..." and "What browsers" and *provide screenshots if possible**
- type: textarea
id: what-did
attributes:
label: What happened?
description: Tell us what happened in a very clear and simple way
placeholder: |
txt2img is not working as intended.
validations:
required: true
- type: textarea
id: steps
attributes:
label: Steps to reproduce the problem
description: Please provide us with precise step by step instructions on how to reproduce the bug
placeholder: |
1. Go to ...
2. Press ...
description: Please provide us with precise step by step information on how to reproduce the bug
value: |
1. Go to ....
2. Press ....
3. ...
validations:
required: true
@@ -57,11 +37,49 @@ body:
id: what-should
attributes:
label: What should have happened?
description: Tell us what you think the normal behavior should be
placeholder: |
WebUI should ...
description: Tell what you think the normal behavior should be
validations:
required: true
- type: input
id: commit
attributes:
label: Commit where the problem happens
description: Which commit are you running ? (Do not write *Latest version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Commit** link at the bottom of the UI, or from the cmd/terminal if you can't launch it.)
validations:
required: true
- type: dropdown
id: py-version
attributes:
label: What Python version are you running on ?
multiple: false
options:
- Python 3.10.x
- Python 3.11.x (above, no supported yet)
- Python 3.9.x (below, no recommended)
- type: dropdown
id: platforms
attributes:
label: What platforms do you use to access the UI ?
multiple: true
options:
- Windows
- Linux
- MacOS
- iOS
- Android
- Other/Cloud
- type: dropdown
id: device
attributes:
label: What device are you running WebUI on?
multiple: true
options:
- Nvidia GPUs (RTX 20 above)
- Nvidia GPUs (GTX 16 below)
- AMD GPUs (RX 6000 above)
- AMD GPUs (RX 5000 below)
- CPU
- Other GPUs
- type: dropdown
id: browsers
attributes:
@@ -73,25 +91,26 @@ body:
- Brave
- Apple Safari
- Microsoft Edge
- Android
- iOS
- Other
- type: textarea
id: sysinfo
id: cmdargs
attributes:
label: Sysinfo
description: System info file, generated by WebUI. You can generate it in settings, on the Sysinfo page. Drag the file into the field to upload it. If you submit your report without including the sysinfo file, the report will be closed. If needed, review the report to make sure it includes no personal information you don't want to share. If you can't start WebUI, you can use --dump-sysinfo commandline argument to generate the file.
placeholder: |
1. Go to WebUI Settings -> Sysinfo -> Download system info.
If WebUI fails to launch, use --dump-sysinfo commandline argument to generate the file
2. Upload the Sysinfo as a attached file, Do NOT paste it in as plain text.
label: Command Line Arguments
description: Are you using any launching parameters/command line arguments (modified webui-user .bat/.sh) ? If yes, please write them below. Write "No" otherwise.
render: Shell
validations:
required: true
- type: textarea
id: extensions
attributes:
label: List of extensions
description: Are you using any extensions other than built-ins? If yes, provide a list, you can copy it at "Extensions" tab. Write "No" otherwise.
validations:
required: true
- type: textarea
id: logs
attributes:
label: Console logs
description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after the bug occurred. If it's very long, provide a link to pastebin or similar service.
description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after your bug happened. If it's very long, provide a link to pastebin or similar service.
render: Shell
validations:
required: true
@@ -99,7 +118,4 @@ body:
id: misc
attributes:
label: Additional information
description: |
Please provide us with any relevant additional info or context.
Examples:
I have updated my GPU driver recently.
description: Please provide us with any relevant additional info or context.
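The Sysinfo field above is the part reporters most often leave out. Below is a minimal sketch of the two ways the template describes for producing the file; only the Settings path and the --dump-sysinfo flag come from the template itself, while the `launch.py` invocation and the output location are assumptions.

```sh
# 1) If WebUI runs: Settings -> Sysinfo -> "Download system info", then attach the file.

# 2) If WebUI fails to launch, generate the file from the command line instead
#    (assumed invocation; the template only names the --dump-sysinfo flag):
cd stable-diffusion-webui
python launch.py --dump-sysinfo

# Attach the resulting sysinfo-????-??-??-??-??.json to the report as a file;
# do NOT paste its contents as plain text.
```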
.github/workflows/on_pull_request.yaml (16 changes, vendored)

@@ -1,4 +1,4 @@
name: Linter
name: Run Linting/Formatting on Pull Requests

on:
- push
@@ -6,13 +6,11 @@ on:

jobs:
lint-python:
name: ruff
runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
steps:
- name: Checkout Code
uses: actions/checkout@v4
- uses: actions/setup-python@v5
uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: 3.11
# NB: there's no cache: pip here since we're not installing anything
@@ -20,18 +18,16 @@ jobs:
# not to have GHA download an (at the time of writing) 4 GB cache
# of PyTorch and other dependencies.
- name: Install Ruff
run: pip install ruff==0.3.3
run: pip install ruff==0.0.265
- name: Run Ruff
run: ruff .
lint-js:
name: eslint
runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
steps:
- name: Checkout Code
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Install Node.js
uses: actions/setup-node@v4
uses: actions/setup-node@v3
with:
node-version: 18
- run: npm i --ci
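A rough local equivalent of the two lint jobs above, useful before opening a PR. The pinned ruff version and the `ruff .` and `npm i --ci` commands are taken verbatim from the newer side of this diff; the `npx eslint .` invocation is an assumption, since the workflow only shows the dependency install step.

```sh
# Python lint, as in the lint-python job (newer side of the diff)
pip install ruff==0.3.3
ruff .

# JS lint, as in the lint-js job; the workflow installs dependencies with
# "npm i --ci" but does not show the eslint command, so the last line is a guess.
npm i --ci
npx eslint .
```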
.github/workflows/run_tests.yaml (27 changes, vendored)

@@ -1,4 +1,4 @@
name: Tests
name: Run basic features tests on CPU with empty SD model

on:
- push
@@ -6,26 +6,18 @@ on:

jobs:
test:
name: tests on CPU with empty model
runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
steps:
- name: Checkout Code
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Set up Python 3.10
uses: actions/setup-python@v5
uses: actions/setup-python@v4
with:
python-version: 3.10.6
cache: pip
cache-dependency-path: |
**/requirements*txt
launch.py
- name: Cache models
id: cache-models
uses: actions/cache@v4
with:
path: models
key: "2023-12-30"
- name: Install test dependencies
run: pip install wait-for-it -r requirements-test.txt
env:
@@ -39,8 +31,6 @@ jobs:
TORCH_INDEX_URL: https://download.pytorch.org/whl/cpu
WEBUI_LAUNCH_LIVE_OUTPUT: "1"
PYTHONUNBUFFERED: "1"
- name: Print installed packages
run: pip freeze
- name: Start test server
run: >
python -m coverage run
@@ -49,32 +39,31 @@ jobs:
--skip-prepare-environment
--skip-torch-cuda-test
--test-server
--do-not-download-clip
--no-half
--disable-opt-split-attention
--use-cpu all
--api-server-stop
--add-stop-route
2>&1 | tee output.txt &
- name: Run tests
run: |
wait-for-it --service 127.0.0.1:7860 -t 20
wait-for-it --service 127.0.0.1:7860 -t 600
python -m pytest -vv --junitxml=test/results.xml --cov . --cov-report=xml --verify-base-url test
- name: Kill test server
if: always()
run: curl -vv -XPOST http://127.0.0.1:7860/sdapi/v1/server-stop && sleep 10
run: curl -vv -XPOST http://127.0.0.1:7860/_stop && sleep 10
- name: Show coverage
run: |
python -m coverage combine .coverage*
python -m coverage report -i
python -m coverage html -i
- name: Upload main app output
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
if: always()
with:
name: output
path: output.txt
- name: Upload coverage HTML
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
if: always()
with:
name: htmlcov
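For reference, a sketch of running this CPU-only test pass outside CI. The flags, the pytest command, the server-stop endpoint, and the coverage commands are copied from the workflow lines shown above; passing them to `launch.py` is an assumption, because the middle of the `coverage run` command falls between two hunks and is not visible here.

```sh
export TORCH_INDEX_URL=https://download.pytorch.org/whl/cpu
pip install wait-for-it -r requirements-test.txt

# start the test server in the background (target script assumed to be launch.py)
python -m coverage run launch.py \
    --skip-prepare-environment --skip-torch-cuda-test --test-server \
    --do-not-download-clip --no-half --disable-opt-split-attention \
    --use-cpu all --api-server-stop 2>&1 | tee output.txt &

# wait for the API, then run the tests (using the longer of the two timeouts in the diff)
wait-for-it --service 127.0.0.1:7860 -t 600
python -m pytest -vv --junitxml=test/results.xml --cov . --cov-report=xml --verify-base-url test

# stop the server and summarize coverage
curl -vv -XPOST http://127.0.0.1:7860/sdapi/v1/server-stop && sleep 10
python -m coverage combine .coverage*
python -m coverage report -i
```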
.github/workflows/warns_merge_master.yml (19 changes, vendored)

@@ -1,19 +0,0 @@
name: Pull requests can't target master branch

"on":
pull_request:
types:
- opened
- synchronize
- reopened
branches:
- master

jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Warning marge into master
run: |
echo -e "::warning::This pull request directly merge into \"master\" branch, normally development happens on \"dev\" branch."
exit 1
.gitignore (5 changes, vendored)

@@ -2,7 +2,6 @@ __pycache__
*.ckpt
*.safetensors
*.pth
.DS_Store
/ESRGAN/*
/SwinIR/*
/repositories
@@ -38,7 +37,3 @@ notification.mp3
/node_modules
/package-lock.json
/.coverage*
/test/test_outputs
/cache
trace.json
/sysinfo-????-??-??-??-??.json
CHANGELOG.md (891 changes)

@@ -1,888 +1,3 @@
|
||||
## 1.10.1
|
||||
|
||||
### Bug Fixes:
|
||||
* fix image upscale on cpu ([#16275](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16275))
|
||||
|
||||
|
||||
## 1.10.0
|
||||
|
||||
### Features:
|
||||
* A lot of performance improvements (see below in Performance section)
|
||||
* Stable Diffusion 3 support ([#16030](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16030), [#16164](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16164), [#16212](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16212))
|
||||
* Recommended Euler sampler; DDIM and other timestamp samplers currently not supported
|
||||
* T5 text model is disabled by default, enable it in settings
|
||||
* New schedulers:
|
||||
* Align Your Steps ([#15751](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15751))
|
||||
* KL Optimal ([#15608](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15608))
|
||||
* Normal ([#16149](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16149))
|
||||
* DDIM ([#16149](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16149))
|
||||
* Simple ([#16142](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16142))
|
||||
* Beta ([#16235](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16235))
|
||||
* New sampler: DDIM CFG++ ([#16035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16035))
|
||||
|
||||
### Minor:
|
||||
* Option to skip CFG on early steps ([#15607](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15607))
|
||||
* Add --models-dir option ([#15742](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15742))
|
||||
* Allow mobile users to open context menu by using two fingers press ([#15682](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15682))
|
||||
* Infotext: add Lora name as TI hashes for bundled Textual Inversion ([#15679](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15679))
|
||||
* Check model's hash after downloading it to prevent corruped downloads ([#15602](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15602))
|
||||
* More extension tag filtering options ([#15627](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15627))
|
||||
* When saving AVIF, use JPEG's quality setting ([#15610](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15610))
|
||||
* Add filename pattern: `[basename]` ([#15978](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15978))
|
||||
* Add option to enable clip skip for clip L on SDXL ([#15992](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15992))
|
||||
* Option to prevent screen sleep during generation ([#16001](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16001))
|
||||
* ToggleLivePriview button in image viewer ([#16065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16065))
|
||||
* Remove ui flashing on reloading and fast scrollong ([#16153](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16153))
|
||||
* option to disable save button log.csv ([#16242](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16242))
|
||||
|
||||
### Extensions and API:
|
||||
* Add process_before_every_sampling hook ([#15984](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15984))
|
||||
* Return HTTP 400 instead of 404 on invalid sampler error ([#16140](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16140))
|
||||
|
||||
### Performance:
|
||||
* [Performance 1/6] use_checkpoint = False ([#15803](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15803))
|
||||
* [Performance 2/6] Replace einops.rearrange with torch native ops ([#15804](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15804))
|
||||
* [Performance 4/6] Precompute is_sdxl_inpaint flag ([#15806](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15806))
|
||||
* [Performance 5/6] Prevent unnecessary extra networks bias backup ([#15816](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15816))
|
||||
* [Performance 6/6] Add --precision half option to avoid casting during inference ([#15820](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15820))
|
||||
* [Performance] LDM optimization patches ([#15824](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15824))
|
||||
* [Performance] Keep sigmas on CPU ([#15823](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15823))
|
||||
* Check for nans in unet only once, after all steps have been completed
|
||||
* Added pption to run torch profiler for image generation
|
||||
|
||||
### Bug Fixes:
|
||||
* Fix for grids without comprehensive infotexts ([#15958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15958))
|
||||
* feat: lora partial update precede full update ([#15943](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15943))
|
||||
* Fix bug where file extension had an extra '.' under some circumstances ([#15893](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15893))
|
||||
* Fix corrupt model initial load loop ([#15600](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15600))
|
||||
* Allow old sampler names in API ([#15656](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15656))
|
||||
* more old sampler scheduler compatibility ([#15681](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15681))
|
||||
* Fix Hypertile xyz ([#15831](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15831))
|
||||
* XYZ CSV skipinitialspace ([#15832](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15832))
|
||||
* fix soft inpainting on mps and xpu, torch_utils.float64 ([#15815](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15815))
|
||||
* fix extention update when not on main branch ([#15797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15797))
|
||||
* update pickle safe filenames
|
||||
* use relative path for webui-assets css ([#15757](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15757))
|
||||
* When creating a virtual environment, upgrade pip in webui.bat/webui.sh ([#15750](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15750))
|
||||
* Fix AttributeError ([#15738](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15738))
|
||||
* use script_path for webui root in launch_utils ([#15705](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15705))
|
||||
* fix extra batch mode P Transparency ([#15664](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15664))
|
||||
* use gradio theme colors in css ([#15680](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15680))
|
||||
* Fix dragging text within prompt input ([#15657](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15657))
|
||||
* Add correct mimetype for .mjs files ([#15654](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15654))
|
||||
* QOL Items - handle metadata issues more cleanly for SD models, Loras and embeddings ([#15632](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15632))
|
||||
* replace wsl-open with wslpath and explorer.exe ([#15968](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15968))
|
||||
* Fix SDXL Inpaint ([#15976](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15976))
|
||||
* multi size grid ([#15988](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15988))
|
||||
* fix Replace preview ([#16118](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16118))
|
||||
* Possible fix of wrong scale in weight decomposition ([#16151](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16151))
|
||||
* Ensure use of python from venv on Mac and Linux ([#16116](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16116))
|
||||
* Prioritize python3.10 over python3 if both are available on Linux and Mac (with fallback) ([#16092](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16092))
|
||||
* stoping generation extras ([#16085](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16085))
|
||||
* Fix SD2 loading ([#16078](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16078), [#16079](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16079))
|
||||
* fix infotext Lora hashes for hires fix different lora ([#16062](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16062))
|
||||
* Fix sampler scheduler autocorrection warning ([#16054](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16054))
|
||||
* fix ui flashing on reloading and fast scrollong ([#16153](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16153))
|
||||
* fix upscale logic ([#16239](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16239))
|
||||
* [bug] do not break progressbar on non-job actions (add wrap_gradio_call_no_job) ([#16202](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16202))
|
||||
* fix OSError: cannot write mode P as JPEG ([#16194](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16194))
|
||||
|
||||
### Other:
|
||||
* fix changelog #15883 -> #15882 ([#15907](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15907))
|
||||
* ReloadUI backgroundColor --background-fill-primary ([#15864](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15864))
|
||||
* Use different torch versions for Intel and ARM Macs ([#15851](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15851))
|
||||
* XYZ override rework ([#15836](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15836))
|
||||
* scroll extensions table on overflow ([#15830](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15830))
|
||||
* img2img batch upload method ([#15817](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15817))
|
||||
* chore: sync v1.8.0 packages according to changelog ([#15783](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15783))
|
||||
* Add AVIF MIME type support to mimetype definitions ([#15739](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15739))
|
||||
* Update imageviewer.js ([#15730](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15730))
|
||||
* no-referrer ([#15641](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15641))
|
||||
* .gitignore trace.json ([#15980](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15980))
|
||||
* Bump spandrel to 0.3.4 ([#16144](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16144))
|
||||
* Defunct --max-batch-count ([#16119](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16119))
|
||||
* docs: update bug_report.yml ([#16102](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16102))
|
||||
* Maintaining Project Compatibility for Python 3.9 Users Without Upgrade Requirements. ([#16088](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16088), [#16169](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16169), [#16192](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16192))
|
||||
* Update torch for ARM Macs to 2.3.1 ([#16059](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16059))
|
||||
* remove deprecated setting dont_fix_second_order_samplers_schedule ([#16061](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16061))
|
||||
* chore: fix typos ([#16060](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16060))
|
||||
* shlex.join launch args in console log ([#16170](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16170))
|
||||
* activate venv .bat ([#16231](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16231))
|
||||
* add ids to the resize tabs in img2img ([#16218](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16218))
|
||||
* update installation guide linux ([#16178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16178))
|
||||
* Robust sysinfo ([#16173](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16173))
|
||||
* do not send image size on paste inpaint ([#16180](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16180))
|
||||
* Fix noisy DS_Store files for MacOS ([#16166](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/16166))
|
||||
|
||||
|
||||
## 1.9.4
|
||||
|
||||
### Bug Fixes:
|
||||
* pin setuptools version to fix the startup error ([#15882](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15882))
|
||||
|
||||
## 1.9.3
|
||||
|
||||
### Bug Fixes:
|
||||
* fix get_crop_region_v2 ([#15594](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15594))
|
||||
|
||||
## 1.9.2
|
||||
|
||||
### Extensions and API:
|
||||
* restore 1.8.0-style naming of scripts
|
||||
|
||||
## 1.9.1
|
||||
|
||||
### Minor:
|
||||
* Add avif support ([#15582](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15582))
|
||||
* Add filename patterns: `[sampler_scheduler]` and `[scheduler]` ([#15581](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15581))
|
||||
|
||||
### Extensions and API:
|
||||
* undo adding scripts to sys.modules
|
||||
* Add schedulers API endpoint ([#15577](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15577))
|
||||
* Remove API upscaling factor limits ([#15560](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15560))
|
||||
|
||||
### Bug Fixes:
|
||||
* Fix images do not match / Coordinate 'right' is less than 'left' ([#15534](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15534))
|
||||
* fix: remove_callbacks_for_function should also remove from the ordered map ([#15533](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15533))
|
||||
* fix x1 upscalers ([#15555](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15555))
|
||||
* Fix cls.__module__ value in extension script ([#15532](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15532))
|
||||
* fix typo in function call (eror -> error) ([#15531](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15531))
|
||||
|
||||
### Other:
|
||||
* Hide 'No Image data blocks found.' message ([#15567](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15567))
|
||||
* Allow webui.sh to be runnable from arbitrary directories containing a .git file ([#15561](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15561))
|
||||
* Compatibility with Debian 11, Fedora 34+ and openSUSE 15.4+ ([#15544](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15544))
|
||||
* numpy DeprecationWarning product -> prod ([#15547](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15547))
|
||||
* get_crop_region_v2 ([#15583](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15583), [#15587](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15587))
|
||||
|
||||
|
||||
## 1.9.0
|
||||
|
||||
### Features:
|
||||
* Make refiner switchover based on model timesteps instead of sampling steps ([#14978](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14978))
|
||||
* add an option to have old-style directory view instead of tree view; stylistic changes for extra network sorting/search controls
|
||||
* add UI for reordering callbacks, support for specifying callback order in extension metadata ([#15205](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15205))
|
||||
* Sgm uniform scheduler for SDXL-Lightning models ([#15325](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15325))
|
||||
* Scheduler selection in main UI ([#15333](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15333), [#15361](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15361), [#15394](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15394))
|
||||
|
||||
### Minor:
|
||||
* "open images directory" button now opens the actual dir ([#14947](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14947))
|
||||
* Support inference with LyCORIS BOFT networks ([#14871](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14871), [#14973](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14973))
|
||||
* make extra network card description plaintext by default, with an option to re-enable HTML as it was
|
||||
* resize handle for extra networks ([#15041](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15041))
|
||||
* cmd args: `--unix-filenames-sanitization` and `--filenames-max-length` ([#15031](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15031))
|
||||
* show extra networks parameters in HTML table rather than raw JSON ([#15131](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15131))
|
||||
* Add DoRA (weight-decompose) support for LoRA/LoHa/LoKr ([#15160](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15160), [#15283](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15283))
|
||||
* Add '--no-prompt-history' cmd args for disable last generation prompt history ([#15189](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15189))
|
||||
* update preview on Replace Preview ([#15201](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15201))
|
||||
* only fetch updates for extensions' active git branches ([#15233](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15233))
|
||||
* put upscale postprocessing UI into an accordion ([#15223](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15223))
|
||||
* Support dragdrop for URLs to read infotext ([#15262](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15262))
|
||||
* use diskcache library for caching ([#15287](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15287), [#15299](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15299))
|
||||
* Allow PNG-RGBA for Extras Tab ([#15334](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15334))
|
||||
* Support cover images embedded in safetensors metadata ([#15319](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15319))
|
||||
* faster interrupt when using NN upscale ([#15380](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15380))
|
||||
* Extras upscaler: an input field to limit maximul side length for the output image ([#15293](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15293), [#15415](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15415), [#15417](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15417), [#15425](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15425))
|
||||
* add an option to hide postprocessing options in Extras tab
|
||||
|
||||
### Extensions and API:
|
||||
* ResizeHandleRow - allow overriden column scale parametr ([#15004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15004))
|
||||
* call script_callbacks.ui_settings_callback earlier; fix extra-options-section built-in extension killing the ui if using a setting that doesn't exist
|
||||
* make it possible to use zoom.js outside webui context ([#15286](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15286), [#15288](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15288))
|
||||
* allow variants for extension name in metadata.ini ([#15290](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15290))
|
||||
* make reloading UI scripts optional when doing Reload UI, and off by default
|
||||
* put request: gr.Request at start of img2img function similar to txt2img
|
||||
* open_folder as util ([#15442](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15442))
|
||||
* make it possible to import extensions' script files as `import scripts.<filename>` ([#15423](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15423))
|
||||
|
||||
### Performance:
|
||||
* performance optimization for extra networks HTML pages
|
||||
* optimization for extra networks filtering
|
||||
* optimization for extra networks sorting
|
||||
|
||||
### Bug Fixes:
|
||||
* prevent escape button causing an interrupt when no generation has been made yet
|
||||
* [bug] avoid doble upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966))
|
||||
* possible fix for reload button not appearing in some cases for extra networks.
|
||||
* fix: the `split_threshold` parameter does not work when running Split oversized images ([#15006](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15006))
|
||||
* Fix resize-handle visability for vertical layout (mobile) ([#15010](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15010))
|
||||
* register_tmp_file also for mtime ([#15012](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15012))
|
||||
* Protect alphas_cumprod during refiner switchover ([#14979](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14979))
|
||||
* Fix EXIF orientation in API image loading ([#15062](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15062))
|
||||
* Only override emphasis if actually used in prompt ([#15141](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15141))
|
||||
* Fix emphasis infotext missing from `params.txt` ([#15142](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15142))
|
||||
* fix extract_style_text_from_prompt #15132 ([#15135](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15135))
|
||||
* Fix Soft Inpaint for AnimateDiff ([#15148](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15148))
|
||||
* edit-attention: deselect surrounding whitespace ([#15178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15178))
|
||||
* chore: fix font not loaded ([#15183](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15183))
|
||||
* use natural sort in extra networks when ordering by path
|
||||
* Fix built-in lora system bugs caused by torch.nn.MultiheadAttention ([#15190](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15190))
|
||||
* Avoid error from None in get_learned_conditioning ([#15191](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15191))
|
||||
* Add entry to MassFileLister after writing metadata ([#15199](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15199))
|
||||
* fix issue with Styles when Hires prompt is used ([#15269](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15269), [#15276](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15276))
|
||||
* Strip comments from hires fix prompt ([#15263](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15263))
|
||||
* Make imageviewer event listeners browser consistent ([#15261](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15261))
|
||||
* Fix AttributeError in OFT when trying to get MultiheadAttention weight ([#15260](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15260))
|
||||
* Add missing .mean() back ([#15239](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15239))
|
||||
* fix "Restore progress" button ([#15221](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15221))
|
||||
* fix ui-config for InputAccordion [custom_script_source] ([#15231](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15231))
|
||||
* handle 0 wheel deltaY ([#15268](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15268))
|
||||
* prevent alt menu for firefox ([#15267](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15267))
|
||||
* fix: fix syntax errors ([#15179](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15179))
|
||||
* restore outputs path ([#15307](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15307))
|
||||
* Escape btn_copy_path filename ([#15316](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15316))
|
||||
* Fix extra networks buttons when filename contains an apostrophe ([#15331](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15331))
|
||||
* escape brackets in lora random prompt generator ([#15343](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15343))
|
||||
* fix: Python version check for PyTorch installation compatibility ([#15390](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15390))
|
||||
* fix typo in call_queue.py ([#15386](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15386))
|
||||
* fix: when find already_loaded model, remove loaded by array index ([#15382](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15382))
|
||||
* minor bug fix of sd model memory management ([#15350](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15350))
|
||||
* Fix CodeFormer weight ([#15414](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15414))
|
||||
* Fix: Remove script callbacks in ordered_callbacks_map ([#15428](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15428))
|
||||
* fix limited file write (thanks, Sylwia)
|
||||
* Fix extra-single-image API not doing upscale failed ([#15465](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15465))
|
||||
* error handling paste_field callables ([#15470](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15470))
|
||||
|
||||
### Hardware:
|
||||
* Add training support and change lspci for Ascend NPU ([#14981](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14981))
|
||||
* Update to ROCm5.7 and PyTorch ([#14820](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14820))
|
||||
* Better workaround for Navi1, removing --pre for Navi3 ([#15224](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15224))
|
||||
* Ascend NPU wiki page ([#15228](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15228))
|
||||
|
||||
### Other:
|
||||
* Update comment for Pad prompt/negative prompt v0 to add a warning about truncation, make it override the v1 implementation
|
||||
* support resizable columns for touch (tablets) ([#15002](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15002))
|
||||
* Fix #14591 using translated content to do categories mapping ([#14995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14995))
|
||||
* Use `absolute` path for normalized filepath ([#15035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15035))
|
||||
* resizeHandle handle double tap ([#15065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15065))
|
||||
* --dat-models-path cmd flag ([#15039](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15039))
|
||||
* Add a direct link to the binary release ([#15059](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15059))
|
||||
* upscaler_utils: Reduce logging ([#15084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15084))
|
||||
* Fix various typos with crate-ci/typos ([#15116](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15116))
|
||||
* fix_jpeg_live_preview ([#15102](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15102))
|
||||
* [alternative fix] can't load webui if selected wrong extra option in ui ([#15121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15121))
|
||||
* Error handling for unsupported transparency ([#14958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14958))
|
||||
* Add model description to searched terms ([#15198](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15198))
|
||||
* bump action version ([#15272](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15272))
|
||||
* PEP 604 annotations ([#15259](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15259))
|
||||
* Automatically Set the Scale by value when user selects an Upscale Model ([#15244](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15244))
|
||||
* move postprocessing-for-training into builtin extensions ([#15222](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15222))
|
||||
* type hinting in shared.py ([#15211](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15211))
|
||||
* update ruff to 0.3.3
|
||||
* Update pytorch lightning utilities ([#15310](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15310))
|
||||
* Add Size as an XYZ Grid option ([#15354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15354))
|
||||
* Use HF_ENDPOINT variable for HuggingFace domain with default ([#15443](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15443))
|
||||
* re-add update_file_entry ([#15446](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15446))
|
||||
* create_infotext allow index and callable, re-work Hires prompt infotext ([#15460](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15460))
|
||||
* update restricted_opts to include more options for --hide-ui-dir-config ([#15492](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15492))
|
||||
|
||||
|
||||
## 1.8.0
|
||||
|
||||
### Features:
|
||||
* Update torch to version 2.1.2
|
||||
* Soft Inpainting ([#14208](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14208))
|
||||
* FP8 support ([#14031](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14031), [#14327](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14327))
|
||||
* Support for SDXL-Inpaint Model ([#14390](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14390))
|
||||
* Use Spandrel for upscaling and face restoration architectures ([#14425](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14425), [#14467](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14467), [#14473](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14473), [#14474](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14474), [#14477](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14477), [#14476](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14476), [#14484](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14484), [#14500](https://github.com/AUTOMATIC1111/stable-difusion-webui/pull/14500), [#14501](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14501), [#14504](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14504), [#14524](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14524), [#14809](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14809))
|
||||
* Automatic backwards version compatibility (when loading infotexts from old images with program version specified, will add compatibility settings)
|
||||
* Implement zero terminal SNR noise schedule option (**[SEED BREAKING CHANGE](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Seed-breaking-changes#180-dev-170-225-2024-01-01---zero-terminal-snr-noise-schedule-option)**, [#14145](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14145), [#14979](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14979))
|
||||
* Add a [✨] button to run hires fix on selected image in the gallery (with help from [#14598](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14598), [#14626](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14626), [#14728](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14728))
|
||||
* [Separate assets repository](https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets); serve fonts locally rather than from google's servers
|
||||
* Official LCM Sampler Support ([#14583](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14583))
|
||||
* Add support for DAT upscaler models ([#14690](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14690), [#15039](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15039))
|
||||
* Extra Networks Tree View ([#14588](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14588), [#14900](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14900))
|
||||
* NPU Support ([#14801](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14801))
|
||||
* Prompt comments support
|
||||
|
||||
### Minor:
|
||||
* Allow pasting in WIDTHxHEIGHT strings into the width/height fields ([#14296](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14296))
|
||||
* add option: Live preview in full page image viewer ([#14230](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14230), [#14307](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14307))
|
||||
* Add keyboard shortcuts for generate/skip/interrupt ([#14269](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14269))
|
||||
* Better TCMALLOC support on different platforms ([#14227](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14227), [#14883](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14883), [#14910](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14910))
|
||||
* Lora not found warning ([#14464](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14464))
|
||||
* Adding negative prompts to Loras in extra networks ([#14475](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14475))
|
||||
* xyz_grid: allow varying the seed along an axis separate from axis options ([#12180](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12180))
|
||||
* option to convert VAE to bfloat16 (implementation of [#9295](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9295))
|
||||
* Better IPEX support ([#14229](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14229), [#14353](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14353), [#14559](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14559), [#14562](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14562), [#14597](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14597))
|
||||
* Option to interrupt after current generation rather than immediately ([#13653](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13653), [#14659](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14659))
|
||||
* Fullscreen Preview control fading/disable ([#14291](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14291))
|
||||
* Finer settings freezing control ([#13789](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13789))
|
||||
* Increase Upscaler Limits ([#14589](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14589))
|
||||
* Adjust brush size with hotkeys ([#14638](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14638))
|
||||
* Add checkpoint info to csv log file when saving images ([#14663](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14663))
|
||||
* Make more columns resizable ([#14740](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14740), [#14884](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14884))
|
||||
* Add an option to not overlay original image for inpainting for #14727
|
||||
* Add Pad conds v0 option to support same generation with DDIM as before 1.6.0
|
||||
* Add "Interrupting..." placeholder.
|
||||
* Button for refresh extensions list ([#14857](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14857))
|
||||
* Add an option to disable normalization after calculating emphasis. ([#14874](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14874))
|
||||
* When counting tokens, also include enabled styles (can be disabled in settings to revert to previous behavior)
|
||||
* Configuration for the [📂] button for image gallery ([#14947](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14947))
|
||||
* Support inference with LyCORIS BOFT networks ([#14871](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14871), [#14973](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14973))
|
||||
* support resizable columns for touch (tablets) ([#15002](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15002))
|
||||
|
||||
### Extensions and API:
|
||||
* Removed packages from requirements: basicsr, gfpgan, realesrgan; as well as their dependencies: absl-py, addict, beautifulsoup4, future, gdown, grpcio, importlib-metadata, lmdb, lpips, Markdown, platformdirs, PySocks, soupsieve, tb-nightly, tensorboard-data-server, tomli, Werkzeug, yapf, zipp, soupsieve
|
||||
* Enable task ids for API ([#14314](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14314))
|
||||
* add override_settings support for infotext API
|
||||
* rename generation_parameters_copypaste module to infotext_utils
|
||||
* prevent crash due to Script __init__ exception ([#14407](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14407))
|
||||
* Bump numpy to 1.26.2 ([#14471](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14471))
|
||||
* Add utility to inspect a model's dtype/device ([#14478](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14478))
|
||||
* Implement general forward method for all method in built-in lora ext ([#14547](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14547))
|
||||
* Execute model_loaded_callback after moving to target device ([#14563](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14563))
|
||||
* Add self to CFGDenoiserParams ([#14573](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14573))
|
||||
* Allow TLS with API only mode (--nowebui) ([#14593](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14593))
|
||||
* New callback: postprocess_image_after_composite ([#14657](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14657))
|
||||
* modules/api/api.py: add api endpoint to refresh embeddings list ([#14715](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14715))
|
||||
* set_named_arg ([#14773](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14773))
|
||||
* add before_token_counter callback and use it for prompt comments
|
||||
* ResizeHandleRow - allow overridden column scale parameter ([#15004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15004))
|
||||
|
||||
### Performance:
|
||||
* Massive performance improvement for extra networks directories with a huge number of files in them in an attempt to tackle #14507 ([#14528](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14528))
|
||||
* Reduce unnecessary re-indexing extra networks directory ([#14512](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14512))
|
||||
* Avoid unnecessary `isfile`/`exists` calls ([#14527](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14527))
|
||||
|
||||
### Bug Fixes:
|
||||
* fix multiple bugs related to styles multi-file support ([#14203](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14203), [#14276](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14276), [#14707](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14707))
|
||||
* Lora fixes ([#14300](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14300), [#14237](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14237), [#14546](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14546), [#14726](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14726))
|
||||
* Re-add setting lost as part of e294e46 ([#14266](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14266))
|
||||
* fix extras caption BLIP ([#14330](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14330))
|
||||
* include infotext into saved init image for img2img ([#14452](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14452))
|
||||
* xyz grid handle axis_type is None ([#14394](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14394))
|
||||
* Update Added (Fixed) IPV6 Functionality When there is No Webui Argument Passed webui.py ([#14354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14354))
|
||||
* fix API thread safe issues of txt2img and img2img ([#14421](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14421))
|
||||
* handle selectable script_index is None ([#14487](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14487))
|
||||
* handle config.json failed to load ([#14525](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14525), [#14767](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14767))
|
||||
* paste infotext cast int as float ([#14523](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14523))
|
||||
* Ensure GRADIO_ANALYTICS_ENABLED is set early enough ([#14537](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14537))
|
||||
* Fix logging configuration again ([#14538](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14538))
|
||||
* Handle CondFunc exception when resolving attributes ([#14560](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14560))
|
||||
* Fix extras big batch crashes ([#14699](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14699))
|
||||
* Fix using wrong model caused by alias ([#14655](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14655))
|
||||
* Add # to the invalid_filename_chars list ([#14640](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14640))
|
||||
* Fix extension check for requirements ([#14639](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14639))
|
||||
* Fix tab indexes are reset after restart UI ([#14637](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14637))
|
||||
* Fix nested manual cast ([#14689](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14689))
|
||||
* Keep postprocessing upscale selected tab after restart ([#14702](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14702))
|
||||
* XYZ grid: filter out blank vals when axis is int or float type (like int axis seed) ([#14754](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14754))
|
||||
* fix CLIP Interrogator topN regex ([#14775](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14775))
|
||||
* Fix dtype error in MHA layer/change dtype checking mechanism for manual cast ([#14791](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14791))
|
||||
* catch load style.csv error ([#14814](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14814))
|
||||
* fix error when editing extra networks card
|
||||
* fix extra networks metadata failing to work properly when you create the .json file with metadata for the first time.
|
||||
* util.walk_files: match file extensions case-insensitively ([#14879](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14879)) (a sketch of the idea follows this list)
|
||||
* if extensions page not loaded, prevent apply ([#14873](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14873))
|
||||
* call the right function for token counter in img2img
|
||||
* Fix the bug where the search/reload controls disappear when using other ExtraNetworks extensions ([#14939](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14939))
|
||||
* Gracefully handle mtime read exception from cache ([#14933](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14933))
|
||||
* Only trigger interrupt on `Esc` when interrupt button visible ([#14932](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14932))
|
||||
* the "Disable prompt token counters" option now actually disables token counting rather than just hiding results.
|
||||
* avoid double upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966))
|
||||
* Fix #14591 using translated content to do categories mapping ([#14995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14995))
|
||||
* fix: the `split_threshold` parameter does not work when running Split oversized images ([#15006](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15006))
|
||||
* Fix resize-handle for mobile ([#15010](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15010), [#15065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15065))
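For the `util.walk_files` entry above, here is a minimal, self-contained sketch of case-insensitive extension filtering. The function body and signature are illustrative assumptions, not the webui's actual `modules/util.py` implementation.

```python
import os

def walk_files(path, allowed_extensions=None):
    """Yield file paths under `path`, filtering extensions case-insensitively."""
    if allowed_extensions is not None:
        # normalize once so ".PNG", ".Png" and ".png" all match
        allowed_extensions = {ext.lower() for ext in allowed_extensions}

    for root, _dirs, files in os.walk(path):
        for filename in sorted(files):
            if allowed_extensions is not None:
                ext = os.path.splitext(filename)[1].lower()
                if ext not in allowed_extensions:
                    continue
            yield os.path.join(root, filename)

# example: list preview images regardless of how their extension is capitalized
for file in walk_files(".", allowed_extensions={".png", ".jpg", ".jpeg"}):
    print(file)
```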
|
||||
|
||||
### Other:
|
||||
* Assign id for "extra_options". Replace numeric field with slider. ([#14270](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14270))
|
||||
* change state dict comparison to ref compare ([#14216](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14216))
|
||||
* Bump torch-rocm to 5.6/5.7 ([#14293](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14293))
|
||||
* Base output path off data path ([#14446](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14446))
|
||||
* reorder training preprocessing modules in extras tab ([#14367](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14367))
|
||||
* Remove `cleanup_models` code ([#14472](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14472))
|
||||
* only rewrite ui-config when there is change ([#14352](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14352))
|
||||
* Fix lint issue from 501993eb ([#14495](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14495))
|
||||
* Update README.md ([#14548](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14548))
|
||||
* hires button, fix seeds ()
|
||||
* Logging: set formatter correctly for fallback logger too ([#14618](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14618))
|
||||
* Read generation info from infotexts rather than json for internal needs (save, extract seed from generated pic) ([#14645](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14645))
|
||||
* improve get_crop_region ([#14709](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14709))
|
||||
* Bump safetensors' version to 0.4.2 ([#14782](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14782))
|
||||
* add tooltip create_submit_box ([#14803](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14803))
|
||||
* extensions tab table row hover highlight ([#14885](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14885))
|
||||
* Always add timestamp to displayed image ([#14890](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14890))
|
||||
* Added core.filemode=false so doesn't track changes in file permission… ([#14930](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14930))
|
||||
* Normalize command-line argument paths ([#14934](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14934), [#15035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15035)) (see the sketch after this list)
|
||||
* Use original App Title in progress bar ([#14916](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14916))
|
||||
* register_tmp_file also for mtime ([#15012](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15012))
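For the "Normalize command-line argument paths" entry above, a small sketch of the general idea: run user-supplied path arguments through one normalization helper before they are used. The `normalized_path` helper and the choice of `--ckpt-dir` as the example flag are assumptions for illustration, not the webui's exact `cmd_args.py` code.

```python
import argparse
import os

def normalized_path(value: str) -> str:
    # expand "~", resolve ".." segments and make the path absolute so later
    # comparisons and joins behave the same regardless of how it was typed
    return os.path.abspath(os.path.expanduser(value))

parser = argparse.ArgumentParser()
parser.add_argument("--ckpt-dir", type=normalized_path, default=None)

args = parser.parse_args(["--ckpt-dir", "~/models/../models/Stable-diffusion"])
print(args.ckpt_dir)  # an absolute, normalized path
```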
|
||||
|
||||
## 1.7.0
|
||||
|
||||
### Features:
|
||||
* settings tab rework: add search field, add categories, split UI settings page into many
|
||||
* add altdiffusion-m18 support ([#13364](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13364))
|
||||
* support inference with LyCORIS GLora networks ([#13610](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13610))
|
||||
* add lora-embedding bundle system ([#13568](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13568))
|
||||
* option to move prompt from top row into generation parameters
|
||||
* add support for SSD-1B ([#13865](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13865))
|
||||
* support inference with OFT networks ([#13692](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13692))
|
||||
* script metadata and DAG sorting mechanism ([#13944](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13944))
|
||||
* support HyperTile optimization ([#13948](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13948))
|
||||
* add support for SD 2.1 Turbo ([#14170](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14170))
|
||||
* remove Train->Preprocessing tab and put all its functionality into Extras tab
|
||||
* initial IPEX support for Intel Arc GPU ([#14171](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14171))
|
||||
|
||||
### Minor:
|
||||
* allow reading model hash from images in img2img batch mode ([#12767](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12767))
|
||||
* add option to align with sgm repo's sampling implementation ([#12818](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818))
|
||||
* extra field for lora metadata viewer: `ss_output_name` ([#12838](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12838))
|
||||
* add action in settings page to calculate all SD checkpoint hashes ([#12909](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12909))
|
||||
* add button to copy prompt to style editor ([#12975](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12975))
|
||||
* add --skip-load-model-at-start option ([#13253](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13253))
|
||||
* write infotext to gif images
|
||||
* read infotext from gif images ([#13068](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13068))
|
||||
* allow configuring the initial state of InputAccordion in ui-config.json ([#13189](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13189))
|
||||
* allow editing whitespace delimiters for ctrl+up/ctrl+down prompt editing ([#13444](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13444))
|
||||
* prevent accidentally closing popup dialogs ([#13480](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13480))
|
||||
* added option to play notification sound or not ([#13631](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13631))
|
||||
* show the preview image in the full screen image viewer if available ([#13459](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13459))
|
||||
* support for webui.settings.bat ([#13638](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13638))
|
||||
* add an option to not print stack traces on ctrl+c
|
||||
* start/restart generation by Ctrl (Alt) + Enter ([#13644](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13644))
|
||||
* update prompts_from_file script to allow concatenating entries with the general prompt ([#13733](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13733))
|
||||
* added a visible checkbox to input accordion
|
||||
* added an option to hide all txt2img/img2img parameters in an accordion ([#13826](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13826))
|
||||
* added 'Path' sorting option for Extra network cards ([#13968](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13968))
|
||||
* enable prompt hotkeys in style editor ([#13931](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13931))
|
||||
* option to show batch img2img results in UI ([#14009](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14009))
|
||||
* infotext updates: add option to disregard certain infotext fields, add option to not include VAE in infotext, add explanation to infotext settings page, move some options to infotext settings page
|
||||
* add FP32 fallback support on sd_vae_approx ([#14046](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046))
|
||||
* support XYZ scripts / split hires path from unet ([#14126](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14126))
|
||||
* allow use of multiple styles csv files ([#14125](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14125)) (a sketch of the merge idea follows this list)
|
||||
* make extra network card description plaintext by default, with an option (Treat card description as HTML) to re-enable HTML as it was (originally by [#13241](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13241))
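For the "multiple styles csv files" entry above, a rough sketch of merging several style files into one lookup. The file pattern and column names (`name`, `prompt`, `negative_prompt`) are assumptions for illustration, not a description of the actual styles code.

```python
import csv
import glob

def load_styles(pattern="styles*.csv"):
    """Merge styles from every matching CSV; later files win on duplicate names."""
    styles = {}
    for path in sorted(glob.glob(pattern)):
        with open(path, newline="", encoding="utf-8-sig") as file:
            for row in csv.DictReader(file):
                styles[row["name"]] = {
                    "prompt": row.get("prompt", ""),
                    "negative_prompt": row.get("negative_prompt", ""),
                    "source": path,  # remember which file defined the style
                }
    return styles

print(list(load_styles()))
```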
|
||||
|
||||
### Extensions and API:
|
||||
* update gradio to 3.41.2
|
||||
* support installed extensions list api ([#12774](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12774))
|
||||
* update pnginfo API to return dict with parsed values
|
||||
* add noisy latent to `ExtraNoiseParams` for callback ([#12856](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12856))
|
||||
* show extension datetime in UTC ([#12864](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12864), [#12865](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12865), [#13281](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13281))
|
||||
* add an option to choose how to combine hires fix and refiner
|
||||
* include program version in info response. ([#13135](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13135))
|
||||
* sd_unet support for SDXL
|
||||
* patch DDPM.register_betas so that users can put given_betas in model yaml ([#13276](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13276))
|
||||
* xyz_grid: add prepare ([#13266](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13266))
|
||||
* allow multiple localization files with same language in extensions ([#13077](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13077))
|
||||
* add onEdit function for js and rework token-counter.js to use it
|
||||
* fix the key error exception when processing override_settings keys ([#13567](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13567))
|
||||
* ability for extensions to return custom data via api in response.images ([#13463](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13463))
|
||||
* call state.jobnext() before postprocess*() ([#13762](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13762))
|
||||
* add option to set notification sound volume ([#13884](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13884))
|
||||
* update Ruff to 0.1.6 ([#14059](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14059))
|
||||
* add Block component creation callback ([#14119](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14119))
|
||||
* catch uncaught exception with ui creation scripts ([#14120](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14120))
|
||||
* use extension name for determining an extension is installed in the index ([#14063](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14063))
|
||||
* update is_installed() from launch_utils.py to fix reinstalling already installed packages ([#14192](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14192))
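A hedged sketch of the idea behind the `is_installed()` entry above: check installed distributions by package metadata instead of trying to import them, so already-installed packages are not reinstalled at startup. This is an illustration only, not the actual `launch_utils.py` code.

```python
import importlib.metadata

def is_installed(package: str) -> bool:
    # querying distribution metadata avoids false negatives for packages whose
    # import name differs from the name used on the pip command line
    try:
        importlib.metadata.version(package)
        return True
    except importlib.metadata.PackageNotFoundError:
        return False

print(is_installed("pip"), is_installed("surely-not-installed"))
```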
|
||||
|
||||
### Bug Fixes:
|
||||
* fix pix2pix producing bad results
|
||||
* fix defaults settings page breaking when any of main UI tabs are hidden
|
||||
* fix error that causes some extra networks to be disabled if both <lora:> and <lyco:> are present in the prompt
|
||||
* fix for Reload UI function: if you reload UI on one tab, other opened tabs will no longer stop working
|
||||
* prevent duplicate resize handler ([#12795](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12795))
|
||||
* small typo: vae resolve bug ([#12797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12797))
|
||||
* hide broken image crop tool ([#12792](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12792))
|
||||
* don't show hidden samplers in dropdown for XYZ script ([#12780](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12780))
|
||||
* fix style editing dialog breaking if it's opened in both img2img and txt2img tabs
|
||||
* hide --gradio-auth and --api-auth values from /internal/sysinfo report
|
||||
* add missing infotext for RNG in options ([#12819](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12819))
|
||||
* fix notification not playing when built-in webui tab is inactive ([#12834](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12834))
|
||||
* honor `--skip-install` for extension installers ([#12832](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12832))
|
||||
* don't print blank stdout in extension installers ([#12833](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12833), [#12855](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12855))
|
||||
* get progressbar to display correctly in extensions tab
|
||||
* keep order in list of checkpoints when loading model that doesn't have a checksum
|
||||
* fix inpainting models in txt2img creating black pictures
|
||||
* fix generation params regex ([#12876](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12876))
|
||||
* fix batch img2img output dir with script ([#12926](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12926))
|
||||
* fix #13080 - Hypernetwork/TI preview generation ([#13084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13084))
|
||||
* fix bug with sigma min/max overrides. ([#12995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12995))
|
||||
* more accurate check for enabling cuDNN benchmark on 16XX cards ([#12924](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12924))
|
||||
* don't use multicond parser for negative prompt counter ([#13118](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13118))
|
||||
* fix data-sort-name containing spaces ([#13412](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13412))
|
||||
* update card on correct tab when editing metadata ([#13411](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13411))
|
||||
* fix viewing/editing metadata when filename contains an apostrophe ([#13395](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13395))
|
||||
* fix: --sd_model in "Prompts from file or textbox" script is not working ([#13302](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13302))
|
||||
* better Support for Portable Git ([#13231](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13231))
|
||||
* fix issues when webui_dir is not work_dir ([#13210](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13210))
|
||||
* fix: lora-bias-backup don't reset cache ([#13178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13178))
|
||||
* account for customizable extra network separators when removing extra network text from the prompt ([#12877](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12877))
|
||||
* re fix batch img2img output dir with script ([#13170](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13170))
|
||||
* fix `--ckpt-dir` path separator and option use `short name` for checkpoint dropdown ([#13139](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13139))
|
||||
* consolidated allowed preview formats, Fix extra network `.gif` not working as preview ([#13121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13121))
|
||||
* fix venv_dir=- environment variable not working as expected on linux ([#13469](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13469))
|
||||
* repair unload sd checkpoint button
|
||||
* edit-attention fixes ([#13533](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13533))
|
||||
* fix bug when using --gfpgan-models-path ([#13718](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13718))
|
||||
* properly apply sort order for extra network cards when selected from dropdown
|
||||
* fixes generation restart not working for some users when 'Ctrl+Enter' is pressed ([#13962](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13962))
|
||||
* thread safe extra network list_items ([#13014](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13014)) (see the sketch after this list)
|
||||
* fix not able to exit metadata popup when pop up is too big ([#14156](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14156))
|
||||
* fix auto focal point crop for opencv >= 4.8 ([#14121](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14121))
|
||||
* make 'use-cpu all' actually apply to 'all' ([#14131](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14131))
|
||||
* extras tab batch: actually use original filename
|
||||
* make webui not crash when running with --disable-all-extensions option
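For the "thread safe extra network list_items" entry above, a generic sketch of the pattern: guard the shared registry with a lock and iterate over a snapshot so a refresh on another thread cannot mutate it mid-iteration. Class and method names here are illustrative, not the webui's actual extra networks code.

```python
import threading

class ExtraNetworksRegistry:
    def __init__(self):
        self._lock = threading.Lock()
        self._items = {}

    def register(self, name, item):
        with self._lock:
            self._items[name] = item

    def list_items(self):
        # copy under the lock, iterate outside it: concurrent register() calls
        # can no longer raise "dictionary changed size during iteration"
        with self._lock:
            snapshot = list(self._items.values())
        yield from snapshot

registry = ExtraNetworksRegistry()
registry.register("example-lora", {"filename": "example.safetensors"})
print(list(registry.list_items()))
```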
|
||||
|
||||
### Other:
|
||||
* non-local condition ([#12814](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12814))
|
||||
* fix minor typos ([#12827](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12827))
|
||||
* remove xformers Python version check ([#12842](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12842))
|
||||
* style: file-metadata word-break ([#12837](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12837))
|
||||
* revert SGM noise multiplier change for img2img because it breaks hires fix
|
||||
* do not change quicksettings dropdown option when value returned is `None` ([#12854](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12854))
|
||||
* [RC 1.6.0 - zoom is partly hidden] Update style.css ([#12839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12839))
|
||||
* chore: change extension time format ([#12851](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12851))
|
||||
* WEBUI.SH - Use torch 2.1.0 release candidate for Navi 3 ([#12929](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12929))
|
||||
* add Fallback at images.read_info_from_image if exif data was invalid ([#13028](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13028))
|
||||
* update cmd arg description ([#12986](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12986))
|
||||
* fix: update shared.opts.data when add_option ([#12957](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12957), [#13213](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13213))
|
||||
* restore missing tooltips ([#12976](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12976))
|
||||
* use default dropdown padding on mobile ([#12880](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12880))
|
||||
* put enable console prompts option into settings from commandline args ([#13119](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13119))
|
||||
* fix some deprecated types ([#12846](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12846))
|
||||
* bump to torchsde==0.2.6 ([#13418](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13418))
|
||||
* update dragdrop.js ([#13372](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13372))
|
||||
* use OrderedDict as LRU cache (optimization/bugfix) ([#13313](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13313))
|
||||
* XYZ grid: do not save sub grids if "Include Sub Grids" is not enabled ([#13282](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13282))
|
||||
* initialize state.time_start before state.job_count ([#13229](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13229))
|
||||
* fix fieldname regex ([#13458](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13458))
|
||||
* change denoising_strength default to None. ([#13466](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13466))
|
||||
* fix regression ([#13475](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13475))
|
||||
* fix IndexError ([#13630](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13630))
|
||||
* fix: model.load_state_dict issue when a checkpoints_loaded {checkpoint: state_dict} cache entry is empty ([#13535](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13535))
|
||||
* update bug_report.yml ([#12991](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12991))
|
||||
* requirements_versions httpx==0.24.1 ([#13839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13839))
|
||||
* fix parenthesis auto selection ([#13829](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13829))
|
||||
* fix #13796 ([#13797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13797))
|
||||
* corrected a typo in `modules/cmd_args.py` ([#13855](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13855))
|
||||
* feat: fix randn found element of type float at pos 2 ([#14004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14004))
|
||||
* adds tqdm handler to logging_config.py for progress bar integration ([#13996](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13996)) (see the sketch after this list)
|
||||
* hotfix: call shared.state.end() after postprocessing done ([#13977](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13977))
|
||||
* fix dependency address patch 1 ([#13929](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13929))
|
||||
* save sysinfo as .json ([#14035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14035))
|
||||
* move exception_records related methods to errors.py ([#14084](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14084))
|
||||
* compatibility ([#13936](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13936))
|
||||
* json.dump(ensure_ascii=False) ([#14108](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14108))
|
||||
* dir buttons start with / so only the correct dir will be shown and no… ([#13957](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13957))
|
||||
* alternate implementation for unet forward replacement that does not depend on hijack being applied
|
||||
* re-add `keyedit_delimiters_whitespace` setting lost as part of commit e294e46 ([#14178](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14178))
|
||||
* fix `save_samples` being checked early when saving masked composite ([#14177](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14177))
|
||||
* slight optimization for mask and mask_composite ([#14181](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14181))
|
||||
* add import_hook hack to work around basicsr/torchvision incompatibility ([#14186](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14186))
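For the tqdm logging handler entry above, a minimal standalone version of the well-known pattern: emit log records through `tqdm.write()` so they print above an active progress bar instead of breaking it. The handler below is a sketch, not the webui's `logging_config.py`.

```python
import logging
import tqdm

class TqdmLoggingHandler(logging.Handler):
    def emit(self, record: logging.LogRecord) -> None:
        try:
            # tqdm.write() temporarily clears the bar, prints, then redraws it
            tqdm.tqdm.write(self.format(record))
        except Exception:
            self.handleError(record)

logging.basicConfig(level=logging.INFO, handlers=[TqdmLoggingHandler()])
for _ in tqdm.tqdm(range(3)):
    logging.getLogger(__name__).info("this line does not corrupt the progress bar")
```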
|
||||
|
||||
## 1.6.1
|
||||
|
||||
### Bug Fixes:
|
||||
* fix an error causing the webui to fail to start ([#13839](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13839))
|
||||
|
||||
## 1.6.0
|
||||
|
||||
### Features:
|
||||
* refiner support [#12371](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371)
|
||||
* add NV option for the Random number generator source setting, which makes it possible to generate the same pictures on CPU/AMD/Mac as on NVidia videocards (a related sketch follows this list)
|
||||
* add style editor dialog
|
||||
* hires fix: add an option to use a different checkpoint for second pass ([#12181](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12181))
|
||||
* option to keep multiple loaded models in memory ([#12227](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12227))
|
||||
* new samplers: Restart, DPM++ 2M SDE Exponential, DPM++ 2M SDE Heun, DPM++ 2M SDE Heun Karras, DPM++ 2M SDE Heun Exponential, DPM++ 3M SDE, DPM++ 3M SDE Karras, DPM++ 3M SDE Exponential ([#12300](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12300), [#12519](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12519), [#12542](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12542))
|
||||
* rework DDIM, PLMS, UniPC to use CFG denoiser same as in k-diffusion samplers:
|
||||
* makes all of them work with img2img
|
||||
* makes prompt composition possible (AND)
|
||||
* makes them available for SDXL
|
||||
* always show extra networks tabs in the UI ([#11808](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11808))
|
||||
* use less RAM when creating models ([#11958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11958), [#12599](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12599))
|
||||
* textual inversion inference support for SDXL
|
||||
* extra networks UI: show metadata for SD checkpoints
|
||||
* checkpoint merger: add metadata support
|
||||
* prompt editing and attention: add support for whitespace after the number ([ red : green : 0.5 ]) (seed breaking change) ([#12177](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12177))
|
||||
* VAE: allow selecting own VAE for each checkpoint (in user metadata editor)
|
||||
* VAE: add selected VAE to infotext
|
||||
* options in main UI: add own separate setting for txt2img and img2img, correctly read values from pasted infotext, add setting for column count ([#12551](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12551))
|
||||
* add resize handle to txt2img and img2img tabs, allowing you to change the amount of horizontal space given to generation parameters and resulting image gallery ([#12687](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12687), [#12723](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12723))
|
||||
* change default behavior for batching cond/uncond -- now it's on by default, and is disabled by a UI setting (Optimizations -> Batch cond/uncond) - if you are on lowvram/medvram and are getting OOM exceptions, you will need to enable it
|
||||
* show current position in queue and make it so that requests are processed in the order of arrival ([#12707](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12707))
|
||||
* add `--medvram-sdxl` flag that only enables `--medvram` for SDXL models
|
||||
* prompt editing timeline has separate range for first pass and hires-fix pass (seed breaking change) ([#12457](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12457))
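Related to the Random number generator source feature above: a seed can reproduce images across GPU vendors when the initial noise is drawn from a deterministic CPU generator and only then moved to the device. The sketch below shows that simpler CPU-source idea; the NV option itself emulates NVIDIA's GPU generator and is not reproduced here, and the helper name is illustrative.

```python
import torch

def seeded_noise(seed: int, shape, device: str = "cpu") -> torch.Tensor:
    # a CPU generator produces the same sequence on any machine, so the same
    # seed gives the same starting latent no matter which GPU renders it
    generator = torch.Generator("cpu").manual_seed(seed)
    return torch.randn(shape, generator=generator).to(device)

noise = seeded_noise(1234, (1, 4, 64, 64))
print(noise.shape, float(noise.flatten()[0]))
```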
|
||||
|
||||
### Minor:
|
||||
* img2img batch: RAM savings, VRAM savings, .tif, .tiff in img2img batch ([#12120](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12120), [#12514](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12514), [#12515](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12515))
|
||||
* postprocessing/extras: RAM savings ([#12479](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12479))
|
||||
* XYZ: in the axis labels, remove pathnames from model filenames
|
||||
* XYZ: support hires sampler ([#12298](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12298))
|
||||
* XYZ: new option: use text inputs instead of dropdowns ([#12491](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12491))
|
||||
* add gradio version warning
|
||||
* sort list of VAE checkpoints ([#12297](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12297))
|
||||
* use transparent white for mask in inpainting, along with an option to select the color ([#12326](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12326))
|
||||
* move some settings to their own section: img2img, VAE
|
||||
* add checkbox to show/hide dirs for extra networks
|
||||
* Add TAESD(or more) options for all the VAE encode/decode operation ([#12311](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12311))
|
||||
* gradio theme cache, new gradio themes, along with explanation that the user can input his own values ([#12346](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12346), [#12355](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12355))
|
||||
* sampler fixes/tweaks: s_tmax, s_churn, s_noise, s_tmax ([#12354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12354), [#12356](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12356), [#12357](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12357), [#12358](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12358), [#12375](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12375), [#12521](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12521))
|
||||
* update README.md with correct instructions for Linux installation ([#12352](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12352))
|
||||
* option to not save incomplete images, on by default ([#12338](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12338))
|
||||
* enable cond cache by default
|
||||
* git autofix for repos that are corrupted ([#12230](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12230))
|
||||
* allow to open images in new browser tab by middle mouse button ([#12379](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12379))
|
||||
* automatically open webui in browser when running "locally" ([#12254](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12254))
|
||||
* put commonly used samplers on top, make DPM++ 2M Karras the default choice
|
||||
* zoom and pan: option to auto-expand a wide image, improved integration ([#12413](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12413), [#12727](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12727))
|
||||
* option to cache Lora networks in memory
|
||||
* rework hires fix UI to use accordion
|
||||
* face restoration and tiling moved to settings - use "Options in main UI" setting if you want them back
|
||||
* change quicksettings items to have variable width
|
||||
* Lora: add Norm module, add support for bias ([#12503](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12503))
|
||||
* Lora: output warnings in UI rather than fail for unfitting loras; switch to logging for error output in console
|
||||
* support search and display of hashes for all extra network items ([#12510](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12510))
|
||||
* add extra noise param for img2img operations ([#12564](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12564))
|
||||
* support for Lora with bias ([#12584](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12584))
|
||||
* make interrupt quicker ([#12634](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12634))
|
||||
* configurable gallery height ([#12648](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12648))
|
||||
* make results column sticky ([#12645](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12645))
|
||||
* more hash filename patterns ([#12639](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12639))
|
||||
* make image viewer actually fit the whole page ([#12635](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12635))
|
||||
* make progress bar work independently from live preview display which results in it being updated a lot more often
|
||||
* forbid Full live preview method for medvram and add a setting to undo the forbidding
|
||||
* make it possible to localize tooltips and placeholders
|
||||
* add option to align with sgm repo's sampling implementation ([#12818](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818))
|
||||
* Restore faces and Tiling generation parameters have been moved to settings out of main UI
|
||||
* if you want to put them back into main UI, use `Options in main UI` setting on the UI page.
|
||||
|
||||
### Extensions and API:
|
||||
* gradio 3.41.2
|
||||
* also bump versions for packages: transformers, GitPython, accelerate, scikit-image, timm, tomesd
|
||||
* support tooltip kwarg for gradio elements: gr.Textbox(label='hello', tooltip='world')
|
||||
* properly clear the total console progressbar when using txt2img and img2img from API
|
||||
* add cmd_arg --disable-extra-extensions and --disable-all-extensions ([#12294](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12294))
|
||||
* shared.py and webui.py split into many files
|
||||
* add --loglevel commandline argument for logging
|
||||
* add a custom UI element that combines accordion and checkbox
|
||||
* avoid importing gradio in tests because it spams warnings
|
||||
* put infotext label for setting into OptionInfo definition rather than in a separate list
|
||||
* make `StableDiffusionProcessingImg2Img.mask_blur` a property, make more inline with PIL `GaussianBlur` ([#12470](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12470))
|
||||
* option to make scripts UI without gr.Group
|
||||
* add a way for scripts to register a callback for before/after just a single component's creation
|
||||
* use dataclass for StableDiffusionProcessing
|
||||
* store patches for Lora in a specialized module instead of inside torch
|
||||
* support http/https URLs in API ([#12663](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12663), [#12698](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12698))
|
||||
* add extra noise callback ([#12616](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12616))
|
||||
* dump current stack traces when exiting with SIGINT (see the sketch after this list)
|
||||
* add type annotations for extra fields of shared.sd_model
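For the "dump current stack traces when exiting with SIGINT" entry above, a self-contained sketch of one way to do it: install a SIGINT handler that prints a traceback for every live thread before exiting. This illustrates the technique only; it is not the webui's actual handler.

```python
import signal
import sys
import threading
import traceback

def dump_stacks_and_exit(signum, frame):
    frames = sys._current_frames()
    for thread in threading.enumerate():
        print(f"\n--- stack of thread {thread.name} ---", file=sys.stderr)
        thread_frame = frames.get(thread.ident)
        if thread_frame is not None:
            traceback.print_stack(thread_frame, file=sys.stderr)
    sys.exit(1)

signal.signal(signal.SIGINT, dump_stacks_and_exit)
input("press Ctrl+C to see a stack dump for every thread\n")
```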
|
||||
|
||||
### Bug Fixes:
|
||||
* Don't crash if out of local storage quota for javascript localStorage
|
||||
* XYZ plot do not fail if an exception occurs
|
||||
* fix missing TI hash in infotext if generation uses both negative and positive TI ([#12269](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12269))
|
||||
* localization fixes ([#12307](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12307))
|
||||
* fix sdxl model invalid configuration after the hijack
|
||||
* correctly toggle extras checkbox for infotext paste ([#12304](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12304))
|
||||
* open raw sysinfo link in new page ([#12318](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12318))
|
||||
* prompt parser: Account for empty field in alternating words syntax ([#12319](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12319))
|
||||
* add tab and carriage return to invalid filename chars ([#12327](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12327))
|
||||
* fix api only Lora not working ([#12387](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12387))
|
||||
* fix options in main UI misbehaving when there's just one element
|
||||
* make it possible to use a sampler from infotext even if it's hidden in the dropdown
|
||||
* fix styles missing from the prompt in infotext when making a grid of batch of multiple images
|
||||
* prevent bogus progress output in console when calculating hires fix dimensions
|
||||
* fix --use-textbox-seed
|
||||
* fix broken `Lora/Networks: use old method` option ([#12466](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12466))
|
||||
* properly return `None` for VAE hash when using `--no-hashing` ([#12463](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12463))
|
||||
* MPS/macOS fixes and optimizations ([#12526](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12526))
|
||||
* add second_order to samplers that mistakenly didn't have it
|
||||
* when refreshing cards in extra networks UI, do not discard user's custom resolution
|
||||
* fix processing error that happens if batch_size is not a multiple of how many prompts/negative prompts there are ([#12509](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12509))
|
||||
* fix inpaint upload for alpha masks ([#12588](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12588))
|
||||
* fix exception when image sizes are not integers ([#12586](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12586))
|
||||
* fix incorrect TAESD Latent scale ([#12596](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12596))
|
||||
* auto add data-dir to gradio-allowed-path ([#12603](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12603))
|
||||
* fix exception if extensions dir is missing ([#12607](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12607))
|
||||
* fix issues with api model-refresh and vae-refresh ([#12638](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12638))
|
||||
* fix img2img background color for transparent images option not being used ([#12633](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12633))
|
||||
* attempt to resolve NaN issue with unstable VAEs in fp32 mk2 ([#12630](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12630))
|
||||
* implement missing undo hijack for SDXL
|
||||
* fix xyz swap axes ([#12684](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12684))
|
||||
* fix errors in backup/restore tab if any of config files are broken ([#12689](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12689))
|
||||
* fix SD VAE switch error after model reuse ([#12685](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12685))
|
||||
* fix trying to create images too large for the chosen format ([#12667](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12667))
|
||||
* create Gradio temp directory if necessary ([#12717](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12717))
|
||||
* prevent possible cache loss if exiting as it's being written by using an atomic operation to replace the cache with the new version (see the sketch after this list)
|
||||
* set devices.dtype_unet correctly
|
||||
* run RealESRGAN on GPU for non-CUDA devices ([#12737](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12737))
|
||||
* prevent extra network buttons being obscured by description for very small card sizes ([#12745](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12745))
|
||||
* fix error that causes some extra networks to be disabled if both <lora:> and <lyco:> are present in the prompt
|
||||
* fix defaults settings page breaking when any of main UI tabs are hidden
|
||||
* fix incorrect save/display of new values in Defaults page in settings
|
||||
* fix for Reload UI function: if you reload UI on one tab, other opened tabs will no longer stop working
|
||||
* fix an error that prevents VAE being reloaded after an option change if a VAE near the checkpoint exists ([#12797](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12737))
|
||||
* hide broken image crop tool ([#12792](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12737))
|
||||
* don't show hidden samplers in dropdown for XYZ script ([#12780](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12737))
|
||||
* fix style editing dialog breaking if it's opened in both img2img and txt2img tabs
|
||||
* fix a bug allowing users to bypass gradio and API authentication (reported by vysecurity)
|
||||
* fix notification not playing when built-in webui tab is inactive ([#12834](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12834))
|
||||
* honor `--skip-install` for extension installers ([#12832](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12832))
|
||||
* don't print blank stdout in extension installers ([#12833](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12832), [#12855](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12855))
|
||||
* do not change quicksettings dropdown option when value returned is `None` ([#12854](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12854))
|
||||
* get progressbar to display correctly in extensions tab
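For the atomic cache-replacement entry above: a generic sketch of the technique is to write the new cache to a temporary file in the same directory and swap it in with `os.replace()`, which is atomic on the same filesystem. File names and the JSON layout below are assumptions; this is not the webui's `cache.py`.

```python
import json
import os
import tempfile

def save_cache_atomically(cache: dict, cache_path: str) -> None:
    directory = os.path.dirname(os.path.abspath(cache_path))
    fd, tmp_path = tempfile.mkstemp(prefix="cache-", suffix=".json", dir=directory)
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as file:
            json.dump(cache, file, ensure_ascii=False, indent=4)
        # atomic swap: readers see either the complete old file or the complete new one
        os.replace(tmp_path, cache_path)
    except BaseException:
        os.unlink(tmp_path)
        raise

save_cache_atomically({"hashes": {"model.safetensors": "deadbeef"}}, "cache.json")
```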
|
||||
|
||||
|
||||
## 1.5.2
|
||||
|
||||
### Bug Fixes:
|
||||
* fix memory leak when generation fails
|
||||
* update doggettx cross attention optimization to not use an unreasonable amount of memory in some edge cases -- suggestion by MorkTheOrk
|
||||
|
||||
|
||||
## 1.5.1
|
||||
|
||||
### Minor:
|
||||
* support parsing text encoder blocks in some new LoRAs
|
||||
* delete scale checker script due to user demand
|
||||
|
||||
### Extensions and API:
|
||||
* add postprocess_batch_list script callback
|
||||
|
||||
### Bug Fixes:
|
||||
* fix TI training for SD1
|
||||
* fix reload altclip model error
|
||||
* prepend the pythonpath instead of overriding it
|
||||
* fix typo in SD_WEBUI_RESTARTING
|
||||
* if txt2img/img2img raises an exception, finally call state.end()
|
||||
* fix composable diffusion weight parsing
|
||||
* restyle Startup profile for black users
|
||||
* fix webui not launching with --nowebui
|
||||
* catch exception for non git extensions
|
||||
* fix some options missing from /sdapi/v1/options
|
||||
* fix for extension update status always saying "unknown"
|
||||
* fix display of extra network cards that have `<>` in the name
|
||||
* update lora extension to work with python 3.8
|
||||
|
||||
|
||||
## 1.5.0
|
||||
|
||||
### Features:
|
||||
* SD XL support
|
||||
* user metadata system for custom networks
|
||||
* extended Lora metadata editor: set activation text, default weight, view tags, training info
|
||||
* Lora extension rework to include other types of networks (all that were previously handled by LyCORIS extension)
|
||||
* show github stars for extensions
|
||||
* img2img batch mode can read extra stuff from png info
|
||||
* img2img batch works with subdirectories
|
||||
* hotkeys to move prompt elements: alt+left/right
|
||||
* restyle time taken/VRAM display
|
||||
* add textual inversion hashes to infotext
|
||||
* optimization: cache git extension repo information
|
||||
* move generate button next to the generated picture for mobile clients
|
||||
* hide cards for networks of incompatible Stable Diffusion version in Lora extra networks interface
|
||||
* skip installing packages with pip if they all are already installed - startup speedup of about 2 seconds
|
||||
|
||||
### Minor:
|
||||
* checkbox to check/uncheck all extensions in the Installed tab
|
||||
* add gradio user to infotext and to filename patterns
|
||||
* allow gif for extra network previews
|
||||
* add options to change colors in grid
|
||||
* use natural sort for items in extra networks
|
||||
* Mac: use empty_cache() from torch 2 to clear VRAM
|
||||
* added automatic support for installing the right libraries for Navi3 (AMD)
|
||||
* add option SWIN_torch_compile to accelerate SwinIR upscale
|
||||
* suppress printing TI embedding info at start to console by default
|
||||
* speedup extra networks listing
|
||||
* added `[none]` filename token.
|
||||
* removed thumbs extra networks view mode (use settings tab to change width/height/scale to get thumbs)
|
||||
* add always_discard_next_to_last_sigma option to XYZ plot
|
||||
* automatically switch to 32-bit float VAE if the generated picture has NaNs without the need for `--no-half-vae` commandline flag.
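The entry above describes detecting NaNs in a decoded image and retrying with a 32-bit VAE. Below is a minimal sketch of that detect-and-retry pattern; the helper and the way the VAE is invoked are simplified assumptions, not the actual processing code.

```python
import torch

def decode_with_nan_fallback(vae: torch.nn.Module, latent: torch.Tensor) -> torch.Tensor:
    with torch.no_grad():
        image = vae(latent)
        if torch.isnan(image).any():
            # half-precision VAEs occasionally blow up to NaN; redo in float32
            image = vae.float()(latent.float())
    return image

# tiny stand-in "VAE" so the sketch runs end-to-end
toy_vae = torch.nn.Conv2d(4, 3, kernel_size=1)
print(decode_with_nan_fallback(toy_vae, torch.randn(1, 4, 8, 8)).shape)
```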
|
||||
|
||||
### Extensions and API:
|
||||
* api endpoints: /sdapi/v1/server-kill, /sdapi/v1/server-restart, /sdapi/v1/server-stop
|
||||
* allow Script to have custom metaclass
|
||||
* add model exists status check /sdapi/v1/options
|
||||
* rename --add-stop-route to --api-server-stop
|
||||
* add `before_hr` script callback
|
||||
* add callback `after_extra_networks_activate`
|
||||
* disable rich exception output in console for API by default, use WEBUI_RICH_EXCEPTIONS env var to enable
|
||||
* return http 404 when thumb file not found
|
||||
* allow replacing extensions index with environment variable
|
||||
|
||||
### Bug Fixes:
|
||||
* fix: catch errors when retrieving extension index (#11290)
|
||||
* fix very slow loading speed of .safetensors files when reading from network drives
|
||||
* API cache cleanup
|
||||
* fix UnicodeEncodeError when writing to file CLIP Interrogator batch mode
|
||||
* fix warning of 'has_mps' deprecated from PyTorch
|
||||
* fix problem with extra network saving images as previews losing generation info
|
||||
* fix throwing exception when trying to resize image with I;16 mode
|
||||
* fix for #11534: canvas zoom and pan extension hijacking shortcut keys
|
||||
* fixed launch script to be runnable from any directory
|
||||
* don't add "Seed Resize: -1x-1" to API image metadata
|
||||
* correctly remove end parenthesis with ctrl+up/down
|
||||
* fixing --subpath on newer gradio version
|
||||
* fix: check that fill size is non-zero when resizing (fixes #11425)
|
||||
* use submit and blur for quick settings textbox
|
||||
* save img2img batch with images.save_image()
|
||||
* prevent running preload.py for disabled extensions
|
||||
* fix: previously, model name was added together with directory name to infotext and to [model_name] filename pattern; directory name is now not included
|
||||
|
||||
|
||||
## 1.4.1
|
||||
|
||||
### Bug Fixes:
|
||||
* add queue lock for refresh-checkpoints
|
||||
|
||||
## 1.4.0
|
||||
|
||||
### Features:
|
||||
* zoom controls for inpainting
|
||||
* run basic torch calculation at startup in parallel to reduce the performance impact of first generation
|
||||
* option to pad prompt/neg prompt to be same length
|
||||
* remove taming_transformers dependency
|
||||
* custom k-diffusion scheduler settings
|
||||
* add an option to show selected settings in main txt2img/img2img UI
|
||||
* sysinfo tab in settings
|
||||
* infer styles from prompts when pasting params into the UI
|
||||
* an option to control the behavior of the above
|
||||
|
||||
### Minor:
|
||||
* bump Gradio to 3.32.0
|
||||
* bump xformers to 0.0.20
|
||||
* Add option to disable token counters
|
||||
* tooltip fixes & optimizations
|
||||
* make it possible to configure filename for the zip download
|
||||
* `[vae_filename]` pattern for filenames
|
||||
* Revert discarding penultimate sigma for DPM-Solver++(2M) SDE
|
||||
* change UI reorder setting to multiselect
|
||||
* read version info from CHANGELOG.md if git version info is not available (a fallback sketch follows this list)
|
||||
* link footer API to Wiki when API is not active
|
||||
* persistent conds cache (opt-in optimization)
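For the entry above about reading version info from CHANGELOG.md when git metadata is unavailable, a hedged sketch of such a fallback; the git command and the parsing of the first `##` heading are illustrative assumptions, not the webui's `launch_utils.py`.

```python
import subprocess

def get_version() -> str:
    # prefer git metadata; fall back to the first "## x.y.z" heading in CHANGELOG.md
    try:
        return subprocess.check_output(
            ["git", "describe", "--tags", "--always"],
            stderr=subprocess.DEVNULL, text=True,
        ).strip()
    except (OSError, subprocess.CalledProcessError):
        try:
            with open("CHANGELOG.md", encoding="utf-8") as file:
                for line in file:
                    if line.startswith("## "):
                        return line[3:].strip()
        except OSError:
            pass
        return "<none>"

print(get_version())
```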
|
||||
|
||||
### Extensions:
|
||||
* After installing extensions, webui properly restarts the process rather than reloads the UI
|
||||
* Added VAE listing to web API. Via: /sdapi/v1/sd-vae
|
||||
* custom unet support
|
||||
* Add onAfterUiUpdate callback
|
||||
* refactor EmbeddingDatabase.register_embedding() to allow unregistering
|
||||
* add before_process callback for scripts
|
||||
* add ability for alwayson scripts to specify section and let user reorder those sections
|
||||
|
||||
### Bug Fixes:
|
||||
* Fix dragging text to prompt
|
||||
* fix incorrect quoting for infotext values with colon in them
|
||||
* fix "hires. fix" prompt sharing same labels with txt2img_prompt
|
||||
* Fix s_min_uncond default type int
|
||||
* Fix for #10643 (Inpainting mask sometimes not working)
|
||||
* fix bad styling for thumbs view in extra networks #10639
|
||||
* fix for empty list of optimizations #10605
|
||||
* small fixes to prepare_tcmalloc for Debian/Ubuntu compatibility
|
||||
* fix --ui-debug-mode exit
|
||||
* patch GitPython to not use leaky persistent processes
|
||||
* fix duplicate Cross attention optimization after UI reload
|
||||
* torch.cuda.is_available() check for SdOptimizationXformers
|
||||
* fix hires fix using wrong conds in second pass if using Loras.
|
||||
* handle exception when parsing generation parameters from png info
|
||||
* fix upcast attention dtype error
|
||||
* forcing Torch Version to 1.13.1 for RX 5000 series GPUs
|
||||
* split mask blur into X and Y components, patch Outpainting MK2 accordingly
|
||||
* don't die when a LoRA is a broken symlink
|
||||
* allow activation of Generate Forever during generation
|
||||
|
||||
|
||||
## 1.3.2
|
||||
|
||||
### Bug Fixes:
|
||||
@ -982,7 +97,7 @@
|
||||
* do not wait for Stable Diffusion model to load at startup
|
||||
* add filename patterns: `[denoising]`
|
||||
* directory hiding for extra networks: dirs starting with `.` will hide their cards on extra network tabs unless specifically searched for
|
||||
* LoRA: for the `<...>` text in prompt, use name of LoRA that is in the metadata of the file, if present, instead of filename (both can be used to activate LoRA)
|
||||
* LoRA: read infotext params from kohya-ss's extension parameters if they are present and if his extension is not active
|
||||
* LoRA: fix some LoRAs not working (ones that have 3x3 convolution layer)
|
||||
* LoRA: add an option to use old method of applying LoRAs (producing same results as with kohya-ss)
|
||||
@ -1012,7 +127,7 @@
|
||||
* fix gamepad navigation
|
||||
* make the lightbox fullscreen image function properly
|
||||
* fix squished thumbnails in extras tab
|
||||
* keep "search" filter for extra networks when user refreshes the tab (previously it showed everything after you refreshed)
|
||||
* keep "search" filter for extra networks when user refreshes the tab (previously it showed everthing after you refreshed)
|
||||
* fix webui showing the same image if you configure the generation to always save results into same file
|
||||
* fix bug with upscalers not working properly
|
||||
* fix MPS on PyTorch 2.0.1, Intel Macs
|
||||
@ -1030,7 +145,7 @@
|
||||
* switch to PyTorch 2.0.0 (except for AMD GPUs)
|
||||
* visual improvements to custom code scripts
|
||||
* add filename patterns: `[clip_skip]`, `[hasprompt<>]`, `[batch_number]`, `[generation_number]`
|
||||
* add support for saving init images in img2img, and record their hashes in infotext for reproducibility
|
||||
* automatically select current word when adjusting weight with ctrl+up/down
|
||||
* add dropdowns for X/Y/Z plot
|
||||
* add setting: Stable Diffusion/Random number generator source: makes it possible to make images generated from a given manual seed consistent across different GPUs
|
||||
|
@ -1,7 +0,0 @@
|
||||
cff-version: 1.2.0
|
||||
message: "If you use this software, please cite it as below."
|
||||
authors:
|
||||
- given-names: AUTOMATIC1111
|
||||
title: "Stable Diffusion Web UI"
|
||||
date-released: 2022-08-22
|
||||
url: "https://github.com/AUTOMATIC1111/stable-diffusion-webui"
|
64
README.md
64
README.md
@ -1,5 +1,5 @@
|
||||
# Stable Diffusion web UI
|
||||
A web interface for Stable Diffusion, implemented using Gradio library.
|
||||
A browser interface based on Gradio library for Stable Diffusion.
|
||||
|
||||

|
||||
|
||||
@ -78,7 +78,7 @@ A web interface for Stable Diffusion, implemented using Gradio library.
|
||||
- Clip skip
|
||||
- Hypernetworks
|
||||
- Loras (same as Hypernetworks but more pretty)
|
||||
- A separate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt
|
||||
- Can select to load a different VAE from settings screen
|
||||
- Estimated completion time in progress bar
|
||||
- API
|
||||
@ -88,24 +88,19 @@ A web interface for Stable Diffusion, implemented using Gradio library.
|
||||
- [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions
|
||||
- Now without any bad letters!
|
||||
- Load checkpoints in safetensors format
|
||||
- Eased resolution restriction: generated image's dimensions must be a multiple of 8 rather than 64
|
||||
- Now with a license!
|
||||
- Reorder elements in the UI from settings screen
|
||||
- [Segmind Stable Diffusion](https://huggingface.co/segmind/SSD-1B) support
|
||||
|
||||
## Installation and Running
|
||||
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for:
|
||||
- [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended)
|
||||
- [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
|
||||
- [Intel CPUs, Intel GPUs (both integrated and discrete)](https://github.com/openvinotoolkit/stable-diffusion-webui/wiki/Installation-on-Intel-Silicon) (external wiki page)
|
||||
- [Ascend NPUs](https://github.com/wangshuai09/stable-diffusion-webui/wiki/Install-and-run-on-Ascend-NPUs) (external wiki page)
|
||||
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
|
||||
|
||||
Alternatively, use online services (like Google Colab):
|
||||
|
||||
- [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services)
|
||||
|
||||
### Installation on Windows 10/11 with NVidia-GPUs using release package
|
||||
1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract its contents.
|
||||
2. Run `update.bat`.
|
||||
3. Run `run.bat`.
|
||||
> For more details see [Install-and-Run-on-NVidia-GPUs](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs)
|
||||
@ -120,40 +115,16 @@ Alternatively, use online services (like Google Colab):
|
||||
1. Install the dependencies:
|
||||
```bash
|
||||
# Debian-based:
|
||||
sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0
|
||||
sudo apt install wget git python3 python3-venv
|
||||
# Red Hat-based:
|
||||
sudo dnf install wget git python3 gperftools-libs libglvnd-glx
|
||||
# openSUSE-based:
|
||||
sudo zypper install wget git python3 libtcmalloc4 libglvnd
|
||||
sudo dnf install wget git python3
|
||||
# Arch-based:
|
||||
sudo pacman -S wget git python3
|
||||
```
|
||||
If your system is very new, you need to install python3.11 or python3.10:
|
||||
```bash
|
||||
# Ubuntu 24.04
|
||||
sudo add-apt-repository ppa:deadsnakes/ppa
|
||||
sudo apt update
|
||||
sudo apt install python3.11
|
||||
|
||||
# Manjaro/Arch
|
||||
sudo pacman -S yay
|
||||
yay -S python311 # do not confuse with python3.11 package
|
||||
|
||||
# Only for 3.11
|
||||
# Then set up env variable in launch script
|
||||
export python_cmd="python3.11"
|
||||
# or in webui-user.sh
|
||||
python_cmd="python3.11"
|
||||
```
|
||||
2. Navigate to the directory you would like the webui to be installed and execute the following command:
|
||||
```bash
|
||||
wget -q https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh
|
||||
bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh)
|
||||
```
|
||||
Or just clone the repo wherever you want:
|
||||
```bash
|
||||
git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui
|
||||
```
|
||||
|
||||
3. Run `webui.sh`.
|
||||
4. Check `webui-user.sh` for options.
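For reference, a minimal `webui-user.sh` override might look like the sketch below; the variables are real, but the specific values and flags are only examples, not requirements:

```bash
#!/bin/bash
# Example webui-user.sh overrides (all optional; adjust to your setup)
export python_cmd="python3.11"         # use a specific Python interpreter
export COMMANDLINE_ARGS="--xformers"   # extra arguments passed to the launcher
```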
|
||||
### Installation on Apple Silicon
|
||||
@ -164,22 +135,18 @@ Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-w
|
||||
Here's how to add code to this repo: [Contributing](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing)
|
||||
|
||||
## Documentation
|
||||
|
||||
The documentation was moved from this README over to the project's [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki).
|
||||
|
||||
For the purposes of getting Google and other search engines to crawl the wiki, here's a link to the (not for humans) [crawlable wiki](https://github-wiki-see.page/m/AUTOMATIC1111/stable-diffusion-webui/wiki).
|
||||
|
||||
## Credits
|
||||
Licenses for borrowed code can be found in `Settings -> Licenses` screen, and also in `html/licenses.html` file.
|
||||
|
||||
- Stable Diffusion - https://github.com/Stability-AI/stablediffusion, https://github.com/CompVis/taming-transformers, https://github.com/mcmonkey4eva/sd3-ref
|
||||
- Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers
|
||||
- k-diffusion - https://github.com/crowsonkb/k-diffusion.git
|
||||
- Spandrel - https://github.com/chaiNNer-org/spandrel implementing
|
||||
- GFPGAN - https://github.com/TencentARC/GFPGAN.git
|
||||
- CodeFormer - https://github.com/sczhou/CodeFormer
|
||||
- ESRGAN - https://github.com/xinntao/ESRGAN
|
||||
- SwinIR - https://github.com/JingyunLiang/SwinIR
|
||||
- Swin2SR - https://github.com/mv-lab/swin2sr
|
||||
- GFPGAN - https://github.com/TencentARC/GFPGAN.git
|
||||
- CodeFormer - https://github.com/sczhou/CodeFormer
|
||||
- ESRGAN - https://github.com/xinntao/ESRGAN
|
||||
- SwinIR - https://github.com/JingyunLiang/SwinIR
|
||||
- Swin2SR - https://github.com/mv-lab/swin2sr
|
||||
- LDSR - https://github.com/Hafiidz/latent-diffusion
|
||||
- MiDaS - https://github.com/isl-org/MiDaS
|
||||
- Ideas for optimizations - https://github.com/basujindal/stable-diffusion
|
||||
@ -198,8 +165,5 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al
|
||||
- Security advice - RyotaK
|
||||
- UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC
|
||||
- TAESD - Ollin Boer Bohan - https://github.com/madebyollin/taesd
|
||||
- LyCORIS - KohakuBlueleaf
|
||||
- Restart sampling - lambertae - https://github.com/Newbeeer/diffusion_restart_sampling
|
||||
- Hypertile - tfernd - https://github.com/tfernd/HyperTile
|
||||
- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
|
||||
- (You)
|
||||
|
@ -1,5 +0,0 @@
|
||||
[default.extend-words]
|
||||
# Part of "RGBa" (Pillow's pre-multiplied alpha RGB mode)
|
||||
Ba = "Ba"
|
||||
# HSA is something AMD uses for their GPUs
|
||||
HSA = "HSA"
|
@ -40,7 +40,7 @@ model:
|
||||
use_spatial_transformer: True
|
||||
transformer_depth: 1
|
||||
context_dim: 768
|
||||
use_checkpoint: False
|
||||
use_checkpoint: True
|
||||
legacy: False
|
||||
|
||||
first_stage_config:
|
||||
|
@ -1,73 +0,0 @@
|
||||
model:
|
||||
base_learning_rate: 1.0e-04
|
||||
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
||||
params:
|
||||
linear_start: 0.00085
|
||||
linear_end: 0.0120
|
||||
num_timesteps_cond: 1
|
||||
log_every_t: 200
|
||||
timesteps: 1000
|
||||
first_stage_key: "jpg"
|
||||
cond_stage_key: "txt"
|
||||
image_size: 64
|
||||
channels: 4
|
||||
cond_stage_trainable: false # Note: different from the one we trained before
|
||||
conditioning_key: crossattn
|
||||
monitor: val/loss_simple_ema
|
||||
scale_factor: 0.18215
|
||||
use_ema: False
|
||||
|
||||
scheduler_config: # 10000 warmup steps
|
||||
target: ldm.lr_scheduler.LambdaLinearScheduler
|
||||
params:
|
||||
warm_up_steps: [ 10000 ]
|
||||
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
||||
f_start: [ 1.e-6 ]
|
||||
f_max: [ 1. ]
|
||||
f_min: [ 1. ]
|
||||
|
||||
unet_config:
|
||||
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
||||
params:
|
||||
image_size: 32 # unused
|
||||
in_channels: 4
|
||||
out_channels: 4
|
||||
model_channels: 320
|
||||
attention_resolutions: [ 4, 2, 1 ]
|
||||
num_res_blocks: 2
|
||||
channel_mult: [ 1, 2, 4, 4 ]
|
||||
num_head_channels: 64
|
||||
use_spatial_transformer: True
|
||||
use_linear_in_transformer: True
|
||||
transformer_depth: 1
|
||||
context_dim: 1024
|
||||
use_checkpoint: False
|
||||
legacy: False
|
||||
|
||||
first_stage_config:
|
||||
target: ldm.models.autoencoder.AutoencoderKL
|
||||
params:
|
||||
embed_dim: 4
|
||||
monitor: val/rec_loss
|
||||
ddconfig:
|
||||
double_z: true
|
||||
z_channels: 4
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult:
|
||||
- 1
|
||||
- 2
|
||||
- 4
|
||||
- 4
|
||||
num_res_blocks: 2
|
||||
attn_resolutions: []
|
||||
dropout: 0.0
|
||||
lossconfig:
|
||||
target: torch.nn.Identity
|
||||
|
||||
cond_stage_config:
|
||||
target: modules.xlmr_m18.BertSeriesModelWithTransformation
|
||||
params:
|
||||
name: "XLMR-Large"
|
@ -45,7 +45,7 @@ model:
|
||||
use_spatial_transformer: True
|
||||
transformer_depth: 1
|
||||
context_dim: 768
|
||||
use_checkpoint: False
|
||||
use_checkpoint: True
|
||||
legacy: False
|
||||
|
||||
first_stage_config:
|
||||
|
@ -1,5 +0,0 @@
|
||||
model:
|
||||
target: modules.models.sd3.sd3_model.SD3Inferencer
|
||||
params:
|
||||
shift: 3
|
||||
state_dict: null
|
@ -1,98 +0,0 @@
|
||||
model:
|
||||
target: sgm.models.diffusion.DiffusionEngine
|
||||
params:
|
||||
scale_factor: 0.13025
|
||||
disable_first_stage_autocast: True
|
||||
|
||||
denoiser_config:
|
||||
target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
|
||||
params:
|
||||
num_idx: 1000
|
||||
|
||||
weighting_config:
|
||||
target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
|
||||
scaling_config:
|
||||
target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
|
||||
discretization_config:
|
||||
target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
|
||||
|
||||
network_config:
|
||||
target: sgm.modules.diffusionmodules.openaimodel.UNetModel
|
||||
params:
|
||||
adm_in_channels: 2816
|
||||
num_classes: sequential
|
||||
use_checkpoint: False
|
||||
in_channels: 9
|
||||
out_channels: 4
|
||||
model_channels: 320
|
||||
attention_resolutions: [4, 2]
|
||||
num_res_blocks: 2
|
||||
channel_mult: [1, 2, 4]
|
||||
num_head_channels: 64
|
||||
use_spatial_transformer: True
|
||||
use_linear_in_transformer: True
|
||||
transformer_depth: [1, 2, 10] # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
|
||||
context_dim: 2048
|
||||
spatial_transformer_attn_type: softmax-xformers
|
||||
legacy: False
|
||||
|
||||
conditioner_config:
|
||||
target: sgm.modules.GeneralConditioner
|
||||
params:
|
||||
emb_models:
|
||||
# crossattn cond
|
||||
- is_trainable: False
|
||||
input_key: txt
|
||||
target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
|
||||
params:
|
||||
layer: hidden
|
||||
layer_idx: 11
|
||||
# crossattn and vector cond
|
||||
- is_trainable: False
|
||||
input_key: txt
|
||||
target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
|
||||
params:
|
||||
arch: ViT-bigG-14
|
||||
version: laion2b_s39b_b160k
|
||||
freeze: True
|
||||
layer: penultimate
|
||||
always_return_pooled: True
|
||||
legacy: False
|
||||
# vector cond
|
||||
- is_trainable: False
|
||||
input_key: original_size_as_tuple
|
||||
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
|
||||
params:
|
||||
outdim: 256 # multiplied by two
|
||||
# vector cond
|
||||
- is_trainable: False
|
||||
input_key: crop_coords_top_left
|
||||
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
|
||||
params:
|
||||
outdim: 256 # multiplied by two
|
||||
# vector cond
|
||||
- is_trainable: False
|
||||
input_key: target_size_as_tuple
|
||||
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
|
||||
params:
|
||||
outdim: 256 # multiplied by two
|
||||
|
||||
first_stage_config:
|
||||
target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
|
||||
params:
|
||||
embed_dim: 4
|
||||
monitor: val/rec_loss
|
||||
ddconfig:
|
||||
attn_type: vanilla-xformers
|
||||
double_z: true
|
||||
z_channels: 4
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult: [1, 2, 4, 4]
|
||||
num_res_blocks: 2
|
||||
attn_resolutions: []
|
||||
dropout: 0.0
|
||||
lossconfig:
|
||||
target: torch.nn.Identity
|
@ -40,7 +40,7 @@ model:
|
||||
use_spatial_transformer: True
|
||||
transformer_depth: 1
|
||||
context_dim: 768
|
||||
use_checkpoint: False
|
||||
use_checkpoint: True
|
||||
legacy: False
|
||||
|
||||
first_stage_config:
|
||||
|
@ -40,7 +40,7 @@ model:
|
||||
use_spatial_transformer: True
|
||||
transformer_depth: 1
|
||||
context_dim: 768
|
||||
use_checkpoint: False
|
||||
use_checkpoint: True
|
||||
legacy: False
|
||||
|
||||
first_stage_config:
|
||||
|
@ -12,7 +12,7 @@ import safetensors.torch
|
||||
|
||||
from ldm.models.diffusion.ddim import DDIMSampler
|
||||
from ldm.util import instantiate_from_config, ismap
|
||||
from modules import shared, sd_hijack, devices
|
||||
from modules import shared, sd_hijack
|
||||
|
||||
cached_ldsr_model: torch.nn.Module = None
|
||||
|
||||
@ -112,7 +112,8 @@ class LDSR:
|
||||
|
||||
|
||||
gc.collect()
|
||||
devices.torch_gc()
|
||||
if torch.cuda.is_available:
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
im_og = image
|
||||
width_og, height_og = im_og.size
|
||||
@ -149,7 +150,8 @@ class LDSR:
|
||||
|
||||
del model
|
||||
gc.collect()
|
||||
devices.torch_gc()
|
||||
if torch.cuda.is_available:
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
return a
|
||||
|
||||
|
@ -1,9 +1,12 @@
|
||||
import os
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from basicsr.utils.download_util import load_file_from_url
|
||||
|
||||
from modules.modelloader import load_file_from_url
|
||||
from modules.upscaler import Upscaler, UpscalerData
|
||||
from ldsr_model_arch import LDSR
|
||||
from modules import shared, script_callbacks, errors
|
||||
from modules import shared, script_callbacks
|
||||
import sd_hijack_autoencoder # noqa: F401
|
||||
import sd_hijack_ddpm_v1 # noqa: F401
|
||||
|
||||
@ -42,17 +45,22 @@ class UpscalerLDSR(Upscaler):
|
||||
if local_safetensors_path is not None and os.path.exists(local_safetensors_path):
|
||||
model = local_safetensors_path
|
||||
else:
|
||||
model = local_ckpt_path or load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name="model.ckpt")
|
||||
model = local_ckpt_path if local_ckpt_path is not None else load_file_from_url(url=self.model_url, model_dir=self.model_download_path, file_name="model.ckpt", progress=True)
|
||||
|
||||
yaml = local_yaml_path or load_file_from_url(self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml")
|
||||
yaml = local_yaml_path if local_yaml_path is not None else load_file_from_url(url=self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml", progress=True)
|
||||
|
||||
return LDSR(model, yaml)
|
||||
try:
|
||||
return LDSR(model, yaml)
|
||||
|
||||
except Exception:
|
||||
print("Error importing LDSR:", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
return None
|
||||
|
||||
def do_upscale(self, img, path):
|
||||
try:
|
||||
ldsr = self.load_model(path)
|
||||
except Exception:
|
||||
errors.report(f"Failed loading LDSR model {path}", exc_info=True)
|
||||
ldsr = self.load_model(path)
|
||||
if ldsr is None:
|
||||
print("NO LDSR!")
|
||||
return img
|
||||
ddim_steps = shared.opts.ldsr_steps
|
||||
return ldsr.super_resolution(img, ddim_steps, self.scale)
|
||||
|
@ -10,7 +10,7 @@ from contextlib import contextmanager
|
||||
from torch.optim.lr_scheduler import LambdaLR
|
||||
|
||||
from ldm.modules.ema import LitEma
|
||||
from vqvae_quantize import VectorQuantizer2 as VectorQuantizer
|
||||
from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
|
||||
from ldm.modules.diffusionmodules.model import Encoder, Decoder
|
||||
from ldm.util import instantiate_from_config
|
||||
|
||||
@ -91,9 +91,8 @@ class VQModel(pl.LightningModule):
|
||||
del sd[k]
|
||||
missing, unexpected = self.load_state_dict(sd, strict=False)
|
||||
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
|
||||
if missing:
|
||||
if len(missing) > 0:
|
||||
print(f"Missing Keys: {missing}")
|
||||
if unexpected:
|
||||
print(f"Unexpected Keys: {unexpected}")
|
||||
|
||||
def on_train_batch_end(self, *args, **kwargs):
|
||||
|
@ -195,9 +195,9 @@ class DDPMV1(pl.LightningModule):
|
||||
missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
|
||||
sd, strict=False)
|
||||
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
|
||||
if missing:
|
||||
if len(missing) > 0:
|
||||
print(f"Missing Keys: {missing}")
|
||||
if unexpected:
|
||||
if len(unexpected) > 0:
|
||||
print(f"Unexpected Keys: {unexpected}")
|
||||
|
||||
def q_mean_variance(self, x_start, t):
|
||||
@ -301,7 +301,7 @@ class DDPMV1(pl.LightningModule):
|
||||
elif self.parameterization == "x0":
|
||||
target = x_start
|
||||
else:
|
||||
raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")
|
||||
raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
|
||||
|
||||
loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
|
||||
|
||||
@ -572,7 +572,7 @@ class LatentDiffusionV1(DDPMV1):
|
||||
:param h: height
|
||||
:param w: width
|
||||
:return: normalized distance to image border,
|
||||
with min distance = 0 at border and max dist = 0.5 at image center
|
||||
wtith min distance = 0 at border and max dist = 0.5 at image center
|
||||
"""
|
||||
lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
|
||||
arr = self.meshgrid(h, w) / lower_right_corner
|
||||
@ -880,7 +880,7 @@ class LatentDiffusionV1(DDPMV1):
|
||||
def apply_model(self, x_noisy, t, cond, return_ids=False):
|
||||
|
||||
if isinstance(cond, dict):
|
||||
# hybrid case, cond is expected to be a dict
|
||||
# hybrid case, cond is exptected to be a dict
|
||||
pass
|
||||
else:
|
||||
if not isinstance(cond, list):
|
||||
@ -916,7 +916,7 @@ class LatentDiffusionV1(DDPMV1):
|
||||
cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
|
||||
|
||||
elif self.cond_stage_key == 'coordinates_bbox':
|
||||
assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
|
||||
assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size'
|
||||
|
||||
# assuming padding of unfold is always 0 and its dilation is always 1
|
||||
n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
|
||||
@ -926,7 +926,7 @@ class LatentDiffusionV1(DDPMV1):
|
||||
num_downs = self.first_stage_model.encoder.num_resolutions - 1
|
||||
rescale_latent = 2 ** (num_downs)
|
||||
|
||||
# get top left positions of patches as conforming for the bbbox tokenizer, therefore we
|
||||
# get top left postions of patches as conforming for the bbbox tokenizer, therefore we
|
||||
# need to rescale the tl patch coordinates to be in between (0,1)
|
||||
tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
|
||||
rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
|
||||
|
@ -1,147 +0,0 @@
|
||||
# Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/taming/modules/vqvae/quantize.py,
|
||||
# where the license is as follows:
|
||||
#
|
||||
# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in all
|
||||
# copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
|
||||
# OR OTHER DEALINGS IN THE SOFTWARE./
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import numpy as np
|
||||
from einops import rearrange
|
||||
|
||||
|
||||
class VectorQuantizer2(nn.Module):
|
||||
"""
|
||||
Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
|
||||
avoids costly matrix multiplications and allows for post-hoc remapping of indices.
|
||||
"""
|
||||
|
||||
# NOTE: due to a bug the beta term was applied to the wrong term. for
|
||||
# backwards compatibility we use the buggy version by default, but you can
|
||||
# specify legacy=False to fix it.
|
||||
def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
|
||||
sane_index_shape=False, legacy=True):
|
||||
super().__init__()
|
||||
self.n_e = n_e
|
||||
self.e_dim = e_dim
|
||||
self.beta = beta
|
||||
self.legacy = legacy
|
||||
|
||||
self.embedding = nn.Embedding(self.n_e, self.e_dim)
|
||||
self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
|
||||
|
||||
self.remap = remap
|
||||
if self.remap is not None:
|
||||
self.register_buffer("used", torch.tensor(np.load(self.remap)))
|
||||
self.re_embed = self.used.shape[0]
|
||||
self.unknown_index = unknown_index # "random" or "extra" or integer
|
||||
if self.unknown_index == "extra":
|
||||
self.unknown_index = self.re_embed
|
||||
self.re_embed = self.re_embed + 1
|
||||
print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
|
||||
f"Using {self.unknown_index} for unknown indices.")
|
||||
else:
|
||||
self.re_embed = n_e
|
||||
|
||||
self.sane_index_shape = sane_index_shape
|
||||
|
||||
def remap_to_used(self, inds):
|
||||
ishape = inds.shape
|
||||
assert len(ishape) > 1
|
||||
inds = inds.reshape(ishape[0], -1)
|
||||
used = self.used.to(inds)
|
||||
match = (inds[:, :, None] == used[None, None, ...]).long()
|
||||
new = match.argmax(-1)
|
||||
unknown = match.sum(2) < 1
|
||||
if self.unknown_index == "random":
|
||||
new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
|
||||
else:
|
||||
new[unknown] = self.unknown_index
|
||||
return new.reshape(ishape)
|
||||
|
||||
def unmap_to_all(self, inds):
|
||||
ishape = inds.shape
|
||||
assert len(ishape) > 1
|
||||
inds = inds.reshape(ishape[0], -1)
|
||||
used = self.used.to(inds)
|
||||
if self.re_embed > self.used.shape[0]: # extra token
|
||||
inds[inds >= self.used.shape[0]] = 0 # simply set to zero
|
||||
back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
|
||||
return back.reshape(ishape)
|
||||
|
||||
def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
|
||||
assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel"
|
||||
assert rescale_logits is False, "Only for interface compatible with Gumbel"
|
||||
assert return_logits is False, "Only for interface compatible with Gumbel"
|
||||
# reshape z -> (batch, height, width, channel) and flatten
|
||||
z = rearrange(z, 'b c h w -> b h w c').contiguous()
|
||||
z_flattened = z.view(-1, self.e_dim)
|
||||
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
|
||||
|
||||
d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
|
||||
torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
|
||||
torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))
|
||||
|
||||
min_encoding_indices = torch.argmin(d, dim=1)
|
||||
z_q = self.embedding(min_encoding_indices).view(z.shape)
|
||||
perplexity = None
|
||||
min_encodings = None
|
||||
|
||||
# compute loss for embedding
|
||||
if not self.legacy:
|
||||
loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + \
|
||||
torch.mean((z_q - z.detach()) ** 2)
|
||||
else:
|
||||
loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * \
|
||||
torch.mean((z_q - z.detach()) ** 2)
|
||||
|
||||
# preserve gradients
|
||||
z_q = z + (z_q - z).detach()
|
||||
|
||||
# reshape back to match original input shape
|
||||
z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()
|
||||
|
||||
if self.remap is not None:
|
||||
min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis
|
||||
min_encoding_indices = self.remap_to_used(min_encoding_indices)
|
||||
min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten
|
||||
|
||||
if self.sane_index_shape:
|
||||
min_encoding_indices = min_encoding_indices.reshape(
|
||||
z_q.shape[0], z_q.shape[2], z_q.shape[3])
|
||||
|
||||
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
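The distance matrix in `forward` above relies on the expansion ||z - e||² = ||z||² + ||e||² - 2·z·e rather than materialising all pairwise differences; a small self-contained check of that identity (hypothetical shapes, unrelated to the actual codebook size):

```python
import torch

z = torch.randn(5, 4)   # flattened latents (hypothetical)
e = torch.randn(7, 4)   # codebook entries (hypothetical)

# expansion used in VectorQuantizer2.forward
d_expanded = (z ** 2).sum(1, keepdim=True) + (e ** 2).sum(1) - 2 * z @ e.T
# direct pairwise squared Euclidean distance
d_direct = torch.cdist(z, e) ** 2

assert torch.allclose(d_expanded, d_direct, atol=1e-4)
```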
|
||||
|
||||
def get_codebook_entry(self, indices, shape):
|
||||
# shape specifying (batch, height, width, channel)
|
||||
if self.remap is not None:
|
||||
indices = indices.reshape(shape[0], -1) # add batch axis
|
||||
indices = self.unmap_to_all(indices)
|
||||
indices = indices.reshape(-1) # flatten again
|
||||
|
||||
# get quantized latent vectors
|
||||
z_q = self.embedding(indices)
|
||||
|
||||
if shape is not None:
|
||||
z_q = z_q.view(shape)
|
||||
# reshape back to match original input shape
|
||||
z_q = z_q.permute(0, 3, 1, 2).contiguous()
|
||||
|
||||
return z_q
|
@ -1,62 +1,45 @@
|
||||
from modules import extra_networks, shared
|
||||
import networks
|
||||
import lora
|
||||
|
||||
|
||||
class ExtraNetworkLora(extra_networks.ExtraNetwork):
|
||||
def __init__(self):
|
||||
super().__init__('lora')
|
||||
|
||||
self.errors = {}
|
||||
"""mapping of network names to the number of errors the network had during operation"""
|
||||
|
||||
remove_symbols = str.maketrans('', '', ":,")
|
||||
|
||||
def activate(self, p, params_list):
|
||||
additional = shared.opts.sd_lora
|
||||
|
||||
self.errors.clear()
|
||||
|
||||
if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional):
|
||||
if additional != "None" and additional in lora.available_loras and len([x for x in params_list if x.items[0] == additional]) == 0:
|
||||
p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
|
||||
params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
|
||||
|
||||
names = []
|
||||
te_multipliers = []
|
||||
unet_multipliers = []
|
||||
dyn_dims = []
|
||||
multipliers = []
|
||||
for params in params_list:
|
||||
assert params.items
|
||||
assert len(params.items) > 0
|
||||
|
||||
names.append(params.positional[0])
|
||||
names.append(params.items[0])
|
||||
multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0)
|
||||
|
||||
te_multiplier = float(params.positional[1]) if len(params.positional) > 1 else 1.0
|
||||
te_multiplier = float(params.named.get("te", te_multiplier))
|
||||
|
||||
unet_multiplier = float(params.positional[2]) if len(params.positional) > 2 else te_multiplier
|
||||
unet_multiplier = float(params.named.get("unet", unet_multiplier))
|
||||
|
||||
dyn_dim = int(params.positional[3]) if len(params.positional) > 3 else None
|
||||
dyn_dim = int(params.named["dyn"]) if "dyn" in params.named else dyn_dim
|
||||
|
||||
te_multipliers.append(te_multiplier)
|
||||
unet_multipliers.append(unet_multiplier)
|
||||
dyn_dims.append(dyn_dim)
|
||||
|
||||
networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims)
|
||||
lora.load_loras(names, multipliers)
|
||||
|
||||
if shared.opts.lora_add_hashes_to_infotext:
|
||||
if not getattr(p, "is_hr_pass", False) or not hasattr(p, "lora_hashes"):
|
||||
p.lora_hashes = {}
|
||||
lora_hashes = []
|
||||
for item in lora.loaded_loras:
|
||||
shorthash = item.lora_on_disk.shorthash
|
||||
if not shorthash:
|
||||
continue
|
||||
|
||||
for item in networks.loaded_networks:
|
||||
if item.network_on_disk.shorthash and item.mentioned_name:
|
||||
p.lora_hashes[item.mentioned_name.translate(self.remove_symbols)] = item.network_on_disk.shorthash
|
||||
alias = item.mentioned_name
|
||||
if not alias:
|
||||
continue
|
||||
|
||||
if p.lora_hashes:
|
||||
p.extra_generation_params["Lora hashes"] = ', '.join(f'{k}: {v}' for k, v in p.lora_hashes.items())
|
||||
alias = alias.replace(":", "").replace(",", "")
|
||||
|
||||
lora_hashes.append(f"{alias}: {shorthash}")
|
||||
|
||||
if lora_hashes:
|
||||
p.extra_generation_params["Lora hashes"] = ", ".join(lora_hashes)
|
||||
|
||||
def deactivate(self, p):
|
||||
if self.errors:
|
||||
p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items()))
|
||||
|
||||
self.errors.clear()
|
||||
pass
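As the `activate` code above implies, every `<lora:...>` tag in the prompt becomes one entry in `params_list`; in the newer networks-based path, the positional and named parts map roughly as follows (an illustrative sketch with a hypothetical network name, not output from the code):

```python
# <lora:myLora:0.8>              -> te_multiplier=0.8, unet_multiplier=0.8
# <lora:myLora:0.8:0.5>          -> te_multiplier=0.8, unet_multiplier=0.5
# <lora:myLora:te=0.8:unet=0.5>  -> same result, using named arguments
# <lora:myLora:0.8:0.5:16>       -> additionally dyn_dim=16
```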
|
||||
|
@ -1,9 +1,502 @@
|
||||
import networks
|
||||
import os
|
||||
import re
|
||||
import torch
|
||||
from typing import Union
|
||||
|
||||
list_available_loras = networks.list_available_networks
|
||||
from modules import shared, devices, sd_models, errors, scripts, sd_hijack, hashes
|
||||
|
||||
available_loras = networks.available_networks
|
||||
available_lora_aliases = networks.available_network_aliases
|
||||
available_lora_hash_lookup = networks.available_network_hash_lookup
|
||||
forbidden_lora_aliases = networks.forbidden_network_aliases
|
||||
loaded_loras = networks.loaded_networks
|
||||
metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}
|
||||
|
||||
re_digits = re.compile(r"\d+")
|
||||
re_x_proj = re.compile(r"(.*)_([qkv]_proj)$")
|
||||
re_compiled = {}
|
||||
|
||||
suffix_conversion = {
|
||||
"attentions": {},
|
||||
"resnets": {
|
||||
"conv1": "in_layers_2",
|
||||
"conv2": "out_layers_3",
|
||||
"time_emb_proj": "emb_layers_1",
|
||||
"conv_shortcut": "skip_connection",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def convert_diffusers_name_to_compvis(key, is_sd2):
|
||||
def match(match_list, regex_text):
|
||||
regex = re_compiled.get(regex_text)
|
||||
if regex is None:
|
||||
regex = re.compile(regex_text)
|
||||
re_compiled[regex_text] = regex
|
||||
|
||||
r = re.match(regex, key)
|
||||
if not r:
|
||||
return False
|
||||
|
||||
match_list.clear()
|
||||
match_list.extend([int(x) if re.match(re_digits, x) else x for x in r.groups()])
|
||||
return True
|
||||
|
||||
m = []
|
||||
|
||||
if match(m, r"lora_unet_down_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
|
||||
suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
|
||||
return f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"
|
||||
|
||||
if match(m, r"lora_unet_mid_block_(attentions|resnets)_(\d+)_(.+)"):
|
||||
suffix = suffix_conversion.get(m[0], {}).get(m[2], m[2])
|
||||
return f"diffusion_model_middle_block_{1 if m[0] == 'attentions' else m[1] * 2}_{suffix}"
|
||||
|
||||
if match(m, r"lora_unet_up_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
|
||||
suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
|
||||
return f"diffusion_model_output_blocks_{m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"
|
||||
|
||||
if match(m, r"lora_unet_down_blocks_(\d+)_downsamplers_0_conv"):
|
||||
return f"diffusion_model_input_blocks_{3 + m[0] * 3}_0_op"
|
||||
|
||||
if match(m, r"lora_unet_up_blocks_(\d+)_upsamplers_0_conv"):
|
||||
return f"diffusion_model_output_blocks_{2 + m[0] * 3}_{2 if m[0]>0 else 1}_conv"
|
||||
|
||||
if match(m, r"lora_te_text_model_encoder_layers_(\d+)_(.+)"):
|
||||
if is_sd2:
|
||||
if 'mlp_fc1' in m[1]:
|
||||
return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc1', 'mlp_c_fc')}"
|
||||
elif 'mlp_fc2' in m[1]:
|
||||
return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc2', 'mlp_c_proj')}"
|
||||
else:
|
||||
return f"model_transformer_resblocks_{m[0]}_{m[1].replace('self_attn', 'attn')}"
|
||||
|
||||
return f"transformer_text_model_encoder_layers_{m[0]}_{m[1]}"
|
||||
|
||||
return key
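As an illustrative example (assumed key, but the arithmetic follows the code above), a down-block attention key maps to the corresponding `input_blocks` index:

```python
# down_blocks_0 / attentions_1 -> input_blocks index 1 + 0*3 + 1 = 2, sub-index 1
convert_diffusers_name_to_compvis(
    "lora_unet_down_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q",
    is_sd2=False,
)
# -> "diffusion_model_input_blocks_2_1_transformer_blocks_0_attn1_to_q"
```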
|
||||
|
||||
|
||||
class LoraOnDisk:
|
||||
def __init__(self, name, filename):
|
||||
self.name = name
|
||||
self.filename = filename
|
||||
self.metadata = {}
|
||||
self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors"
|
||||
|
||||
if self.is_safetensors:
|
||||
try:
|
||||
self.metadata = sd_models.read_metadata_from_safetensors(filename)
|
||||
except Exception as e:
|
||||
errors.display(e, f"reading lora {filename}")
|
||||
|
||||
if self.metadata:
|
||||
m = {}
|
||||
for k, v in sorted(self.metadata.items(), key=lambda x: metadata_tags_order.get(x[0], 999)):
|
||||
m[k] = v
|
||||
|
||||
self.metadata = m
|
||||
|
||||
self.ssmd_cover_images = self.metadata.pop('ssmd_cover_images', None) # those are cover images and they are too big to display in UI as text
|
||||
self.alias = self.metadata.get('ss_output_name', self.name)
|
||||
|
||||
self.hash = None
|
||||
self.shorthash = None
|
||||
self.set_hash(
|
||||
self.metadata.get('sshs_model_hash') or
|
||||
hashes.sha256_from_cache(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or
|
||||
''
|
||||
)
|
||||
|
||||
def set_hash(self, v):
|
||||
self.hash = v
|
||||
self.shorthash = self.hash[0:12]
|
||||
|
||||
if self.shorthash:
|
||||
available_lora_hash_lookup[self.shorthash] = self
|
||||
|
||||
def read_hash(self):
|
||||
if not self.hash:
|
||||
self.set_hash(hashes.sha256(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or '')
|
||||
|
||||
def get_alias(self):
|
||||
if shared.opts.lora_preferred_name == "Filename" or self.alias.lower() in forbidden_lora_aliases:
|
||||
return self.name
|
||||
else:
|
||||
return self.alias
|
||||
|
||||
|
||||
class LoraModule:
|
||||
def __init__(self, name, lora_on_disk: LoraOnDisk):
|
||||
self.name = name
|
||||
self.lora_on_disk = lora_on_disk
|
||||
self.multiplier = 1.0
|
||||
self.modules = {}
|
||||
self.mtime = None
|
||||
|
||||
self.mentioned_name = None
|
||||
"""the text that was used to add lora to prompt - can be either name or an alias"""
|
||||
|
||||
|
||||
class LoraUpDownModule:
|
||||
def __init__(self):
|
||||
self.up = None
|
||||
self.down = None
|
||||
self.alpha = None
|
||||
|
||||
|
||||
def assign_lora_names_to_compvis_modules(sd_model):
|
||||
lora_layer_mapping = {}
|
||||
|
||||
for name, module in shared.sd_model.cond_stage_model.wrapped.named_modules():
|
||||
lora_name = name.replace(".", "_")
|
||||
lora_layer_mapping[lora_name] = module
|
||||
module.lora_layer_name = lora_name
|
||||
|
||||
for name, module in shared.sd_model.model.named_modules():
|
||||
lora_name = name.replace(".", "_")
|
||||
lora_layer_mapping[lora_name] = module
|
||||
module.lora_layer_name = lora_name
|
||||
|
||||
sd_model.lora_layer_mapping = lora_layer_mapping
|
||||
|
||||
|
||||
def load_lora(name, lora_on_disk):
|
||||
lora = LoraModule(name, lora_on_disk)
|
||||
lora.mtime = os.path.getmtime(lora_on_disk.filename)
|
||||
|
||||
sd = sd_models.read_state_dict(lora_on_disk.filename)
|
||||
|
||||
# this should not be needed but is here as an emergency fix for an unknown error people are experiencing in 1.2.0
|
||||
if not hasattr(shared.sd_model, 'lora_layer_mapping'):
|
||||
assign_lora_names_to_compvis_modules(shared.sd_model)
|
||||
|
||||
keys_failed_to_match = {}
|
||||
is_sd2 = 'model_transformer_resblocks' in shared.sd_model.lora_layer_mapping
|
||||
|
||||
for key_diffusers, weight in sd.items():
|
||||
key_diffusers_without_lora_parts, lora_key = key_diffusers.split(".", 1)
|
||||
key = convert_diffusers_name_to_compvis(key_diffusers_without_lora_parts, is_sd2)
|
||||
|
||||
sd_module = shared.sd_model.lora_layer_mapping.get(key, None)
|
||||
|
||||
if sd_module is None:
|
||||
m = re_x_proj.match(key)
|
||||
if m:
|
||||
sd_module = shared.sd_model.lora_layer_mapping.get(m.group(1), None)
|
||||
|
||||
if sd_module is None:
|
||||
keys_failed_to_match[key_diffusers] = key
|
||||
continue
|
||||
|
||||
lora_module = lora.modules.get(key, None)
|
||||
if lora_module is None:
|
||||
lora_module = LoraUpDownModule()
|
||||
lora.modules[key] = lora_module
|
||||
|
||||
if lora_key == "alpha":
|
||||
lora_module.alpha = weight.item()
|
||||
continue
|
||||
|
||||
if type(sd_module) == torch.nn.Linear:
|
||||
module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
|
||||
elif type(sd_module) == torch.nn.modules.linear.NonDynamicallyQuantizableLinear:
|
||||
module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
|
||||
elif type(sd_module) == torch.nn.MultiheadAttention:
|
||||
module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
|
||||
elif type(sd_module) == torch.nn.Conv2d and weight.shape[2:] == (1, 1):
|
||||
module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
|
||||
elif type(sd_module) == torch.nn.Conv2d and weight.shape[2:] == (3, 3):
|
||||
module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (3, 3), bias=False)
|
||||
else:
|
||||
print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}')
|
||||
continue
|
||||
raise AssertionError(f"Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}")
|
||||
|
||||
with torch.no_grad():
|
||||
module.weight.copy_(weight)
|
||||
|
||||
module.to(device=devices.cpu, dtype=devices.dtype)
|
||||
|
||||
if lora_key == "lora_up.weight":
|
||||
lora_module.up = module
|
||||
elif lora_key == "lora_down.weight":
|
||||
lora_module.down = module
|
||||
else:
|
||||
raise AssertionError(f"Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha")
|
||||
|
||||
if len(keys_failed_to_match) > 0:
|
||||
print(f"Failed to match keys when loading Lora {lora_on_disk.filename}: {keys_failed_to_match}")
|
||||
|
||||
return lora
|
||||
|
||||
|
||||
def load_loras(names, multipliers=None):
|
||||
already_loaded = {}
|
||||
|
||||
for lora in loaded_loras:
|
||||
if lora.name in names:
|
||||
already_loaded[lora.name] = lora
|
||||
|
||||
loaded_loras.clear()
|
||||
|
||||
loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
|
||||
if any(x is None for x in loras_on_disk):
|
||||
list_available_loras()
|
||||
|
||||
loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
|
||||
|
||||
failed_to_load_loras = []
|
||||
|
||||
for i, name in enumerate(names):
|
||||
lora = already_loaded.get(name, None)
|
||||
|
||||
lora_on_disk = loras_on_disk[i]
|
||||
|
||||
if lora_on_disk is not None:
|
||||
if lora is None or os.path.getmtime(lora_on_disk.filename) > lora.mtime:
|
||||
try:
|
||||
lora = load_lora(name, lora_on_disk)
|
||||
except Exception as e:
|
||||
errors.display(e, f"loading Lora {lora_on_disk.filename}")
|
||||
continue
|
||||
|
||||
lora.mentioned_name = name
|
||||
|
||||
lora_on_disk.read_hash()
|
||||
|
||||
if lora is None:
|
||||
failed_to_load_loras.append(name)
|
||||
print(f"Couldn't find Lora with name {name}")
|
||||
continue
|
||||
|
||||
lora.multiplier = multipliers[i] if multipliers else 1.0
|
||||
loaded_loras.append(lora)
|
||||
|
||||
if len(failed_to_load_loras) > 0:
|
||||
sd_hijack.model_hijack.comments.append("Failed to find Loras: " + ", ".join(failed_to_load_loras))
|
||||
|
||||
|
||||
def lora_calc_updown(lora, module, target):
|
||||
with torch.no_grad():
|
||||
up = module.up.weight.to(target.device, dtype=target.dtype)
|
||||
down = module.down.weight.to(target.device, dtype=target.dtype)
|
||||
|
||||
if up.shape[2:] == (1, 1) and down.shape[2:] == (1, 1):
|
||||
updown = (up.squeeze(2).squeeze(2) @ down.squeeze(2).squeeze(2)).unsqueeze(2).unsqueeze(3)
|
||||
elif up.shape[2:] == (3, 3) or down.shape[2:] == (3, 3):
|
||||
updown = torch.nn.functional.conv2d(down.permute(1, 0, 2, 3), up).permute(1, 0, 2, 3)
|
||||
else:
|
||||
updown = up @ down
|
||||
|
||||
updown = updown * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0)
|
||||
|
||||
return updown
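Put differently, `lora_calc_updown` just rebuilds the low-rank delta and scales it; a minimal standalone sketch of the same arithmetic with hypothetical shapes (not the module's actual tensors):

```python
import torch

# hypothetical rank-4 LoRA factors for a 320x768 linear layer
up = torch.randn(320, 4)     # lora_up.weight
down = torch.randn(4, 768)   # lora_down.weight
alpha, multiplier = 4.0, 0.8

# same scaling as above: multiplier * (alpha / rank) * (up @ down)
updown = multiplier * (alpha / up.shape[1]) * (up @ down)
assert updown.shape == (320, 768)   # delta added onto the original weight
```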
|
||||
|
||||
|
||||
def lora_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
|
||||
weights_backup = getattr(self, "lora_weights_backup", None)
|
||||
|
||||
if weights_backup is None:
|
||||
return
|
||||
|
||||
if isinstance(self, torch.nn.MultiheadAttention):
|
||||
self.in_proj_weight.copy_(weights_backup[0])
|
||||
self.out_proj.weight.copy_(weights_backup[1])
|
||||
else:
|
||||
self.weight.copy_(weights_backup)
|
||||
|
||||
|
||||
def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
|
||||
"""
|
||||
Applies the currently selected set of Loras to the weights of torch layer self.
|
||||
If weights already have this particular set of loras applied, does nothing.
|
||||
If not, restores original weights from backup and alters weights according to loras.
|
||||
"""
|
||||
|
||||
lora_layer_name = getattr(self, 'lora_layer_name', None)
|
||||
if lora_layer_name is None:
|
||||
return
|
||||
|
||||
current_names = getattr(self, "lora_current_names", ())
|
||||
wanted_names = tuple((x.name, x.multiplier) for x in loaded_loras)
|
||||
|
||||
weights_backup = getattr(self, "lora_weights_backup", None)
|
||||
if weights_backup is None:
|
||||
if isinstance(self, torch.nn.MultiheadAttention):
|
||||
weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True))
|
||||
else:
|
||||
weights_backup = self.weight.to(devices.cpu, copy=True)
|
||||
|
||||
self.lora_weights_backup = weights_backup
|
||||
|
||||
if current_names != wanted_names:
|
||||
lora_restore_weights_from_backup(self)
|
||||
|
||||
for lora in loaded_loras:
|
||||
module = lora.modules.get(lora_layer_name, None)
|
||||
if module is not None and hasattr(self, 'weight'):
|
||||
self.weight += lora_calc_updown(lora, module, self.weight)
|
||||
continue
|
||||
|
||||
module_q = lora.modules.get(lora_layer_name + "_q_proj", None)
|
||||
module_k = lora.modules.get(lora_layer_name + "_k_proj", None)
|
||||
module_v = lora.modules.get(lora_layer_name + "_v_proj", None)
|
||||
module_out = lora.modules.get(lora_layer_name + "_out_proj", None)
|
||||
|
||||
if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
|
||||
updown_q = lora_calc_updown(lora, module_q, self.in_proj_weight)
|
||||
updown_k = lora_calc_updown(lora, module_k, self.in_proj_weight)
|
||||
updown_v = lora_calc_updown(lora, module_v, self.in_proj_weight)
|
||||
updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
|
||||
|
||||
self.in_proj_weight += updown_qkv
|
||||
self.out_proj.weight += lora_calc_updown(lora, module_out, self.out_proj.weight)
|
||||
continue
|
||||
|
||||
if module is None:
|
||||
continue
|
||||
|
||||
print(f'failed to calculate lora weights for layer {lora_layer_name}')
|
||||
|
||||
self.lora_current_names = wanted_names
|
||||
|
||||
|
||||
def lora_forward(module, input, original_forward):
|
||||
"""
|
||||
Old way of applying Lora by executing operations during layer's forward.
|
||||
Stacking many loras this way results in big performance degradation.
|
||||
"""
|
||||
|
||||
if len(loaded_loras) == 0:
|
||||
return original_forward(module, input)
|
||||
|
||||
input = devices.cond_cast_unet(input)
|
||||
|
||||
lora_restore_weights_from_backup(module)
|
||||
lora_reset_cached_weight(module)
|
||||
|
||||
res = original_forward(module, input)
|
||||
|
||||
lora_layer_name = getattr(module, 'lora_layer_name', None)
|
||||
for lora in loaded_loras:
|
||||
module = lora.modules.get(lora_layer_name, None)
|
||||
if module is None:
|
||||
continue
|
||||
|
||||
module.up.to(device=devices.device)
|
||||
module.down.to(device=devices.device)
|
||||
|
||||
res = res + module.up(module.down(input)) * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0)
|
||||
|
||||
return res
|
||||
|
||||
|
||||
def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
|
||||
self.lora_current_names = ()
|
||||
self.lora_weights_backup = None
|
||||
|
||||
|
||||
def lora_Linear_forward(self, input):
|
||||
if shared.opts.lora_functional:
|
||||
return lora_forward(self, input, torch.nn.Linear_forward_before_lora)
|
||||
|
||||
lora_apply_weights(self)
|
||||
|
||||
return torch.nn.Linear_forward_before_lora(self, input)
|
||||
|
||||
|
||||
def lora_Linear_load_state_dict(self, *args, **kwargs):
|
||||
lora_reset_cached_weight(self)
|
||||
|
||||
return torch.nn.Linear_load_state_dict_before_lora(self, *args, **kwargs)
|
||||
|
||||
|
||||
def lora_Conv2d_forward(self, input):
|
||||
if shared.opts.lora_functional:
|
||||
return lora_forward(self, input, torch.nn.Conv2d_forward_before_lora)
|
||||
|
||||
lora_apply_weights(self)
|
||||
|
||||
return torch.nn.Conv2d_forward_before_lora(self, input)
|
||||
|
||||
|
||||
def lora_Conv2d_load_state_dict(self, *args, **kwargs):
|
||||
lora_reset_cached_weight(self)
|
||||
|
||||
return torch.nn.Conv2d_load_state_dict_before_lora(self, *args, **kwargs)
|
||||
|
||||
|
||||
def lora_MultiheadAttention_forward(self, *args, **kwargs):
|
||||
lora_apply_weights(self)
|
||||
|
||||
return torch.nn.MultiheadAttention_forward_before_lora(self, *args, **kwargs)
|
||||
|
||||
|
||||
def lora_MultiheadAttention_load_state_dict(self, *args, **kwargs):
|
||||
lora_reset_cached_weight(self)
|
||||
|
||||
return torch.nn.MultiheadAttention_load_state_dict_before_lora(self, *args, **kwargs)
|
||||
|
||||
|
||||
def list_available_loras():
|
||||
available_loras.clear()
|
||||
available_lora_aliases.clear()
|
||||
forbidden_lora_aliases.clear()
|
||||
available_lora_hash_lookup.clear()
|
||||
forbidden_lora_aliases.update({"none": 1, "Addams": 1})
|
||||
|
||||
os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
|
||||
|
||||
candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
|
||||
for filename in sorted(candidates, key=str.lower):
|
||||
if os.path.isdir(filename):
|
||||
continue
|
||||
|
||||
name = os.path.splitext(os.path.basename(filename))[0]
|
||||
entry = LoraOnDisk(name, filename)
|
||||
|
||||
available_loras[name] = entry
|
||||
|
||||
if entry.alias in available_lora_aliases:
|
||||
forbidden_lora_aliases[entry.alias.lower()] = 1
|
||||
|
||||
available_lora_aliases[name] = entry
|
||||
available_lora_aliases[entry.alias] = entry
|
||||
|
||||
|
||||
re_lora_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")
|
||||
|
||||
|
||||
def infotext_pasted(infotext, params):
|
||||
if "AddNet Module 1" in [x[1] for x in scripts.scripts_txt2img.infotext_fields]:
|
||||
return # if the other extension is active, it will handle those fields, no need to do anything
|
||||
|
||||
added = []
|
||||
|
||||
for k in params:
|
||||
if not k.startswith("AddNet Model "):
|
||||
continue
|
||||
|
||||
num = k[13:]
|
||||
|
||||
if params.get("AddNet Module " + num) != "LoRA":
|
||||
continue
|
||||
|
||||
name = params.get("AddNet Model " + num)
|
||||
if name is None:
|
||||
continue
|
||||
|
||||
m = re_lora_name.match(name)
|
||||
if m:
|
||||
name = m.group(1)
|
||||
|
||||
multiplier = params.get("AddNet Weight A " + num, "1.0")
|
||||
|
||||
added.append(f"<lora:{name}:{multiplier}>")
|
||||
|
||||
if added:
|
||||
params["Prompt"] += "\n" + "".join(added)
|
||||
|
||||
|
||||
available_loras = {}
|
||||
available_lora_aliases = {}
|
||||
available_lora_hash_lookup = {}
|
||||
forbidden_lora_aliases = {}
|
||||
loaded_loras = []
|
||||
|
||||
list_available_loras()
|
||||
|
@ -1,33 +0,0 @@
|
||||
import sys
|
||||
import copy
|
||||
import logging
|
||||
|
||||
|
||||
class ColoredFormatter(logging.Formatter):
|
||||
COLORS = {
|
||||
"DEBUG": "\033[0;36m", # CYAN
|
||||
"INFO": "\033[0;32m", # GREEN
|
||||
"WARNING": "\033[0;33m", # YELLOW
|
||||
"ERROR": "\033[0;31m", # RED
|
||||
"CRITICAL": "\033[0;37;41m", # WHITE ON RED
|
||||
"RESET": "\033[0m", # RESET COLOR
|
||||
}
|
||||
|
||||
def format(self, record):
|
||||
colored_record = copy.copy(record)
|
||||
levelname = colored_record.levelname
|
||||
seq = self.COLORS.get(levelname, self.COLORS["RESET"])
|
||||
colored_record.levelname = f"{seq}{levelname}{self.COLORS['RESET']}"
|
||||
return super().format(colored_record)
|
||||
|
||||
|
||||
logger = logging.getLogger("lora")
|
||||
logger.propagate = False
|
||||
|
||||
|
||||
if not logger.handlers:
|
||||
handler = logging.StreamHandler(sys.stdout)
|
||||
handler.setFormatter(
|
||||
ColoredFormatter("[%(name)s]-%(levelname)s: %(message)s")
|
||||
)
|
||||
logger.addHandler(handler)
|
@ -1,31 +0,0 @@
|
||||
import torch
|
||||
|
||||
import networks
|
||||
from modules import patches
|
||||
|
||||
|
||||
class LoraPatches:
|
||||
def __init__(self):
|
||||
self.Linear_forward = patches.patch(__name__, torch.nn.Linear, 'forward', networks.network_Linear_forward)
|
||||
self.Linear_load_state_dict = patches.patch(__name__, torch.nn.Linear, '_load_from_state_dict', networks.network_Linear_load_state_dict)
|
||||
self.Conv2d_forward = patches.patch(__name__, torch.nn.Conv2d, 'forward', networks.network_Conv2d_forward)
|
||||
self.Conv2d_load_state_dict = patches.patch(__name__, torch.nn.Conv2d, '_load_from_state_dict', networks.network_Conv2d_load_state_dict)
|
||||
self.GroupNorm_forward = patches.patch(__name__, torch.nn.GroupNorm, 'forward', networks.network_GroupNorm_forward)
|
||||
self.GroupNorm_load_state_dict = patches.patch(__name__, torch.nn.GroupNorm, '_load_from_state_dict', networks.network_GroupNorm_load_state_dict)
|
||||
self.LayerNorm_forward = patches.patch(__name__, torch.nn.LayerNorm, 'forward', networks.network_LayerNorm_forward)
|
||||
self.LayerNorm_load_state_dict = patches.patch(__name__, torch.nn.LayerNorm, '_load_from_state_dict', networks.network_LayerNorm_load_state_dict)
|
||||
self.MultiheadAttention_forward = patches.patch(__name__, torch.nn.MultiheadAttention, 'forward', networks.network_MultiheadAttention_forward)
|
||||
self.MultiheadAttention_load_state_dict = patches.patch(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict', networks.network_MultiheadAttention_load_state_dict)
|
||||
|
||||
def undo(self):
|
||||
self.Linear_forward = patches.undo(__name__, torch.nn.Linear, 'forward')
|
||||
self.Linear_load_state_dict = patches.undo(__name__, torch.nn.Linear, '_load_from_state_dict')
|
||||
self.Conv2d_forward = patches.undo(__name__, torch.nn.Conv2d, 'forward')
|
||||
self.Conv2d_load_state_dict = patches.undo(__name__, torch.nn.Conv2d, '_load_from_state_dict')
|
||||
self.GroupNorm_forward = patches.undo(__name__, torch.nn.GroupNorm, 'forward')
|
||||
self.GroupNorm_load_state_dict = patches.undo(__name__, torch.nn.GroupNorm, '_load_from_state_dict')
|
||||
self.LayerNorm_forward = patches.undo(__name__, torch.nn.LayerNorm, 'forward')
|
||||
self.LayerNorm_load_state_dict = patches.undo(__name__, torch.nn.LayerNorm, '_load_from_state_dict')
|
||||
self.MultiheadAttention_forward = patches.undo(__name__, torch.nn.MultiheadAttention, 'forward')
|
||||
self.MultiheadAttention_load_state_dict = patches.undo(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict')
|
||||
|
@ -1,68 +0,0 @@
|
||||
import torch
|
||||
|
||||
|
||||
def make_weight_cp(t, wa, wb):
|
||||
temp = torch.einsum('i j k l, j r -> i r k l', t, wb)
|
||||
return torch.einsum('i j k l, i r -> r j k l', temp, wa)
|
||||
|
||||
|
||||
def rebuild_conventional(up, down, shape, dyn_dim=None):
|
||||
up = up.reshape(up.size(0), -1)
|
||||
down = down.reshape(down.size(0), -1)
|
||||
if dyn_dim is not None:
|
||||
up = up[:, :dyn_dim]
|
||||
down = down[:dyn_dim, :]
|
||||
return (up @ down).reshape(shape)
|
||||
|
||||
|
||||
def rebuild_cp_decomposition(up, down, mid):
|
||||
up = up.reshape(up.size(0), -1)
|
||||
down = down.reshape(down.size(0), -1)
|
||||
return torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down)
|
||||
|
||||
|
||||
# copied from https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/lokr.py
|
||||
def factorization(dimension: int, factor:int=-1) -> tuple[int, int]:
|
||||
'''
|
||||
return a tuple of two values of the input dimension decomposed by the number closest to factor
|
||||
the second value is greater than or equal to the first value.
|
||||
|
||||
In LoRA with Kronecker Product, the first value is a value for weight scale.
|
||||
the second value is a value for weight.
|
||||
|
||||
Because of the non-commutative property, A⊗B ≠ B⊗A, the meaning of the two matrices is slightly different.
|
||||
|
||||
examples)
|
||||
factor
|
||||
-1 2 4 8 16 ...
|
||||
127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127 127 -> 1, 127
|
||||
128 -> 8, 16 128 -> 2, 64 128 -> 4, 32 128 -> 8, 16 128 -> 8, 16
|
||||
250 -> 10, 25 250 -> 2, 125 250 -> 2, 125 250 -> 5, 50 250 -> 10, 25
|
||||
360 -> 8, 45 360 -> 2, 180 360 -> 4, 90 360 -> 8, 45 360 -> 12, 30
|
||||
512 -> 16, 32 512 -> 2, 256 512 -> 4, 128 512 -> 8, 64 512 -> 16, 32
|
||||
1024 -> 32, 32 1024 -> 2, 512 1024 -> 4, 256 1024 -> 8, 128 1024 -> 16, 64
|
||||
'''
|
||||
|
||||
if factor > 0 and (dimension % factor) == 0:
|
||||
m = factor
|
||||
n = dimension // factor
|
||||
if m > n:
|
||||
n, m = m, n
|
||||
return m, n
|
||||
if factor < 0:
|
||||
factor = dimension
|
||||
m, n = 1, dimension
|
||||
length = m + n
|
||||
while m<n:
|
||||
new_m = m + 1
|
||||
while dimension%new_m != 0:
|
||||
new_m += 1
|
||||
new_n = dimension // new_m
|
||||
if new_m + new_n > length or new_m>factor:
|
||||
break
|
||||
else:
|
||||
m, n = new_m, new_n
|
||||
if m > n:
|
||||
n, m = m, n
|
||||
return m, n
|
||||
|
@ -1,228 +0,0 @@
|
||||
from __future__ import annotations
|
||||
import os
|
||||
from collections import namedtuple
|
||||
import enum
|
||||
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
|
||||
from modules import sd_models, cache, errors, hashes, shared
|
||||
import modules.models.sd3.mmdit
|
||||
|
||||
NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd_module'])
|
||||
|
||||
metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}
|
||||
|
||||
|
||||
class SdVersion(enum.Enum):
|
||||
Unknown = 1
|
||||
SD1 = 2
|
||||
SD2 = 3
|
||||
SDXL = 4
|
||||
|
||||
|
||||
class NetworkOnDisk:
|
||||
def __init__(self, name, filename):
|
||||
self.name = name
|
||||
self.filename = filename
|
||||
self.metadata = {}
|
||||
self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors"
|
||||
|
||||
def read_metadata():
|
||||
metadata = sd_models.read_metadata_from_safetensors(filename)
|
||||
|
||||
return metadata
|
||||
|
||||
if self.is_safetensors:
|
||||
try:
|
||||
self.metadata = cache.cached_data_for_file('safetensors-metadata', "lora/" + self.name, filename, read_metadata)
|
||||
except Exception as e:
|
||||
errors.display(e, f"reading lora {filename}")
|
||||
|
||||
if self.metadata:
|
||||
m = {}
|
||||
for k, v in sorted(self.metadata.items(), key=lambda x: metadata_tags_order.get(x[0], 999)):
|
||||
m[k] = v
|
||||
|
||||
self.metadata = m
|
||||
|
||||
self.alias = self.metadata.get('ss_output_name', self.name)
|
||||
|
||||
self.hash = None
|
||||
self.shorthash = None
|
||||
self.set_hash(
|
||||
self.metadata.get('sshs_model_hash') or
|
||||
hashes.sha256_from_cache(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or
|
||||
''
|
||||
)
|
||||
|
||||
self.sd_version = self.detect_version()
|
||||
|
||||
def detect_version(self):
|
||||
if str(self.metadata.get('ss_base_model_version', "")).startswith("sdxl_"):
|
||||
return SdVersion.SDXL
|
||||
elif str(self.metadata.get('ss_v2', "")) == "True":
|
||||
return SdVersion.SD2
|
||||
elif len(self.metadata):
|
||||
return SdVersion.SD1
|
||||
|
||||
return SdVersion.Unknown
|
||||
|
||||
def set_hash(self, v):
|
||||
self.hash = v
|
||||
self.shorthash = self.hash[0:12]
|
||||
|
||||
if self.shorthash:
|
||||
import networks
|
||||
networks.available_network_hash_lookup[self.shorthash] = self
|
||||
|
||||
def read_hash(self):
|
||||
if not self.hash:
|
||||
self.set_hash(hashes.sha256(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or '')
|
||||
|
||||
def get_alias(self):
|
||||
import networks
|
||||
if shared.opts.lora_preferred_name == "Filename" or self.alias.lower() in networks.forbidden_network_aliases:
|
||||
return self.name
|
||||
else:
|
||||
return self.alias
|
||||
|
||||
|
||||
class Network: # LoraModule
|
||||
def __init__(self, name, network_on_disk: NetworkOnDisk):
|
||||
self.name = name
|
||||
self.network_on_disk = network_on_disk
|
||||
self.te_multiplier = 1.0
|
||||
self.unet_multiplier = 1.0
|
||||
self.dyn_dim = None
|
||||
self.modules = {}
|
||||
self.bundle_embeddings = {}
|
||||
self.mtime = None
|
||||
|
||||
self.mentioned_name = None
|
||||
"""the text that was used to add the network to prompt - can be either name or an alias"""
|
||||
|
||||
|
||||
class ModuleType:
|
||||
def create_module(self, net: Network, weights: NetworkWeights) -> Network | None:
|
||||
return None
|
||||
|
||||
|
||||
class NetworkModule:
|
||||
def __init__(self, net: Network, weights: NetworkWeights):
|
||||
self.network = net
|
||||
self.network_key = weights.network_key
|
||||
self.sd_key = weights.sd_key
|
||||
self.sd_module = weights.sd_module
|
||||
|
||||
if isinstance(self.sd_module, modules.models.sd3.mmdit.QkvLinear):
|
||||
s = self.sd_module.weight.shape
|
||||
self.shape = (s[0] // 3, s[1])
|
||||
elif hasattr(self.sd_module, 'weight'):
|
||||
self.shape = self.sd_module.weight.shape
|
||||
elif isinstance(self.sd_module, nn.MultiheadAttention):
|
||||
# For now, only self-attn uses Pytorch's MHA
|
||||
# So assume all qkvo proj have same shape
|
||||
self.shape = self.sd_module.out_proj.weight.shape
|
||||
else:
|
||||
self.shape = None
|
||||
|
||||
self.ops = None
|
||||
self.extra_kwargs = {}
|
||||
if isinstance(self.sd_module, nn.Conv2d):
|
||||
self.ops = F.conv2d
|
||||
self.extra_kwargs = {
|
||||
'stride': self.sd_module.stride,
|
||||
'padding': self.sd_module.padding
|
||||
}
|
||||
elif isinstance(self.sd_module, nn.Linear):
|
||||
self.ops = F.linear
|
||||
elif isinstance(self.sd_module, nn.LayerNorm):
|
||||
self.ops = F.layer_norm
|
||||
self.extra_kwargs = {
|
||||
'normalized_shape': self.sd_module.normalized_shape,
|
||||
'eps': self.sd_module.eps
|
||||
}
|
||||
elif isinstance(self.sd_module, nn.GroupNorm):
|
||||
self.ops = F.group_norm
|
||||
self.extra_kwargs = {
|
||||
'num_groups': self.sd_module.num_groups,
|
||||
'eps': self.sd_module.eps
|
||||
}
|
||||
|
||||
self.dim = None
|
||||
self.bias = weights.w.get("bias")
|
||||
self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None
|
||||
self.scale = weights.w["scale"].item() if "scale" in weights.w else None
|
||||
|
||||
self.dora_scale = weights.w.get("dora_scale", None)
|
||||
self.dora_norm_dims = len(self.shape) - 1
|
||||
|
||||
def multiplier(self):
|
||||
if 'transformer' in self.sd_key[:20]:
|
||||
return self.network.te_multiplier
|
||||
else:
|
||||
return self.network.unet_multiplier
|
||||
|
||||
def calc_scale(self):
|
||||
if self.scale is not None:
|
||||
return self.scale
|
||||
if self.dim is not None and self.alpha is not None:
|
||||
return self.alpha / self.dim
|
||||
|
||||
return 1.0
|
||||
|
||||
def apply_weight_decompose(self, updown, orig_weight):
|
||||
# Match the device/dtype
|
||||
orig_weight = orig_weight.to(updown.dtype)
|
||||
dora_scale = self.dora_scale.to(device=orig_weight.device, dtype=updown.dtype)
|
||||
updown = updown.to(orig_weight.device)
|
||||
|
||||
merged_scale1 = updown + orig_weight
|
||||
merged_scale1_norm = (
|
||||
merged_scale1.transpose(0, 1)
|
||||
.reshape(merged_scale1.shape[1], -1)
|
||||
.norm(dim=1, keepdim=True)
|
||||
.reshape(merged_scale1.shape[1], *[1] * self.dora_norm_dims)
|
||||
.transpose(0, 1)
|
||||
)
|
||||
|
||||
dora_merged = (
|
||||
merged_scale1 * (dora_scale / merged_scale1_norm)
|
||||
)
|
||||
final_updown = dora_merged - orig_weight
|
||||
return final_updown
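# In effect this is DoRA's weight decomposition: dora_scale holds a learned
# per-column magnitude m, the merged weight W0 + dW is renormalised so that
# each column has magnitude m (roughly W' = m * (W0 + dW) / ||W0 + dW||, column-wise),
# and the value returned is the delta W' - W0 that gets added back onto W0.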
|
||||
|
||||
def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
|
||||
if self.bias is not None:
|
||||
updown = updown.reshape(self.bias.shape)
|
||||
updown += self.bias.to(orig_weight.device, dtype=updown.dtype)
|
||||
updown = updown.reshape(output_shape)
|
||||
|
||||
if len(output_shape) == 4:
|
||||
updown = updown.reshape(output_shape)
|
||||
|
||||
if orig_weight.size().numel() == updown.size().numel():
|
||||
updown = updown.reshape(orig_weight.shape)
|
||||
|
||||
if ex_bias is not None:
|
||||
ex_bias = ex_bias * self.multiplier()
|
||||
|
||||
updown = updown * self.calc_scale()
|
||||
|
||||
if self.dora_scale is not None:
|
||||
updown = self.apply_weight_decompose(updown, orig_weight)
|
||||
|
||||
return updown * self.multiplier(), ex_bias
|
||||
|
||||
def calc_updown(self, target):
|
||||
raise NotImplementedError()
|
||||
|
||||
def forward(self, x, y):
|
||||
"""A general forward implementation for all modules"""
|
||||
if self.ops is None:
|
||||
raise NotImplementedError()
|
||||
else:
|
||||
updown, ex_bias = self.calc_updown(self.sd_module.weight)
|
||||
return y + self.ops(x, weight=updown, bias=ex_bias, **self.extra_kwargs)
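# Subclasses in the network_*.py files below only need to provide calc_updown();
# scaling (alpha / dim and the user multiplier), optional DoRA decomposition,
# bias handling and reshaping are shared via finalize_updown().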
|
||||
|
@ -1,27 +0,0 @@
import network


class ModuleTypeFull(network.ModuleType):
    def create_module(self, net: network.Network, weights: network.NetworkWeights):
        if all(x in weights.w for x in ["diff"]):
            return NetworkModuleFull(net, weights)

        return None


class NetworkModuleFull(network.NetworkModule):
    def __init__(self, net: network.Network, weights: network.NetworkWeights):
        super().__init__(net, weights)

        self.weight = weights.w.get("diff")
        self.ex_bias = weights.w.get("diff_b")

    def calc_updown(self, orig_weight):
        output_shape = self.weight.shape
        updown = self.weight.to(orig_weight.device)
        if self.ex_bias is not None:
            ex_bias = self.ex_bias.to(orig_weight.device)
        else:
            ex_bias = None

        return self.finalize_updown(updown, orig_weight, output_shape, ex_bias)
@ -1,33 +0,0 @@
import network


class ModuleTypeGLora(network.ModuleType):
    def create_module(self, net: network.Network, weights: network.NetworkWeights):
        if all(x in weights.w for x in ["a1.weight", "a2.weight", "alpha", "b1.weight", "b2.weight"]):
            return NetworkModuleGLora(net, weights)

        return None


# adapted from https://github.com/KohakuBlueleaf/LyCORIS
class NetworkModuleGLora(network.NetworkModule):
    def __init__(self, net: network.Network, weights: network.NetworkWeights):
        super().__init__(net, weights)

        if hasattr(self.sd_module, 'weight'):
            self.shape = self.sd_module.weight.shape

        self.w1a = weights.w["a1.weight"]
        self.w1b = weights.w["b1.weight"]
        self.w2a = weights.w["a2.weight"]
        self.w2b = weights.w["b2.weight"]

    def calc_updown(self, orig_weight):
        w1a = self.w1a.to(orig_weight.device)
        w1b = self.w1b.to(orig_weight.device)
        w2a = self.w2a.to(orig_weight.device)
        w2b = self.w2b.to(orig_weight.device)

        output_shape = [w1a.size(0), w1b.size(1)]
        updown = ((w2b @ w1b) + ((orig_weight.to(dtype=w1a.dtype) @ w2a) @ w1a))

        return self.finalize_updown(updown, orig_weight, output_shape)
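# Per calc_updown above, GLoRA's delta is roughly
#     dW = b2 @ b1 + (W @ a2) @ a1
# i.e. a plain low-rank additive term plus a low-rank transform of the
# original weight W itself.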
@ -1,55 +0,0 @@
import lyco_helpers
import network


class ModuleTypeHada(network.ModuleType):
    def create_module(self, net: network.Network, weights: network.NetworkWeights):
        if all(x in weights.w for x in ["hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b"]):
            return NetworkModuleHada(net, weights)

        return None


class NetworkModuleHada(network.NetworkModule):
    def __init__(self, net: network.Network, weights: network.NetworkWeights):
        super().__init__(net, weights)

        if hasattr(self.sd_module, 'weight'):
            self.shape = self.sd_module.weight.shape

        self.w1a = weights.w["hada_w1_a"]
        self.w1b = weights.w["hada_w1_b"]
        self.dim = self.w1b.shape[0]
        self.w2a = weights.w["hada_w2_a"]
        self.w2b = weights.w["hada_w2_b"]

        self.t1 = weights.w.get("hada_t1")
        self.t2 = weights.w.get("hada_t2")

    def calc_updown(self, orig_weight):
        w1a = self.w1a.to(orig_weight.device)
        w1b = self.w1b.to(orig_weight.device)
        w2a = self.w2a.to(orig_weight.device)
        w2b = self.w2b.to(orig_weight.device)

        output_shape = [w1a.size(0), w1b.size(1)]

        if self.t1 is not None:
            output_shape = [w1a.size(1), w1b.size(1)]
            t1 = self.t1.to(orig_weight.device)
            updown1 = lyco_helpers.make_weight_cp(t1, w1a, w1b)
            output_shape += t1.shape[2:]
        else:
            if len(w1b.shape) == 4:
                output_shape += w1b.shape[2:]
            updown1 = lyco_helpers.rebuild_conventional(w1a, w1b, output_shape)

        if self.t2 is not None:
            t2 = self.t2.to(orig_weight.device)
            updown2 = lyco_helpers.make_weight_cp(t2, w2a, w2b)
        else:
            updown2 = lyco_helpers.rebuild_conventional(w2a, w2b, output_shape)

        updown = updown1 * updown2

        return self.finalize_updown(updown, orig_weight, output_shape)
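# LoHa ("hada") builds its delta as the elementwise (Hadamard) product of two
# low-rank reconstructions: dW = (w1a @ w1b) * (w2a @ w2b), with the optional
# hada_t1 / hada_t2 cores used for the CP-decomposed convolutional case.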
@ -1,30 +0,0 @@
import network


class ModuleTypeIa3(network.ModuleType):
    def create_module(self, net: network.Network, weights: network.NetworkWeights):
        if all(x in weights.w for x in ["weight"]):
            return NetworkModuleIa3(net, weights)

        return None


class NetworkModuleIa3(network.NetworkModule):
    def __init__(self, net: network.Network, weights: network.NetworkWeights):
        super().__init__(net, weights)

        self.w = weights.w["weight"]
        self.on_input = weights.w["on_input"].item()

    def calc_updown(self, orig_weight):
        w = self.w.to(orig_weight.device)

        output_shape = [w.size(0), orig_weight.size(1)]
        if self.on_input:
            output_shape.reverse()
        else:
            w = w.reshape(-1, 1)

        updown = orig_weight * w

        return self.finalize_updown(updown, orig_weight, output_shape)
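# IA3 does not add a low-rank delta; it rescales the existing weight with the
# learned vector, broadcast over either the input or the output dimension
# depending on on_input. Since updown = W * v, at multiplier 1 the patched
# weight becomes W * (1 + v).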
@ -1,64 +0,0 @@
|
||||
import torch
|
||||
|
||||
import lyco_helpers
|
||||
import network
|
||||
|
||||
|
||||
class ModuleTypeLokr(network.ModuleType):
|
||||
def create_module(self, net: network.Network, weights: network.NetworkWeights):
|
||||
has_1 = "lokr_w1" in weights.w or ("lokr_w1_a" in weights.w and "lokr_w1_b" in weights.w)
|
||||
has_2 = "lokr_w2" in weights.w or ("lokr_w2_a" in weights.w and "lokr_w2_b" in weights.w)
|
||||
if has_1 and has_2:
|
||||
return NetworkModuleLokr(net, weights)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def make_kron(orig_shape, w1, w2):
|
||||
if len(w2.shape) == 4:
|
||||
w1 = w1.unsqueeze(2).unsqueeze(2)
|
||||
w2 = w2.contiguous()
|
||||
return torch.kron(w1, w2).reshape(orig_shape)
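# torch.kron of an (a, b) matrix with a (c, d) matrix yields an (a*c, b*d)
# matrix, e.g. kron of (4, 8) and (64, 96) factors covers a (256, 768) weight,
# which is how LoKr represents a full-size delta with two much smaller factors.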
|
||||
|
||||
|
||||
class NetworkModuleLokr(network.NetworkModule):
|
||||
def __init__(self, net: network.Network, weights: network.NetworkWeights):
|
||||
super().__init__(net, weights)
|
||||
|
||||
self.w1 = weights.w.get("lokr_w1")
|
||||
self.w1a = weights.w.get("lokr_w1_a")
|
||||
self.w1b = weights.w.get("lokr_w1_b")
|
||||
self.dim = self.w1b.shape[0] if self.w1b is not None else self.dim
|
||||
self.w2 = weights.w.get("lokr_w2")
|
||||
self.w2a = weights.w.get("lokr_w2_a")
|
||||
self.w2b = weights.w.get("lokr_w2_b")
|
||||
self.dim = self.w2b.shape[0] if self.w2b is not None else self.dim
|
||||
self.t2 = weights.w.get("lokr_t2")
|
||||
|
||||
def calc_updown(self, orig_weight):
|
||||
if self.w1 is not None:
|
||||
w1 = self.w1.to(orig_weight.device)
|
||||
else:
|
||||
w1a = self.w1a.to(orig_weight.device)
|
||||
w1b = self.w1b.to(orig_weight.device)
|
||||
w1 = w1a @ w1b
|
||||
|
||||
if self.w2 is not None:
|
||||
w2 = self.w2.to(orig_weight.device)
|
||||
elif self.t2 is None:
|
||||
w2a = self.w2a.to(orig_weight.device)
|
||||
w2b = self.w2b.to(orig_weight.device)
|
||||
w2 = w2a @ w2b
|
||||
else:
|
||||
t2 = self.t2.to(orig_weight.device)
|
||||
w2a = self.w2a.to(orig_weight.device)
|
||||
w2b = self.w2b.to(orig_weight.device)
|
||||
w2 = lyco_helpers.make_weight_cp(t2, w2a, w2b)
|
||||
|
||||
output_shape = [w1.size(0) * w2.size(0), w1.size(1) * w2.size(1)]
|
||||
if len(orig_weight.shape) == 4:
|
||||
output_shape = orig_weight.shape
|
||||
|
||||
updown = make_kron(output_shape, w1, w2)
|
||||
|
||||
return self.finalize_updown(updown, orig_weight, output_shape)
|
@ -1,94 +0,0 @@
|
||||
import torch
|
||||
|
||||
import lyco_helpers
|
||||
import modules.models.sd3.mmdit
|
||||
import network
|
||||
from modules import devices
|
||||
|
||||
|
||||
class ModuleTypeLora(network.ModuleType):
|
||||
def create_module(self, net: network.Network, weights: network.NetworkWeights):
|
||||
if all(x in weights.w for x in ["lora_up.weight", "lora_down.weight"]):
|
||||
return NetworkModuleLora(net, weights)
|
||||
|
||||
if all(x in weights.w for x in ["lora_A.weight", "lora_B.weight"]):
|
||||
w = weights.w.copy()
|
||||
weights.w.clear()
|
||||
weights.w.update({"lora_up.weight": w["lora_B.weight"], "lora_down.weight": w["lora_A.weight"]})
|
||||
|
||||
return NetworkModuleLora(net, weights)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class NetworkModuleLora(network.NetworkModule):
|
||||
def __init__(self, net: network.Network, weights: network.NetworkWeights):
|
||||
super().__init__(net, weights)
|
||||
|
||||
self.up_model = self.create_module(weights.w, "lora_up.weight")
|
||||
self.down_model = self.create_module(weights.w, "lora_down.weight")
|
||||
self.mid_model = self.create_module(weights.w, "lora_mid.weight", none_ok=True)
|
||||
|
||||
self.dim = weights.w["lora_down.weight"].shape[0]
|
||||
|
||||
def create_module(self, weights, key, none_ok=False):
|
||||
weight = weights.get(key)
|
||||
|
||||
if weight is None and none_ok:
|
||||
return None
|
||||
|
||||
is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention, modules.models.sd3.mmdit.QkvLinear]
|
||||
is_conv = type(self.sd_module) in [torch.nn.Conv2d]
|
||||
|
||||
if is_linear:
|
||||
weight = weight.reshape(weight.shape[0], -1)
|
||||
module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
|
||||
elif is_conv and key == "lora_down.weight" or key == "dyn_up":
|
||||
if len(weight.shape) == 2:
|
||||
weight = weight.reshape(weight.shape[0], -1, 1, 1)
|
||||
|
||||
if weight.shape[2] != 1 or weight.shape[3] != 1:
|
||||
module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False)
|
||||
else:
|
||||
module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
|
||||
elif is_conv and key == "lora_mid.weight":
|
||||
module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False)
|
||||
elif is_conv and key == "lora_up.weight" or key == "dyn_down":
|
||||
module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
|
||||
else:
|
||||
raise AssertionError(f'Lora layer {self.network_key} matched a layer with unsupported type: {type(self.sd_module).__name__}')
|
||||
|
||||
with torch.no_grad():
|
||||
if weight.shape != module.weight.shape:
|
||||
weight = weight.reshape(module.weight.shape)
|
||||
module.weight.copy_(weight)
|
||||
|
||||
module.to(device=devices.cpu, dtype=devices.dtype)
|
||||
module.weight.requires_grad_(False)
|
||||
|
||||
return module
|
||||
|
||||
def calc_updown(self, orig_weight):
|
||||
up = self.up_model.weight.to(orig_weight.device)
|
||||
down = self.down_model.weight.to(orig_weight.device)
|
||||
|
||||
output_shape = [up.size(0), down.size(1)]
|
||||
if self.mid_model is not None:
|
||||
# cp-decomposition
|
||||
mid = self.mid_model.weight.to(orig_weight.device)
|
||||
updown = lyco_helpers.rebuild_cp_decomposition(up, down, mid)
|
||||
output_shape += mid.shape[2:]
|
||||
else:
|
||||
if len(down.shape) == 4:
|
||||
output_shape += down.shape[2:]
|
||||
updown = lyco_helpers.rebuild_conventional(up, down, output_shape, self.network.dyn_dim)
|
||||
|
||||
return self.finalize_updown(updown, orig_weight, output_shape)
|
||||
|
||||
def forward(self, x, y):
|
||||
self.up_model.to(device=devices.device)
|
||||
self.down_model.to(device=devices.device)
|
||||
|
||||
return y + self.up_model(self.down_model(x)) * self.multiplier() * self.calc_scale()
|
||||
|
||||
|
@ -1,28 +0,0 @@
|
||||
import network
|
||||
|
||||
|
||||
class ModuleTypeNorm(network.ModuleType):
|
||||
def create_module(self, net: network.Network, weights: network.NetworkWeights):
|
||||
if all(x in weights.w for x in ["w_norm", "b_norm"]):
|
||||
return NetworkModuleNorm(net, weights)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class NetworkModuleNorm(network.NetworkModule):
|
||||
def __init__(self, net: network.Network, weights: network.NetworkWeights):
|
||||
super().__init__(net, weights)
|
||||
|
||||
self.w_norm = weights.w.get("w_norm")
|
||||
self.b_norm = weights.w.get("b_norm")
|
||||
|
||||
def calc_updown(self, orig_weight):
|
||||
output_shape = self.w_norm.shape
|
||||
updown = self.w_norm.to(orig_weight.device)
|
||||
|
||||
if self.b_norm is not None:
|
||||
ex_bias = self.b_norm.to(orig_weight.device)
|
||||
else:
|
||||
ex_bias = None
|
||||
|
||||
return self.finalize_updown(updown, orig_weight, output_shape, ex_bias)
|
@ -1,118 +0,0 @@
|
||||
import torch
|
||||
import network
|
||||
from einops import rearrange
|
||||
|
||||
|
||||
class ModuleTypeOFT(network.ModuleType):
|
||||
def create_module(self, net: network.Network, weights: network.NetworkWeights):
|
||||
if all(x in weights.w for x in ["oft_blocks"]) or all(x in weights.w for x in ["oft_diag"]):
|
||||
return NetworkModuleOFT(net, weights)
|
||||
|
||||
return None
|
||||
|
||||
# Supports both kohya-ss' implementation of COFT https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py
|
||||
# and KohakuBlueleaf's implementation of OFT/COFT https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/diag_oft.py
|
||||
class NetworkModuleOFT(network.NetworkModule):
|
||||
def __init__(self, net: network.Network, weights: network.NetworkWeights):
|
||||
|
||||
super().__init__(net, weights)
|
||||
|
||||
self.lin_module = None
|
||||
self.org_module: list[torch.nn.Module] = [self.sd_module]
|
||||
|
||||
self.scale = 1.0
|
||||
self.is_R = False
|
||||
self.is_boft = False
|
||||
|
||||
# kohya-ss/New LyCORIS OFT/BOFT
|
||||
if "oft_blocks" in weights.w.keys():
|
||||
self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size)
|
||||
self.alpha = weights.w.get("alpha", None) # alpha is constraint
|
||||
self.dim = self.oft_blocks.shape[0] # lora dim
|
||||
# Old LyCORIS OFT
|
||||
elif "oft_diag" in weights.w.keys():
|
||||
self.is_R = True
|
||||
self.oft_blocks = weights.w["oft_diag"]
|
||||
# self.alpha is unused
|
||||
self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size)
|
||||
|
||||
is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear]
|
||||
is_conv = type(self.sd_module) in [torch.nn.Conv2d]
|
||||
is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] # unsupported
|
||||
|
||||
if is_linear:
|
||||
self.out_dim = self.sd_module.out_features
|
||||
elif is_conv:
|
||||
self.out_dim = self.sd_module.out_channels
|
||||
elif is_other_linear:
|
||||
self.out_dim = self.sd_module.embed_dim
|
||||
|
||||
# LyCORIS BOFT
|
||||
if self.oft_blocks.dim() == 4:
|
||||
self.is_boft = True
|
||||
self.rescale = weights.w.get('rescale', None)
|
||||
if self.rescale is not None and not is_other_linear:
|
||||
self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1))
|
||||
|
||||
self.num_blocks = self.dim
|
||||
self.block_size = self.out_dim // self.dim
|
||||
self.constraint = (0 if self.alpha is None else self.alpha) * self.out_dim
|
||||
if self.is_R:
|
||||
self.constraint = None
|
||||
self.block_size = self.dim
|
||||
self.num_blocks = self.out_dim // self.dim
|
||||
elif self.is_boft:
|
||||
self.boft_m = self.oft_blocks.shape[0]
|
||||
self.num_blocks = self.oft_blocks.shape[1]
|
||||
self.block_size = self.oft_blocks.shape[2]
|
||||
self.boft_b = self.block_size
|
||||
|
||||
def calc_updown(self, orig_weight):
|
||||
oft_blocks = self.oft_blocks.to(orig_weight.device)
|
||||
eye = torch.eye(self.block_size, device=oft_blocks.device)
|
||||
|
||||
if not self.is_R:
|
||||
block_Q = oft_blocks - oft_blocks.transpose(-1, -2) # ensure skew-symmetric orthogonal matrix
|
||||
if self.constraint != 0:
|
||||
norm_Q = torch.norm(block_Q.flatten())
|
||||
new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device))
|
||||
block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
|
||||
oft_blocks = torch.matmul(eye + block_Q, (eye - block_Q).float().inverse())
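# (I + Q)(I - Q)^-1 is the Cayley transform: with Q skew-symmetric it produces
# an orthogonal block, so R applied below rotates the original weight rather
# than adding to it; the clamp above implements the COFT norm constraint.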
|
||||
|
||||
R = oft_blocks.to(orig_weight.device)
|
||||
|
||||
if not self.is_boft:
|
||||
# This errors out for MultiheadAttention, might need to be handled up-stream
|
||||
merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size)
|
||||
merged_weight = torch.einsum(
|
||||
'k n m, k n ... -> k m ...',
|
||||
R,
|
||||
merged_weight
|
||||
)
|
||||
merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...')
|
||||
else:
|
||||
# TODO: determine correct value for scale
|
||||
scale = 1.0
|
||||
m = self.boft_m
|
||||
b = self.boft_b
|
||||
r_b = b // 2
|
||||
inp = orig_weight
|
||||
for i in range(m):
|
||||
bi = R[i] # b_num, b_size, b_size
|
||||
if i == 0:
|
||||
# Apply multiplier/scale and rescale into first weight
|
||||
bi = bi * scale + (1 - scale) * eye
|
||||
inp = rearrange(inp, "(c g k) ... -> (c k g) ...", g=2, k=2**i * r_b)
|
||||
inp = rearrange(inp, "(d b) ... -> d b ...", b=b)
|
||||
inp = torch.einsum("b i j, b j ... -> b i ...", bi, inp)
|
||||
inp = rearrange(inp, "d b ... -> (d b) ...")
|
||||
inp = rearrange(inp, "(c k g) ... -> (c g k) ...", g=2, k=2**i * r_b)
|
||||
merged_weight = inp
|
||||
|
||||
# Rescale mechanism
|
||||
if self.rescale is not None:
|
||||
merged_weight = self.rescale.to(merged_weight) * merged_weight
|
||||
|
||||
updown = merged_weight.to(orig_weight.device) - orig_weight.to(merged_weight.dtype)
|
||||
output_shape = orig_weight.shape
|
||||
return self.finalize_updown(updown, orig_weight, output_shape)
|
@ -1,737 +0,0 @@
|
||||
from __future__ import annotations
|
||||
import gradio as gr
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
|
||||
import lora_patches
|
||||
import network
|
||||
import network_lora
|
||||
import network_glora
|
||||
import network_hada
|
||||
import network_ia3
|
||||
import network_lokr
|
||||
import network_full
|
||||
import network_norm
|
||||
import network_oft
|
||||
|
||||
import torch
|
||||
from typing import Union
|
||||
|
||||
from modules import shared, devices, sd_models, errors, scripts, sd_hijack
|
||||
import modules.textual_inversion.textual_inversion as textual_inversion
|
||||
import modules.models.sd3.mmdit
|
||||
|
||||
from lora_logger import logger
|
||||
|
||||
module_types = [
|
||||
network_lora.ModuleTypeLora(),
|
||||
network_hada.ModuleTypeHada(),
|
||||
network_ia3.ModuleTypeIa3(),
|
||||
network_lokr.ModuleTypeLokr(),
|
||||
network_full.ModuleTypeFull(),
|
||||
network_norm.ModuleTypeNorm(),
|
||||
network_glora.ModuleTypeGLora(),
|
||||
network_oft.ModuleTypeOFT(),
|
||||
]
|
||||
|
||||
|
||||
re_digits = re.compile(r"\d+")
|
||||
re_x_proj = re.compile(r"(.*)_([qkv]_proj)$")
|
||||
re_compiled = {}
|
||||
|
||||
suffix_conversion = {
|
||||
"attentions": {},
|
||||
"resnets": {
|
||||
"conv1": "in_layers_2",
|
||||
"conv2": "out_layers_3",
|
||||
"norm1": "in_layers_0",
|
||||
"norm2": "out_layers_0",
|
||||
"time_emb_proj": "emb_layers_1",
|
||||
"conv_shortcut": "skip_connection",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def convert_diffusers_name_to_compvis(key, is_sd2):
|
||||
def match(match_list, regex_text):
|
||||
regex = re_compiled.get(regex_text)
|
||||
if regex is None:
|
||||
regex = re.compile(regex_text)
|
||||
re_compiled[regex_text] = regex
|
||||
|
||||
r = re.match(regex, key)
|
||||
if not r:
|
||||
return False
|
||||
|
||||
match_list.clear()
|
||||
match_list.extend([int(x) if re.match(re_digits, x) else x for x in r.groups()])
|
||||
return True
|
||||
|
||||
m = []
|
||||
|
||||
if match(m, r"lora_unet_conv_in(.*)"):
|
||||
return f'diffusion_model_input_blocks_0_0{m[0]}'
|
||||
|
||||
if match(m, r"lora_unet_conv_out(.*)"):
|
||||
return f'diffusion_model_out_2{m[0]}'
|
||||
|
||||
if match(m, r"lora_unet_time_embedding_linear_(\d+)(.*)"):
|
||||
return f"diffusion_model_time_embed_{m[0] * 2 - 2}{m[1]}"
|
||||
|
||||
if match(m, r"lora_unet_down_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
|
||||
suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
|
||||
return f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"
|
||||
|
||||
if match(m, r"lora_unet_mid_block_(attentions|resnets)_(\d+)_(.+)"):
|
||||
suffix = suffix_conversion.get(m[0], {}).get(m[2], m[2])
|
||||
return f"diffusion_model_middle_block_{1 if m[0] == 'attentions' else m[1] * 2}_{suffix}"
|
||||
|
||||
if match(m, r"lora_unet_up_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
|
||||
suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
|
||||
return f"diffusion_model_output_blocks_{m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"
|
||||
|
||||
if match(m, r"lora_unet_down_blocks_(\d+)_downsamplers_0_conv"):
|
||||
return f"diffusion_model_input_blocks_{3 + m[0] * 3}_0_op"
|
||||
|
||||
if match(m, r"lora_unet_up_blocks_(\d+)_upsamplers_0_conv"):
|
||||
return f"diffusion_model_output_blocks_{2 + m[0] * 3}_{2 if m[0]>0 else 1}_conv"
|
||||
|
||||
if match(m, r"lora_te_text_model_encoder_layers_(\d+)_(.+)"):
|
||||
if is_sd2:
|
||||
if 'mlp_fc1' in m[1]:
|
||||
return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc1', 'mlp_c_fc')}"
|
||||
elif 'mlp_fc2' in m[1]:
|
||||
return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc2', 'mlp_c_proj')}"
|
||||
else:
|
||||
return f"model_transformer_resblocks_{m[0]}_{m[1].replace('self_attn', 'attn')}"
|
||||
|
||||
return f"transformer_text_model_encoder_layers_{m[0]}_{m[1]}"
|
||||
|
||||
if match(m, r"lora_te2_text_model_encoder_layers_(\d+)_(.+)"):
|
||||
if 'mlp_fc1' in m[1]:
|
||||
return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc1', 'mlp_c_fc')}"
|
||||
elif 'mlp_fc2' in m[1]:
|
||||
return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc2', 'mlp_c_proj')}"
|
||||
else:
|
||||
return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('self_attn', 'attn')}"
|
||||
|
||||
return key
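# Illustrative mapping (the key below is an example): a kohya/diffusers-style key
#     lora_unet_down_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q
# becomes the compvis-style name
#     diffusion_model_input_blocks_2_1_transformer_blocks_0_attn1_to_q
# (1 + 0*3 + 1 = 2, and "1" because the block is an attention block).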
|
||||
|
||||
|
||||
def assign_network_names_to_compvis_modules(sd_model):
|
||||
network_layer_mapping = {}
|
||||
|
||||
if shared.sd_model.is_sdxl:
|
||||
for i, embedder in enumerate(shared.sd_model.conditioner.embedders):
|
||||
if not hasattr(embedder, 'wrapped'):
|
||||
continue
|
||||
|
||||
for name, module in embedder.wrapped.named_modules():
|
||||
network_name = f'{i}_{name.replace(".", "_")}'
|
||||
network_layer_mapping[network_name] = module
|
||||
module.network_layer_name = network_name
|
||||
else:
|
||||
cond_stage_model = getattr(shared.sd_model.cond_stage_model, 'wrapped', shared.sd_model.cond_stage_model)
|
||||
|
||||
for name, module in cond_stage_model.named_modules():
|
||||
network_name = name.replace(".", "_")
|
||||
network_layer_mapping[network_name] = module
|
||||
module.network_layer_name = network_name
|
||||
|
||||
for name, module in shared.sd_model.model.named_modules():
|
||||
network_name = name.replace(".", "_")
|
||||
network_layer_mapping[network_name] = module
|
||||
module.network_layer_name = network_name
|
||||
|
||||
sd_model.network_layer_mapping = network_layer_mapping
|
||||
|
||||
|
||||
class BundledTIHash(str):
|
||||
def __init__(self, hash_str):
|
||||
self.hash = hash_str
|
||||
|
||||
def __str__(self):
|
||||
return self.hash if shared.opts.lora_bundled_ti_to_infotext else ''
|
||||
|
||||
|
||||
def load_network(name, network_on_disk):
|
||||
net = network.Network(name, network_on_disk)
|
||||
net.mtime = os.path.getmtime(network_on_disk.filename)
|
||||
|
||||
sd = sd_models.read_state_dict(network_on_disk.filename)
|
||||
|
||||
# this should not be needed but is here as an emergency fix for an unknown error people are experiencing in 1.2.0
|
||||
if not hasattr(shared.sd_model, 'network_layer_mapping'):
|
||||
assign_network_names_to_compvis_modules(shared.sd_model)
|
||||
|
||||
keys_failed_to_match = {}
|
||||
is_sd2 = 'model_transformer_resblocks' in shared.sd_model.network_layer_mapping
|
||||
if hasattr(shared.sd_model, 'diffusers_weight_map'):
|
||||
diffusers_weight_map = shared.sd_model.diffusers_weight_map
|
||||
elif hasattr(shared.sd_model, 'diffusers_weight_mapping'):
|
||||
diffusers_weight_map = {}
|
||||
for k, v in shared.sd_model.diffusers_weight_mapping():
|
||||
diffusers_weight_map[k] = v
|
||||
shared.sd_model.diffusers_weight_map = diffusers_weight_map
|
||||
else:
|
||||
diffusers_weight_map = None
|
||||
|
||||
matched_networks = {}
|
||||
bundle_embeddings = {}
|
||||
|
||||
for key_network, weight in sd.items():
|
||||
|
||||
if diffusers_weight_map:
|
||||
key_network_without_network_parts, network_name, network_weight = key_network.rsplit(".", 2)
|
||||
network_part = network_name + '.' + network_weight
|
||||
else:
|
||||
key_network_without_network_parts, _, network_part = key_network.partition(".")
|
||||
|
||||
if key_network_without_network_parts == "bundle_emb":
|
||||
emb_name, vec_name = network_part.split(".", 1)
|
||||
emb_dict = bundle_embeddings.get(emb_name, {})
|
||||
if vec_name.split('.')[0] == 'string_to_param':
|
||||
_, k2 = vec_name.split('.', 1)
|
||||
emb_dict['string_to_param'] = {k2: weight}
|
||||
else:
|
||||
emb_dict[vec_name] = weight
|
||||
bundle_embeddings[emb_name] = emb_dict
|
||||
|
||||
if diffusers_weight_map:
|
||||
key = diffusers_weight_map.get(key_network_without_network_parts, key_network_without_network_parts)
|
||||
else:
|
||||
key = convert_diffusers_name_to_compvis(key_network_without_network_parts, is_sd2)
|
||||
|
||||
sd_module = shared.sd_model.network_layer_mapping.get(key, None)
|
||||
|
||||
if sd_module is None:
|
||||
m = re_x_proj.match(key)
|
||||
if m:
|
||||
sd_module = shared.sd_model.network_layer_mapping.get(m.group(1), None)
|
||||
|
||||
# SDXL loras seem to already have correct compvis keys, so only need to replace "lora_unet" with "diffusion_model"
|
||||
if sd_module is None and "lora_unet" in key_network_without_network_parts:
|
||||
key = key_network_without_network_parts.replace("lora_unet", "diffusion_model")
|
||||
sd_module = shared.sd_model.network_layer_mapping.get(key, None)
|
||||
elif sd_module is None and "lora_te1_text_model" in key_network_without_network_parts:
|
||||
key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
|
||||
sd_module = shared.sd_model.network_layer_mapping.get(key, None)
|
||||
|
||||
# some SD1 Loras also have correct compvis keys
|
||||
if sd_module is None:
|
||||
key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model")
|
||||
sd_module = shared.sd_model.network_layer_mapping.get(key, None)
|
||||
|
||||
# kohya_ss OFT module
|
||||
elif sd_module is None and "oft_unet" in key_network_without_network_parts:
|
||||
key = key_network_without_network_parts.replace("oft_unet", "diffusion_model")
|
||||
sd_module = shared.sd_model.network_layer_mapping.get(key, None)
|
||||
|
||||
# KohakuBlueLeaf OFT module
|
||||
if sd_module is None and "oft_diag" in key:
|
||||
key = key_network_without_network_parts.replace("lora_unet", "diffusion_model")
|
||||
key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
|
||||
sd_module = shared.sd_model.network_layer_mapping.get(key, None)
|
||||
|
||||
if sd_module is None:
|
||||
keys_failed_to_match[key_network] = key
|
||||
continue
|
||||
|
||||
if key not in matched_networks:
|
||||
matched_networks[key] = network.NetworkWeights(network_key=key_network, sd_key=key, w={}, sd_module=sd_module)
|
||||
|
||||
matched_networks[key].w[network_part] = weight
|
||||
|
||||
for key, weights in matched_networks.items():
|
||||
net_module = None
|
||||
for nettype in module_types:
|
||||
net_module = nettype.create_module(net, weights)
|
||||
if net_module is not None:
|
||||
break
|
||||
|
||||
if net_module is None:
|
||||
raise AssertionError(f"Could not find a module type (out of {', '.join([x.__class__.__name__ for x in module_types])}) that would accept those keys: {', '.join(weights.w)}")
|
||||
|
||||
net.modules[key] = net_module
|
||||
|
||||
embeddings = {}
|
||||
for emb_name, data in bundle_embeddings.items():
|
||||
embedding = textual_inversion.create_embedding_from_data(data, emb_name, filename=network_on_disk.filename + "/" + emb_name)
|
||||
embedding.loaded = None
|
||||
embedding.shorthash = BundledTIHash(name)
|
||||
embeddings[emb_name] = embedding
|
||||
|
||||
net.bundle_embeddings = embeddings
|
||||
|
||||
if keys_failed_to_match:
|
||||
logging.debug(f"Network {network_on_disk.filename} didn't match keys: {keys_failed_to_match}")
|
||||
|
||||
return net
|
||||
|
||||
|
||||
def purge_networks_from_memory():
|
||||
while len(networks_in_memory) > shared.opts.lora_in_memory_limit and len(networks_in_memory) > 0:
|
||||
name = next(iter(networks_in_memory))
|
||||
networks_in_memory.pop(name, None)
|
||||
|
||||
devices.torch_gc()
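# Python dicts keep insertion order, so this evicts the oldest cached entries
# first until the cache fits within the lora_in_memory_limit option.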
|
||||
|
||||
|
||||
def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
|
||||
emb_db = sd_hijack.model_hijack.embedding_db
|
||||
already_loaded = {}
|
||||
|
||||
for net in loaded_networks:
|
||||
if net.name in names:
|
||||
already_loaded[net.name] = net
|
||||
for emb_name, embedding in net.bundle_embeddings.items():
|
||||
if embedding.loaded:
|
||||
emb_db.register_embedding_by_name(None, shared.sd_model, emb_name)
|
||||
|
||||
loaded_networks.clear()
|
||||
|
||||
unavailable_networks = []
|
||||
for name in names:
|
||||
if name.lower() in forbidden_network_aliases and available_networks.get(name) is None:
|
||||
unavailable_networks.append(name)
|
||||
elif available_network_aliases.get(name) is None:
|
||||
unavailable_networks.append(name)
|
||||
|
||||
if unavailable_networks:
|
||||
update_available_networks_by_names(unavailable_networks)
|
||||
|
||||
networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names]
|
||||
if any(x is None for x in networks_on_disk):
|
||||
list_available_networks()
|
||||
|
||||
networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names]
|
||||
|
||||
failed_to_load_networks = []
|
||||
|
||||
for i, (network_on_disk, name) in enumerate(zip(networks_on_disk, names)):
|
||||
net = already_loaded.get(name, None)
|
||||
|
||||
if network_on_disk is not None:
|
||||
if net is None:
|
||||
net = networks_in_memory.get(name)
|
||||
|
||||
if net is None or os.path.getmtime(network_on_disk.filename) > net.mtime:
|
||||
try:
|
||||
net = load_network(name, network_on_disk)
|
||||
|
||||
networks_in_memory.pop(name, None)
|
||||
networks_in_memory[name] = net
|
||||
except Exception as e:
|
||||
errors.display(e, f"loading network {network_on_disk.filename}")
|
||||
continue
|
||||
|
||||
net.mentioned_name = name
|
||||
|
||||
network_on_disk.read_hash()
|
||||
|
||||
if net is None:
|
||||
failed_to_load_networks.append(name)
|
||||
logging.info(f"Couldn't find network with name {name}")
|
||||
continue
|
||||
|
||||
net.te_multiplier = te_multipliers[i] if te_multipliers else 1.0
|
||||
net.unet_multiplier = unet_multipliers[i] if unet_multipliers else 1.0
|
||||
net.dyn_dim = dyn_dims[i] if dyn_dims else 1.0
|
||||
loaded_networks.append(net)
|
||||
|
||||
for emb_name, embedding in net.bundle_embeddings.items():
|
||||
if embedding.loaded is None and emb_name in emb_db.word_embeddings:
|
||||
logger.warning(
|
||||
f'Skip bundle embedding: "{emb_name}"'
|
||||
' as it was already loaded from embeddings folder'
|
||||
)
|
||||
continue
|
||||
|
||||
embedding.loaded = False
|
||||
if emb_db.expected_shape == -1 or emb_db.expected_shape == embedding.shape:
|
||||
embedding.loaded = True
|
||||
emb_db.register_embedding(embedding, shared.sd_model)
|
||||
else:
|
||||
emb_db.skipped_embeddings[name] = embedding
|
||||
|
||||
if failed_to_load_networks:
|
||||
lora_not_found_message = f'Lora not found: {", ".join(failed_to_load_networks)}'
|
||||
sd_hijack.model_hijack.comments.append(lora_not_found_message)
|
||||
if shared.opts.lora_not_found_warning_console:
|
||||
print(f'\n{lora_not_found_message}\n')
|
||||
if shared.opts.lora_not_found_gradio_warning:
|
||||
gr.Warning(lora_not_found_message)
|
||||
|
||||
purge_networks_from_memory()
|
||||
|
||||
|
||||
def allowed_layer_without_weight(layer):
|
||||
if isinstance(layer, torch.nn.LayerNorm) and not layer.elementwise_affine:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def store_weights_backup(weight):
|
||||
if weight is None:
|
||||
return None
|
||||
|
||||
return weight.to(devices.cpu, copy=True)
|
||||
|
||||
|
||||
def restore_weights_backup(obj, field, weight):
|
||||
if weight is None:
|
||||
setattr(obj, field, None)
|
||||
return
|
||||
|
||||
getattr(obj, field).copy_(weight)
|
||||
|
||||
|
||||
def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
|
||||
weights_backup = getattr(self, "network_weights_backup", None)
|
||||
bias_backup = getattr(self, "network_bias_backup", None)
|
||||
|
||||
if weights_backup is None and bias_backup is None:
|
||||
return
|
||||
|
||||
if weights_backup is not None:
|
||||
if isinstance(self, torch.nn.MultiheadAttention):
|
||||
restore_weights_backup(self, 'in_proj_weight', weights_backup[0])
|
||||
restore_weights_backup(self.out_proj, 'weight', weights_backup[1])
|
||||
else:
|
||||
restore_weights_backup(self, 'weight', weights_backup)
|
||||
|
||||
if isinstance(self, torch.nn.MultiheadAttention):
|
||||
restore_weights_backup(self.out_proj, 'bias', bias_backup)
|
||||
else:
|
||||
restore_weights_backup(self, 'bias', bias_backup)
|
||||
|
||||
|
||||
def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
|
||||
"""
|
||||
Applies the currently selected set of networks to the weights of torch layer self.
|
||||
If weights already have this particular set of networks applied, does nothing.
|
||||
If not, restores original weights from backup and alters weights according to networks.
|
||||
"""
|
||||
|
||||
network_layer_name = getattr(self, 'network_layer_name', None)
|
||||
if network_layer_name is None:
|
||||
return
|
||||
|
||||
current_names = getattr(self, "network_current_names", ())
|
||||
wanted_names = tuple((x.name, x.te_multiplier, x.unet_multiplier, x.dyn_dim) for x in loaded_networks)
|
||||
|
||||
weights_backup = getattr(self, "network_weights_backup", None)
|
||||
if weights_backup is None and wanted_names != ():
|
||||
if current_names != () and not allowed_layer_without_weight(self):
|
||||
raise RuntimeError(f"{network_layer_name} - no backup weights found and current weights are not unchanged")
|
||||
|
||||
if isinstance(self, torch.nn.MultiheadAttention):
|
||||
weights_backup = (store_weights_backup(self.in_proj_weight), store_weights_backup(self.out_proj.weight))
|
||||
else:
|
||||
weights_backup = store_weights_backup(self.weight)
|
||||
|
||||
self.network_weights_backup = weights_backup
|
||||
|
||||
bias_backup = getattr(self, "network_bias_backup", None)
|
||||
if bias_backup is None and wanted_names != ():
|
||||
if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None:
|
||||
bias_backup = store_weights_backup(self.out_proj.bias)
|
||||
elif getattr(self, 'bias', None) is not None:
|
||||
bias_backup = store_weights_backup(self.bias)
|
||||
else:
|
||||
bias_backup = None
|
||||
|
||||
# Unlike weight, which always has a value, some modules don't have a bias.
# Only raise if the layer actually has a bias and networks were already applied to it, so no clean backup exists.
|
||||
if bias_backup is not None and current_names != ():
|
||||
raise RuntimeError("no backup bias found and current bias are not unchanged")
|
||||
|
||||
self.network_bias_backup = bias_backup
|
||||
|
||||
if current_names != wanted_names:
|
||||
network_restore_weights_from_backup(self)
|
||||
|
||||
for net in loaded_networks:
|
||||
module = net.modules.get(network_layer_name, None)
|
||||
if module is not None and hasattr(self, 'weight') and not isinstance(module, modules.models.sd3.mmdit.QkvLinear):
|
||||
try:
|
||||
with torch.no_grad():
|
||||
if getattr(self, 'fp16_weight', None) is None:
|
||||
weight = self.weight
|
||||
bias = self.bias
|
||||
else:
|
||||
weight = self.fp16_weight.clone().to(self.weight.device)
|
||||
bias = getattr(self, 'fp16_bias', None)
|
||||
if bias is not None:
|
||||
bias = bias.clone().to(self.bias.device)
|
||||
updown, ex_bias = module.calc_updown(weight)
|
||||
|
||||
if len(weight.shape) == 4 and weight.shape[1] == 9:
|
||||
# inpainting model. zero pad updown to make channel[1] 4 to 9
|
||||
updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
|
||||
|
||||
self.weight.copy_((weight.to(dtype=updown.dtype) + updown).to(dtype=self.weight.dtype))
|
||||
if ex_bias is not None and hasattr(self, 'bias'):
|
||||
if self.bias is None:
|
||||
self.bias = torch.nn.Parameter(ex_bias).to(self.weight.dtype)
|
||||
else:
|
||||
self.bias.copy_((bias + ex_bias).to(dtype=self.bias.dtype))
|
||||
except RuntimeError as e:
|
||||
logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
|
||||
extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
|
||||
|
||||
continue
|
||||
|
||||
module_q = net.modules.get(network_layer_name + "_q_proj", None)
|
||||
module_k = net.modules.get(network_layer_name + "_k_proj", None)
|
||||
module_v = net.modules.get(network_layer_name + "_v_proj", None)
|
||||
module_out = net.modules.get(network_layer_name + "_out_proj", None)
|
||||
|
||||
if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
|
||||
try:
|
||||
with torch.no_grad():
|
||||
# Send "real" orig_weight into MHA's lora module
|
||||
qw, kw, vw = self.in_proj_weight.chunk(3, 0)
|
||||
updown_q, _ = module_q.calc_updown(qw)
|
||||
updown_k, _ = module_k.calc_updown(kw)
|
||||
updown_v, _ = module_v.calc_updown(vw)
|
||||
del qw, kw, vw
|
||||
updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
|
||||
updown_out, ex_bias = module_out.calc_updown(self.out_proj.weight)
|
||||
|
||||
self.in_proj_weight += updown_qkv
|
||||
self.out_proj.weight += updown_out
|
||||
if ex_bias is not None:
|
||||
if self.out_proj.bias is None:
|
||||
self.out_proj.bias = torch.nn.Parameter(ex_bias)
|
||||
else:
|
||||
self.out_proj.bias += ex_bias
|
||||
|
||||
except RuntimeError as e:
|
||||
logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
|
||||
extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
|
||||
|
||||
continue
|
||||
|
||||
if isinstance(self, modules.models.sd3.mmdit.QkvLinear) and module_q and module_k and module_v:
|
||||
try:
|
||||
with torch.no_grad():
|
||||
# Send "real" orig_weight into MHA's lora module
|
||||
qw, kw, vw = self.weight.chunk(3, 0)
|
||||
updown_q, _ = module_q.calc_updown(qw)
|
||||
updown_k, _ = module_k.calc_updown(kw)
|
||||
updown_v, _ = module_v.calc_updown(vw)
|
||||
del qw, kw, vw
|
||||
updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
|
||||
self.weight += updown_qkv
|
||||
|
||||
except RuntimeError as e:
|
||||
logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
|
||||
extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
|
||||
|
||||
continue
|
||||
|
||||
if module is None:
|
||||
continue
|
||||
|
||||
logging.debug(f"Network {net.name} layer {network_layer_name}: couldn't find supported operation")
|
||||
extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
|
||||
|
||||
self.network_current_names = wanted_names
|
||||
|
||||
|
||||
def network_forward(org_module, input, original_forward):
|
||||
"""
|
||||
Old way of applying Lora by executing operations during layer's forward.
|
||||
Stacking many loras this way results in big performance degradation.
|
||||
"""
|
||||
|
||||
if len(loaded_networks) == 0:
|
||||
return original_forward(org_module, input)
|
||||
|
||||
input = devices.cond_cast_unet(input)
|
||||
|
||||
network_restore_weights_from_backup(org_module)
|
||||
network_reset_cached_weight(org_module)
|
||||
|
||||
y = original_forward(org_module, input)
|
||||
|
||||
network_layer_name = getattr(org_module, 'network_layer_name', None)
|
||||
for lora in loaded_networks:
|
||||
module = lora.modules.get(network_layer_name, None)
|
||||
if module is None:
|
||||
continue
|
||||
|
||||
y = module.forward(input, y)
|
||||
|
||||
return y
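# This path is only used with the lora_functional compatibility option: every
# loaded network is re-applied on each forward call instead of being baked
# into the layer weights once by network_apply_weights(), which is why
# stacking many networks is slow here.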
|
||||
|
||||
|
||||
def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
|
||||
self.network_current_names = ()
|
||||
self.network_weights_backup = None
|
||||
self.network_bias_backup = None
|
||||
|
||||
|
||||
def network_Linear_forward(self, input):
|
||||
if shared.opts.lora_functional:
|
||||
return network_forward(self, input, originals.Linear_forward)
|
||||
|
||||
network_apply_weights(self)
|
||||
|
||||
return originals.Linear_forward(self, input)
|
||||
|
||||
|
||||
def network_Linear_load_state_dict(self, *args, **kwargs):
|
||||
network_reset_cached_weight(self)
|
||||
|
||||
return originals.Linear_load_state_dict(self, *args, **kwargs)
|
||||
|
||||
|
||||
def network_Conv2d_forward(self, input):
|
||||
if shared.opts.lora_functional:
|
||||
return network_forward(self, input, originals.Conv2d_forward)
|
||||
|
||||
network_apply_weights(self)
|
||||
|
||||
return originals.Conv2d_forward(self, input)
|
||||
|
||||
|
||||
def network_Conv2d_load_state_dict(self, *args, **kwargs):
|
||||
network_reset_cached_weight(self)
|
||||
|
||||
return originals.Conv2d_load_state_dict(self, *args, **kwargs)
|
||||
|
||||
|
||||
def network_GroupNorm_forward(self, input):
|
||||
if shared.opts.lora_functional:
|
||||
return network_forward(self, input, originals.GroupNorm_forward)
|
||||
|
||||
network_apply_weights(self)
|
||||
|
||||
return originals.GroupNorm_forward(self, input)
|
||||
|
||||
|
||||
def network_GroupNorm_load_state_dict(self, *args, **kwargs):
|
||||
network_reset_cached_weight(self)
|
||||
|
||||
return originals.GroupNorm_load_state_dict(self, *args, **kwargs)
|
||||
|
||||
|
||||
def network_LayerNorm_forward(self, input):
|
||||
if shared.opts.lora_functional:
|
||||
return network_forward(self, input, originals.LayerNorm_forward)
|
||||
|
||||
network_apply_weights(self)
|
||||
|
||||
return originals.LayerNorm_forward(self, input)
|
||||
|
||||
|
||||
def network_LayerNorm_load_state_dict(self, *args, **kwargs):
|
||||
network_reset_cached_weight(self)
|
||||
|
||||
return originals.LayerNorm_load_state_dict(self, *args, **kwargs)
|
||||
|
||||
|
||||
def network_MultiheadAttention_forward(self, *args, **kwargs):
|
||||
network_apply_weights(self)
|
||||
|
||||
return originals.MultiheadAttention_forward(self, *args, **kwargs)
|
||||
|
||||
|
||||
def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
|
||||
network_reset_cached_weight(self)
|
||||
|
||||
return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs)
|
||||
|
||||
|
||||
def process_network_files(names: list[str] | None = None):
|
||||
candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
|
||||
candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir_backcompat, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
|
||||
for filename in candidates:
|
||||
if os.path.isdir(filename):
|
||||
continue
|
||||
name = os.path.splitext(os.path.basename(filename))[0]
|
||||
# if names is provided, only load networks with names in the list
|
||||
if names and name not in names:
|
||||
continue
|
||||
try:
|
||||
entry = network.NetworkOnDisk(name, filename)
|
||||
except OSError: # should catch FileNotFoundError and PermissionError etc.
|
||||
errors.report(f"Failed to load network {name} from {filename}", exc_info=True)
|
||||
continue
|
||||
|
||||
available_networks[name] = entry
|
||||
|
||||
if entry.alias in available_network_aliases:
|
||||
forbidden_network_aliases[entry.alias.lower()] = 1
|
||||
|
||||
available_network_aliases[name] = entry
|
||||
available_network_aliases[entry.alias] = entry
|
||||
|
||||
|
||||
def update_available_networks_by_names(names: list[str]):
|
||||
process_network_files(names)
|
||||
|
||||
|
||||
def list_available_networks():
|
||||
available_networks.clear()
|
||||
available_network_aliases.clear()
|
||||
forbidden_network_aliases.clear()
|
||||
available_network_hash_lookup.clear()
|
||||
forbidden_network_aliases.update({"none": 1, "Addams": 1})
|
||||
|
||||
os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
|
||||
|
||||
process_network_files()
|
||||
|
||||
|
||||
re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")
|
||||
|
||||
|
||||
def infotext_pasted(infotext, params):
|
||||
if "AddNet Module 1" in [x[1] for x in scripts.scripts_txt2img.infotext_fields]:
|
||||
return # if the other extension is active, it will handle those fields, no need to do anything
|
||||
|
||||
added = []
|
||||
|
||||
for k in params:
|
||||
if not k.startswith("AddNet Model "):
|
||||
continue
|
||||
|
||||
num = k[13:]
|
||||
|
||||
if params.get("AddNet Module " + num) != "LoRA":
|
||||
continue
|
||||
|
||||
name = params.get("AddNet Model " + num)
|
||||
if name is None:
|
||||
continue
|
||||
|
||||
m = re_network_name.match(name)
|
||||
if m:
|
||||
name = m.group(1)
|
||||
|
||||
multiplier = params.get("AddNet Weight A " + num, "1.0")
|
||||
|
||||
added.append(f"<lora:{name}:{multiplier}>")
|
||||
|
||||
if added:
|
||||
params["Prompt"] += "\n" + "".join(added)
|
||||
|
||||
|
||||
originals: lora_patches.LoraPatches = None
|
||||
|
||||
extra_network_lora = None
|
||||
|
||||
available_networks = {}
|
||||
available_network_aliases = {}
|
||||
loaded_networks = []
|
||||
loaded_bundle_embeddings = {}
|
||||
networks_in_memory = {}
|
||||
available_network_hash_lookup = {}
|
||||
forbidden_network_aliases = {}
|
||||
|
||||
list_available_networks()
|
@ -1,8 +1,6 @@
import os
from modules import paths
from modules.paths_internal import normalized_filepath


def preload(parser):
    parser.add_argument("--lora-dir", type=normalized_filepath, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora'))
    parser.add_argument("--lyco-dir-backcompat", type=normalized_filepath, help="Path to directory with LyCORIS networks (for backwards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS'))
    parser.add_argument("--lora-dir", type=str, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora'))
@ -1,56 +1,72 @@
|
||||
import re
|
||||
|
||||
import torch
|
||||
import gradio as gr
|
||||
from fastapi import FastAPI
|
||||
|
||||
import network
|
||||
import networks
|
||||
import lora # noqa:F401
|
||||
import lora_patches
|
||||
import lora
|
||||
import extra_networks_lora
|
||||
import ui_extra_networks_lora
|
||||
from modules import script_callbacks, ui_extra_networks, extra_networks, shared
|
||||
|
||||
|
||||
def unload():
|
||||
networks.originals.undo()
|
||||
torch.nn.Linear.forward = torch.nn.Linear_forward_before_lora
|
||||
torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_lora
|
||||
torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_lora
|
||||
torch.nn.Conv2d._load_from_state_dict = torch.nn.Conv2d_load_state_dict_before_lora
|
||||
torch.nn.MultiheadAttention.forward = torch.nn.MultiheadAttention_forward_before_lora
|
||||
torch.nn.MultiheadAttention._load_from_state_dict = torch.nn.MultiheadAttention_load_state_dict_before_lora
|
||||
|
||||
|
||||
def before_ui():
|
||||
ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())
|
||||
|
||||
networks.extra_network_lora = extra_networks_lora.ExtraNetworkLora()
|
||||
extra_networks.register_extra_network(networks.extra_network_lora)
|
||||
extra_networks.register_extra_network_alias(networks.extra_network_lora, "lyco")
|
||||
extra_networks.register_extra_network(extra_networks_lora.ExtraNetworkLora())
|
||||
|
||||
|
||||
networks.originals = lora_patches.LoraPatches()
|
||||
if not hasattr(torch.nn, 'Linear_forward_before_lora'):
|
||||
torch.nn.Linear_forward_before_lora = torch.nn.Linear.forward
|
||||
|
||||
script_callbacks.on_model_loaded(networks.assign_network_names_to_compvis_modules)
|
||||
if not hasattr(torch.nn, 'Linear_load_state_dict_before_lora'):
|
||||
torch.nn.Linear_load_state_dict_before_lora = torch.nn.Linear._load_from_state_dict
|
||||
|
||||
if not hasattr(torch.nn, 'Conv2d_forward_before_lora'):
|
||||
torch.nn.Conv2d_forward_before_lora = torch.nn.Conv2d.forward
|
||||
|
||||
if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_lora'):
|
||||
torch.nn.Conv2d_load_state_dict_before_lora = torch.nn.Conv2d._load_from_state_dict
|
||||
|
||||
if not hasattr(torch.nn, 'MultiheadAttention_forward_before_lora'):
|
||||
torch.nn.MultiheadAttention_forward_before_lora = torch.nn.MultiheadAttention.forward
|
||||
|
||||
if not hasattr(torch.nn, 'MultiheadAttention_load_state_dict_before_lora'):
|
||||
torch.nn.MultiheadAttention_load_state_dict_before_lora = torch.nn.MultiheadAttention._load_from_state_dict
|
||||
|
||||
torch.nn.Linear.forward = lora.lora_Linear_forward
|
||||
torch.nn.Linear._load_from_state_dict = lora.lora_Linear_load_state_dict
|
||||
torch.nn.Conv2d.forward = lora.lora_Conv2d_forward
|
||||
torch.nn.Conv2d._load_from_state_dict = lora.lora_Conv2d_load_state_dict
|
||||
torch.nn.MultiheadAttention.forward = lora.lora_MultiheadAttention_forward
|
||||
torch.nn.MultiheadAttention._load_from_state_dict = lora.lora_MultiheadAttention_load_state_dict
|
||||
|
||||
script_callbacks.on_model_loaded(lora.assign_lora_names_to_compvis_modules)
|
||||
script_callbacks.on_script_unloaded(unload)
|
||||
script_callbacks.on_before_ui(before_ui)
|
||||
script_callbacks.on_infotext_pasted(networks.infotext_pasted)
|
||||
script_callbacks.on_infotext_pasted(lora.infotext_pasted)
|
||||
|
||||
|
||||
shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
|
||||
"sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks),
|
||||
"sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None", *lora.available_loras]}, refresh=lora.list_available_loras),
|
||||
"lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
|
||||
"lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
|
||||
"lora_bundled_ti_to_infotext": shared.OptionInfo(True, "Add Lora name as TI hashes for bundled Textual Inversion").info('"Add Textual Inversion hashes to infotext" needs to be enabled'),
|
||||
"lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"),
|
||||
"lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
|
||||
"lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}),
|
||||
"lora_not_found_warning_console": shared.OptionInfo(False, "Lora not found warning in console"),
|
||||
"lora_not_found_gradio_warning": shared.OptionInfo(False, "Lora not found warning popup in webui"),
|
||||
}))
|
||||
|
||||
|
||||
shared.options_templates.update(shared.options_section(('compatibility', "Compatibility"), {
|
||||
"lora_functional": shared.OptionInfo(False, "Lora/Networks: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"),
|
||||
"lora_functional": shared.OptionInfo(False, "Lora: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"),
|
||||
}))
|
||||
|
||||
|
||||
def create_lora_json(obj: network.NetworkOnDisk):
|
||||
def create_lora_json(obj: lora.LoraOnDisk):
|
||||
return {
|
||||
"name": obj.name,
|
||||
"alias": obj.alias,
|
||||
@ -59,17 +75,17 @@ def create_lora_json(obj: network.NetworkOnDisk):
|
||||
}
|
||||
|
||||
|
||||
def api_networks(_: gr.Blocks, app: FastAPI):
|
||||
def api_loras(_: gr.Blocks, app: FastAPI):
|
||||
@app.get("/sdapi/v1/loras")
|
||||
async def get_loras():
|
||||
return [create_lora_json(obj) for obj in networks.available_networks.values()]
|
||||
return [create_lora_json(obj) for obj in lora.available_loras.values()]
|
||||
|
||||
@app.post("/sdapi/v1/refresh-loras")
|
||||
async def refresh_loras():
|
||||
return networks.list_available_networks()
|
||||
return lora.list_available_loras()
|
||||
|
||||
|
||||
script_callbacks.on_app_started(api_networks)
|
||||
script_callbacks.on_app_started(api_loras)
|
||||
|
||||
re_lora = re.compile("<lora:([^:]+):")
|
||||
|
||||
@ -82,21 +98,19 @@ def infotext_pasted(infotext, d):
|
||||
hashes = [x.strip().split(':', 1) for x in hashes.split(",")]
|
||||
hashes = {x[0].strip().replace(",", ""): x[1].strip() for x in hashes}
|
||||
|
||||
def network_replacement(m):
|
||||
def lora_replacement(m):
|
||||
alias = m.group(1)
|
||||
shorthash = hashes.get(alias)
|
||||
if shorthash is None:
|
||||
return m.group(0)
|
||||
|
||||
network_on_disk = networks.available_network_hash_lookup.get(shorthash)
|
||||
if network_on_disk is None:
|
||||
lora_on_disk = lora.available_lora_hash_lookup.get(shorthash)
|
||||
if lora_on_disk is None:
|
||||
return m.group(0)
|
||||
|
||||
return f'<lora:{network_on_disk.get_alias()}:'
|
||||
return f'<lora:{lora_on_disk.get_alias()}:'
|
||||
|
||||
d["Prompt"] = re.sub(re_lora, network_replacement, d["Prompt"])
|
||||
d["Prompt"] = re.sub(re_lora, lora_replacement, d["Prompt"])
|
||||
|
||||
|
||||
script_callbacks.on_infotext_pasted(infotext_pasted)
|
||||
|
||||
shared.opts.onchange("lora_in_memory_limit", networks.purge_networks_from_memory)
|
||||
|
@ -1,226 +0,0 @@
import datetime
import html
import random

import gradio as gr
import re

from modules import ui_extra_networks_user_metadata


def is_non_comma_tagset(tags):
    average_tag_length = sum(len(x) for x in tags.keys()) / len(tags)

    return average_tag_length >= 16


re_word = re.compile(r"[-_\w']+")
re_comma = re.compile(r" *, *")


def build_tags(metadata):
    tags = {}

    ss_tag_frequency = metadata.get("ss_tag_frequency", {})
    if ss_tag_frequency is not None and hasattr(ss_tag_frequency, 'items'):
        for _, tags_dict in ss_tag_frequency.items():
            for tag, tag_count in tags_dict.items():
                tag = tag.strip()
                tags[tag] = tags.get(tag, 0) + int(tag_count)

    if tags and is_non_comma_tagset(tags):
        new_tags = {}

        for text, text_count in tags.items():
            for word in re.findall(re_word, text):
                if len(word) < 3:
                    continue

                new_tags[word] = new_tags.get(word, 0) + text_count

        tags = new_tags

    ordered_tags = sorted(tags.keys(), key=tags.get, reverse=True)

    return [(tag, tags[tag]) for tag in ordered_tags]
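# Sketch of what build_tags() produces for a made-up kohya-style
# ss_tag_frequency payload (dataset folder name and tags are hypothetical):
example_metadata = {
    "ss_tag_frequency": {
        "10_concept": {"1girl": 40, "solo": 35, "red hair": 12},
    }
}
print(build_tags(example_metadata))
# -> [('1girl', 40), ('solo', 35), ('red hair', 12)]  (most frequent first)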


class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor):
    def __init__(self, ui, tabname, page):
        super().__init__(ui, tabname, page)

        self.select_sd_version = None

        self.taginfo = None
        self.edit_activation_text = None
        self.slider_preferred_weight = None
        self.edit_notes = None

    def save_lora_user_metadata(self, name, desc, sd_version, activation_text, preferred_weight, negative_text, notes):
        user_metadata = self.get_user_metadata(name)
        user_metadata["description"] = desc
        user_metadata["sd version"] = sd_version
        user_metadata["activation text"] = activation_text
        user_metadata["preferred weight"] = preferred_weight
        user_metadata["negative text"] = negative_text
        user_metadata["notes"] = notes

        self.write_user_metadata(name, user_metadata)

    def get_metadata_table(self, name):
        table = super().get_metadata_table(name)
        item = self.page.items.get(name, {})
        metadata = item.get("metadata") or {}

        keys = {
            'ss_output_name': "Output name:",
            'ss_sd_model_name': "Model:",
            'ss_clip_skip': "Clip skip:",
            'ss_network_module': "Kohya module:",
        }

        for key, label in keys.items():
            value = metadata.get(key, None)
            if value is not None and str(value) != "None":
                table.append((label, html.escape(value)))

        ss_training_started_at = metadata.get('ss_training_started_at')
        if ss_training_started_at:
            table.append(("Date trained:", datetime.datetime.utcfromtimestamp(float(ss_training_started_at)).strftime('%Y-%m-%d %H:%M')))

        ss_bucket_info = metadata.get("ss_bucket_info")
        if ss_bucket_info and "buckets" in ss_bucket_info:
            resolutions = {}
            for _, bucket in ss_bucket_info["buckets"].items():
                resolution = bucket["resolution"]
                resolution = f'{resolution[1]}x{resolution[0]}'

                resolutions[resolution] = resolutions.get(resolution, 0) + int(bucket["count"])

            resolutions_list = sorted(resolutions.keys(), key=resolutions.get, reverse=True)
            resolutions_text = html.escape(", ".join(resolutions_list[0:4]))
            if len(resolutions) > 4:
                resolutions_text += ", ..."
            resolutions_text = f"<span title='{html.escape(', '.join(resolutions_list))}'>{resolutions_text}</span>"

            table.append(('Resolutions:' if len(resolutions_list) > 1 else 'Resolution:', resolutions_text))

        image_count = 0
        for _, params in metadata.get("ss_dataset_dirs", {}).items():
            image_count += int(params.get("img_count", 0))

        if image_count:
            table.append(("Dataset size:", image_count))

        return table

    def put_values_into_components(self, name):
        user_metadata = self.get_user_metadata(name)
        values = super().put_values_into_components(name)

        item = self.page.items.get(name, {})
        metadata = item.get("metadata") or {}

        tags = build_tags(metadata)
        gradio_tags = [(tag, str(count)) for tag, count in tags[0:24]]

        return [
            *values[0:5],
            item.get("sd_version", "Unknown"),
            gr.HighlightedText.update(value=gradio_tags, visible=True if tags else False),
            user_metadata.get('activation text', ''),
            float(user_metadata.get('preferred weight', 0.0)),
            user_metadata.get('negative text', ''),
            gr.update(visible=True if tags else False),
            gr.update(value=self.generate_random_prompt_from_tags(tags), visible=True if tags else False),
        ]

    def generate_random_prompt(self, name):
        item = self.page.items.get(name, {})
        metadata = item.get("metadata") or {}
        tags = build_tags(metadata)

        return self.generate_random_prompt_from_tags(tags)

    def generate_random_prompt_from_tags(self, tags):
        max_count = None
        res = []
        for tag, count in tags:
            if not max_count:
                max_count = count

            v = random.random() * max_count
            if count > v:
                for x in "({[]})":
                    tag = tag.replace(x, '\\' + x)
                res.append(tag)

        return ", ".join(sorted(res))
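    # Note on the sampling above: each tag is kept with probability count / max_count
    # (the first, most frequent tag always survives because its count equals max_count),
    # prompt-control characters ( ) { } [ ] are backslash-escaped, and the surviving
    # tags are joined in alphabetical order.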

    def create_extra_default_items_in_left_column(self):

        # this would be a lot better as gr.Radio but I can't make it work
        self.select_sd_version = gr.Dropdown(['SD1', 'SD2', 'SDXL', 'Unknown'], value='Unknown', label='Stable Diffusion version', interactive=True)

    def create_editor(self):
        self.create_default_editor_elems()

        self.taginfo = gr.HighlightedText(label="Training dataset tags")
        self.edit_activation_text = gr.Text(label='Activation text', info="Will be added to prompt along with Lora")
        self.slider_preferred_weight = gr.Slider(label='Preferred weight', info="Set to 0 to disable", minimum=0.0, maximum=2.0, step=0.01)
        self.edit_negative_text = gr.Text(label='Negative prompt', info="Will be added to negative prompts")
        with gr.Row() as row_random_prompt:
            with gr.Column(scale=8):
                random_prompt = gr.Textbox(label='Random prompt', lines=4, max_lines=4, interactive=False)

            with gr.Column(scale=1, min_width=120):
                generate_random_prompt = gr.Button('Generate', size="lg", scale=1)

        self.edit_notes = gr.TextArea(label='Notes', lines=4)

        generate_random_prompt.click(fn=self.generate_random_prompt, inputs=[self.edit_name_input], outputs=[random_prompt], show_progress=False)

        def select_tag(activation_text, evt: gr.SelectData):
            tag = evt.value[0]

            words = re.split(re_comma, activation_text)
            if tag in words:
                words = [x for x in words if x != tag and x.strip()]
                return ", ".join(words)

            return activation_text + ", " + tag if activation_text else tag

        self.taginfo.select(fn=select_tag, inputs=[self.edit_activation_text], outputs=[self.edit_activation_text], show_progress=False)

        self.create_default_buttons()

        viewed_components = [
            self.edit_name,
            self.edit_description,
            self.html_filedata,
            self.html_preview,
            self.edit_notes,
            self.select_sd_version,
            self.taginfo,
            self.edit_activation_text,
            self.slider_preferred_weight,
            self.edit_negative_text,
            row_random_prompt,
            random_prompt,
        ]

        self.button_edit\
            .click(fn=self.put_values_into_components, inputs=[self.edit_name_input], outputs=viewed_components)\
            .then(fn=lambda: gr.update(visible=True), inputs=[], outputs=[self.box])

        edited_components = [
            self.edit_description,
            self.select_sd_version,
            self.edit_activation_text,
            self.slider_preferred_weight,
            self.edit_negative_text,
            self.edit_notes,
        ]


        self.setup_save_handler(self.button_save, self.save_lora_user_metadata, edited_components)
@ -1,11 +1,8 @@
import json
import os

import network
import networks
import lora

from modules import shared, ui_extra_networks
from modules.ui_extra_networks import quote_js
from ui_edit_user_metadata import LoraUserMetadataEditor


class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
@ -13,78 +10,25 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
        super().__init__('Lora')

    def refresh(self):
        networks.list_available_networks()

    def create_item(self, name, index=None, enable_filter=True):
        lora_on_disk = networks.available_networks.get(name)
        if lora_on_disk is None:
            return

        path, ext = os.path.splitext(lora_on_disk.filename)

        alias = lora_on_disk.get_alias()

        search_terms = [self.search_terms_from_path(lora_on_disk.filename)]
        if lora_on_disk.hash:
            search_terms.append(lora_on_disk.hash)
        item = {
            "name": name,
            "filename": lora_on_disk.filename,
            "shorthash": lora_on_disk.shorthash,
            "preview": self.find_preview(path) or self.find_embedded_preview(path, name, lora_on_disk.metadata),
            "description": self.find_description(path),
            "search_terms": search_terms,
            "local_preview": f"{path}.{shared.opts.samples_format}",
            "metadata": lora_on_disk.metadata,
            "sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)},
            "sd_version": lora_on_disk.sd_version.name,
        }

        self.read_user_metadata(item)
        activation_text = item["user_metadata"].get("activation text")
        preferred_weight = item["user_metadata"].get("preferred weight", 0.0)
        item["prompt"] = quote_js(f"<lora:{alias}:") + " + " + (str(preferred_weight) if preferred_weight else "opts.extra_networks_default_multiplier") + " + " + quote_js(">")

        if activation_text:
            item["prompt"] += " + " + quote_js(" " + activation_text)

        negative_prompt = item["user_metadata"].get("negative text")
        item["negative_prompt"] = quote_js("")
        if negative_prompt:
            item["negative_prompt"] = quote_js('(' + negative_prompt + ':1)')

        sd_version = item["user_metadata"].get("sd version")
        if sd_version in network.SdVersion.__members__:
            item["sd_version"] = sd_version
            sd_version = network.SdVersion[sd_version]
        else:
            sd_version = lora_on_disk.sd_version

        if shared.opts.lora_show_all or not enable_filter or not shared.sd_model:
            pass
        elif sd_version == network.SdVersion.Unknown:
            model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1
            if model_version.name in shared.opts.lora_hide_unknown_for_versions:
                return None
        elif shared.sd_model.is_sdxl and sd_version != network.SdVersion.SDXL:
            return None
        elif shared.sd_model.is_sd2 and sd_version != network.SdVersion.SD2:
            return None
        elif shared.sd_model.is_sd1 and sd_version != network.SdVersion.SD1:
            return None

        return item
        lora.list_available_loras()

    def list_items(self):
        # instantiate a list to protect against concurrent modification
        names = list(networks.available_networks)
        for index, name in enumerate(names):
            item = self.create_item(name, index)
            if item is not None:
                yield item
        for name, lora_on_disk in lora.available_loras.items():
            path, ext = os.path.splitext(lora_on_disk.filename)

            alias = lora_on_disk.get_alias()

            yield {
                "name": name,
                "filename": path,
                "preview": self.find_preview(path),
                "description": self.find_description(path),
                "search_term": self.search_terms_from_path(lora_on_disk.filename),
                "prompt": json.dumps(f"<lora:{alias}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
                "local_preview": f"{path}.{shared.opts.samples_format}",
                "metadata": json.dumps(lora_on_disk.metadata, indent=4) if lora_on_disk.metadata else None,
            }

    def allowed_directories_for_previews(self):
        return [shared.cmd_opts.lora_dir, shared.cmd_opts.lyco_dir_backcompat]
        return [shared.cmd_opts.lora_dir]

    def create_user_metadata_editor(self, ui, tabname):
        return LoraUserMetadataEditor(ui, tabname, self)
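# For reference, item["prompt"] above is a JavaScript expression evaluated by the
# extra-networks card in the browser. For a hypothetical network with alias
# "style_v2", no preferred weight and activation text "oil painting", it comes out as:
#   "<lora:style_v2:" + opts.extra_networks_default_multiplier + ">" + " oil painting"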
@ -1,9 +1,18 @@
import os.path
import sys
import traceback

import PIL.Image
import numpy as np
import torch
from tqdm import tqdm

from basicsr.utils.download_util import load_file_from_url

import modules.upscaler
from modules import devices, errors, modelloader, script_callbacks, shared, upscaler_utils
from modules import devices, modelloader, script_callbacks
from scunet_model_arch import SCUNet as net
from modules.shared import opts


class UpscalerScuNET(modules.upscaler.Upscaler):
@ -19,7 +28,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
        scalers = []
        add_model2 = True
        for file in model_paths:
            if file.startswith("http"):
            if "http" in file:
                name = self.model_name
            else:
                name = modelloader.friendly_name(file)
@ -29,43 +38,109 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
                scaler_data = modules.upscaler.UpscalerData(name, file, self, 4)
                scalers.append(scaler_data)
            except Exception:
                errors.report(f"Error loading ScuNET model: {file}", exc_info=True)
                print(f"Error loading ScuNET model: {file}", file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)
        if add_model2:
            scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self)
            scalers.append(scaler_data2)
        self.scalers = scalers

    @staticmethod
    @torch.no_grad()
    def tiled_inference(img, model):
        # test the image tile by tile
        h, w = img.shape[2:]
        tile = opts.SCUNET_tile
        tile_overlap = opts.SCUNET_tile_overlap
        if tile == 0:
            return model(img)

        device = devices.get_device_for('scunet')
        assert tile % 8 == 0, "tile size should be a multiple of window_size"
        sf = 1

        stride = tile - tile_overlap
        h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
        w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
        E = torch.zeros(1, 3, h * sf, w * sf, dtype=img.dtype, device=device)
        W = torch.zeros_like(E, dtype=devices.dtype, device=device)

        with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="ScuNET tiles") as pbar:
            for h_idx in h_idx_list:

                for w_idx in w_idx_list:

                    in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]

                    out_patch = model(in_patch)
                    out_patch_mask = torch.ones_like(out_patch)

                    E[
                        ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
                    ].add_(out_patch)
                    W[
                        ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
                    ].add_(out_patch_mask)
                    pbar.update(1)
        output = E.div_(W)

        return output
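    # Worked example of the tiling arithmetic above (illustrative numbers): for a
    # 300x300 input with tile=256 and tile_overlap=8, stride is 248, so
    #   h_idx_list == list(range(0, 300 - 256, 248)) + [300 - 256] == [0, 44]
    # i.e. each axis is covered by two overlapping 256-pixel windows; W counts how
    # many tiles touched each pixel, so E.div_(W) averages the overlapping outputs.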

    def do_upscale(self, img: PIL.Image.Image, selected_file):
        devices.torch_gc()
        try:
            model = self.load_model(selected_file)
        except Exception as e:
            print(f"ScuNET: Unable to load model from {selected_file}: {e}", file=sys.stderr)
        torch.cuda.empty_cache()

        model = self.load_model(selected_file)
        if model is None:
            print(f"ScuNET: Unable to load model from {selected_file}", file=sys.stderr)
            return img

        img = upscaler_utils.upscale_2(
            img,
            model,
            tile_size=shared.opts.SCUNET_tile,
            tile_overlap=shared.opts.SCUNET_tile_overlap,
            scale=1,  # ScuNET is a denoising model, not an upscaler
            desc='ScuNET',
        )
        devices.torch_gc()
        return img
        device = devices.get_device_for('scunet')
        tile = opts.SCUNET_tile
        h, w = img.height, img.width
        np_img = np.array(img)
        np_img = np_img[:, :, ::-1]  # RGB to BGR
        np_img = np_img.transpose((2, 0, 1)) / 255  # HWC to CHW
        torch_img = torch.from_numpy(np_img).float().unsqueeze(0).to(device)  # type: ignore

        if tile > h or tile > w:
            _img = torch.zeros(1, 3, max(h, tile), max(w, tile), dtype=torch_img.dtype, device=torch_img.device)
            _img[:, :, :h, :w] = torch_img  # pad image
            torch_img = _img

        torch_output = self.tiled_inference(torch_img, model).squeeze(0)
        torch_output = torch_output[:, :h * 1, :w * 1]  # remove padding, if any
        np_output: np.ndarray = torch_output.float().cpu().clamp_(0, 1).numpy()
        del torch_img, torch_output
        torch.cuda.empty_cache()

        output = np_output.transpose((1, 2, 0))  # CHW to HWC
        output = output[:, :, ::-1]  # BGR to RGB
        return PIL.Image.fromarray((output * 255).astype(np.uint8))

    def load_model(self, path: str):
        device = devices.get_device_for('scunet')
        if path.startswith("http"):
            # TODO: this doesn't use `path` at all?
            filename = modelloader.load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name=f"{self.name}.pth")
        if "http" in path:
            filename = load_file_from_url(url=self.model_url, model_dir=self.model_download_path, file_name="%s.pth" % self.name, progress=True)
        else:
            filename = path
        return modelloader.load_spandrel_model(filename, device=device, expected_architecture='SCUNet')
        if not os.path.exists(os.path.join(self.model_path, filename)) or filename is None:
            print(f"ScuNET: Unable to load model from {filename}", file=sys.stderr)
            return None

        model = net(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64)
        model.load_state_dict(torch.load(filename), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)

        return model


def on_ui_settings():
    import gradio as gr
    from modules import shared

    shared.opts.add_option("SCUNET_tile", shared.OptionInfo(256, "Tile size for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")).info("0 = no tiling"))
    shared.opts.add_option("SCUNET_tile_overlap", shared.OptionInfo(8, "Tile overlap for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, section=('upscaling', "Upscaling")).info("Low values = visible seam"))
268
extensions-builtin/ScuNET/scunet_model_arch.py
Normal file
@ -0,0 +1,268 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from einops import rearrange
|
||||
from einops.layers.torch import Rearrange
|
||||
from timm.models.layers import trunc_normal_, DropPath
|
||||
|
||||
|
||||
class WMSA(nn.Module):
|
||||
""" Self-attention module in Swin Transformer
|
||||
"""
|
||||
|
||||
def __init__(self, input_dim, output_dim, head_dim, window_size, type):
|
||||
super(WMSA, self).__init__()
|
||||
self.input_dim = input_dim
|
||||
self.output_dim = output_dim
|
||||
self.head_dim = head_dim
|
||||
self.scale = self.head_dim ** -0.5
|
||||
self.n_heads = input_dim // head_dim
|
||||
self.window_size = window_size
|
||||
self.type = type
|
||||
self.embedding_layer = nn.Linear(self.input_dim, 3 * self.input_dim, bias=True)
|
||||
|
||||
self.relative_position_params = nn.Parameter(
|
||||
torch.zeros((2 * window_size - 1) * (2 * window_size - 1), self.n_heads))
|
||||
|
||||
self.linear = nn.Linear(self.input_dim, self.output_dim)
|
||||
|
||||
trunc_normal_(self.relative_position_params, std=.02)
|
||||
self.relative_position_params = torch.nn.Parameter(
|
||||
self.relative_position_params.view(2 * window_size - 1, 2 * window_size - 1, self.n_heads).transpose(1,
|
||||
2).transpose(
|
||||
0, 1))
|
||||
|
||||
def generate_mask(self, h, w, p, shift):
|
||||
""" generating the mask of SW-MSA
|
||||
Args:
|
||||
shift: shift parameters in CyclicShift.
|
||||
Returns:
|
||||
attn_mask: should be (1 1 w p p),
|
||||
"""
|
||||
# supporting square.
|
||||
attn_mask = torch.zeros(h, w, p, p, p, p, dtype=torch.bool, device=self.relative_position_params.device)
|
||||
if self.type == 'W':
|
||||
return attn_mask
|
||||
|
||||
s = p - shift
|
||||
attn_mask[-1, :, :s, :, s:, :] = True
|
||||
attn_mask[-1, :, s:, :, :s, :] = True
|
||||
attn_mask[:, -1, :, :s, :, s:] = True
|
||||
attn_mask[:, -1, :, s:, :, :s] = True
|
||||
attn_mask = rearrange(attn_mask, 'w1 w2 p1 p2 p3 p4 -> 1 1 (w1 w2) (p1 p2) (p3 p4)')
|
||||
return attn_mask
|
||||
|
||||
def forward(self, x):
|
||||
""" Forward pass of Window Multi-head Self-attention module.
|
||||
Args:
|
||||
x: input tensor with shape of [b h w c];
|
||||
attn_mask: attention mask, fill -inf where the value is True;
|
||||
Returns:
|
||||
output: tensor shape [b h w c]
|
||||
"""
|
||||
if self.type != 'W':
|
||||
x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))
|
||||
|
||||
x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size)
|
||||
h_windows = x.size(1)
|
||||
w_windows = x.size(2)
|
||||
# square validation
|
||||
# assert h_windows == w_windows
|
||||
|
||||
x = rearrange(x, 'b w1 w2 p1 p2 c -> b (w1 w2) (p1 p2) c', p1=self.window_size, p2=self.window_size)
|
||||
qkv = self.embedding_layer(x)
|
||||
q, k, v = rearrange(qkv, 'b nw np (threeh c) -> threeh b nw np c', c=self.head_dim).chunk(3, dim=0)
|
||||
sim = torch.einsum('hbwpc,hbwqc->hbwpq', q, k) * self.scale
|
||||
# Adding learnable relative embedding
|
||||
sim = sim + rearrange(self.relative_embedding(), 'h p q -> h 1 1 p q')
|
||||
# Using Attn Mask to distinguish different subwindows.
|
||||
if self.type != 'W':
|
||||
attn_mask = self.generate_mask(h_windows, w_windows, self.window_size, shift=self.window_size // 2)
|
||||
sim = sim.masked_fill_(attn_mask, float("-inf"))
|
||||
|
||||
probs = nn.functional.softmax(sim, dim=-1)
|
||||
output = torch.einsum('hbwij,hbwjc->hbwic', probs, v)
|
||||
output = rearrange(output, 'h b w p c -> b w p (h c)')
|
||||
output = self.linear(output)
|
||||
output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size)
|
||||
|
||||
if self.type != 'W':
|
||||
output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), dims=(1, 2))
|
||||
|
||||
return output
|
||||
|
||||
def relative_embedding(self):
|
||||
cord = torch.tensor(np.array([[i, j] for i in range(self.window_size) for j in range(self.window_size)]))
|
||||
relation = cord[:, None, :] - cord[None, :, :] + self.window_size - 1
|
||||
# negative is allowed
|
||||
return self.relative_position_params[:, relation[:, :, 0].long(), relation[:, :, 1].long()]
|
||||
|
||||
|
||||
class Block(nn.Module):
|
||||
def __init__(self, input_dim, output_dim, head_dim, window_size, drop_path, type='W', input_resolution=None):
|
||||
""" SwinTransformer Block
|
||||
"""
|
||||
super(Block, self).__init__()
|
||||
self.input_dim = input_dim
|
||||
self.output_dim = output_dim
|
||||
assert type in ['W', 'SW']
|
||||
self.type = type
|
||||
if input_resolution <= window_size:
|
||||
self.type = 'W'
|
||||
|
||||
self.ln1 = nn.LayerNorm(input_dim)
|
||||
self.msa = WMSA(input_dim, input_dim, head_dim, window_size, self.type)
|
||||
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
||||
self.ln2 = nn.LayerNorm(input_dim)
|
||||
self.mlp = nn.Sequential(
|
||||
nn.Linear(input_dim, 4 * input_dim),
|
||||
nn.GELU(),
|
||||
nn.Linear(4 * input_dim, output_dim),
|
||||
)
|
||||
|
||||
def forward(self, x):
|
||||
x = x + self.drop_path(self.msa(self.ln1(x)))
|
||||
x = x + self.drop_path(self.mlp(self.ln2(x)))
|
||||
return x
|
||||
|
||||
|
||||
class ConvTransBlock(nn.Module):
|
||||
def __init__(self, conv_dim, trans_dim, head_dim, window_size, drop_path, type='W', input_resolution=None):
|
||||
""" SwinTransformer and Conv Block
|
||||
"""
|
||||
super(ConvTransBlock, self).__init__()
|
||||
self.conv_dim = conv_dim
|
||||
self.trans_dim = trans_dim
|
||||
self.head_dim = head_dim
|
||||
self.window_size = window_size
|
||||
self.drop_path = drop_path
|
||||
self.type = type
|
||||
self.input_resolution = input_resolution
|
||||
|
||||
assert self.type in ['W', 'SW']
|
||||
if self.input_resolution <= self.window_size:
|
||||
self.type = 'W'
|
||||
|
||||
self.trans_block = Block(self.trans_dim, self.trans_dim, self.head_dim, self.window_size, self.drop_path,
|
||||
self.type, self.input_resolution)
|
||||
self.conv1_1 = nn.Conv2d(self.conv_dim + self.trans_dim, self.conv_dim + self.trans_dim, 1, 1, 0, bias=True)
|
||||
self.conv1_2 = nn.Conv2d(self.conv_dim + self.trans_dim, self.conv_dim + self.trans_dim, 1, 1, 0, bias=True)
|
||||
|
||||
self.conv_block = nn.Sequential(
|
||||
nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False),
|
||||
nn.ReLU(True),
|
||||
nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False)
|
||||
)
|
||||
|
||||
def forward(self, x):
|
||||
conv_x, trans_x = torch.split(self.conv1_1(x), (self.conv_dim, self.trans_dim), dim=1)
|
||||
conv_x = self.conv_block(conv_x) + conv_x
|
||||
trans_x = Rearrange('b c h w -> b h w c')(trans_x)
|
||||
trans_x = self.trans_block(trans_x)
|
||||
trans_x = Rearrange('b h w c -> b c h w')(trans_x)
|
||||
res = self.conv1_2(torch.cat((conv_x, trans_x), dim=1))
|
||||
x = x + res
|
||||
|
||||
return x
|
||||
|
||||
|
||||
class SCUNet(nn.Module):
|
||||
# def __init__(self, in_nc=3, config=[2, 2, 2, 2, 2, 2, 2], dim=64, drop_path_rate=0.0, input_resolution=256):
|
||||
def __init__(self, in_nc=3, config=None, dim=64, drop_path_rate=0.0, input_resolution=256):
|
||||
super(SCUNet, self).__init__()
|
||||
if config is None:
|
||||
config = [2, 2, 2, 2, 2, 2, 2]
|
||||
self.config = config
|
||||
self.dim = dim
|
||||
self.head_dim = 32
|
||||
self.window_size = 8
|
||||
|
||||
# drop path rate for each layer
|
||||
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(config))]
|
||||
|
||||
self.m_head = [nn.Conv2d(in_nc, dim, 3, 1, 1, bias=False)]
|
||||
|
||||
begin = 0
|
||||
self.m_down1 = [ConvTransBlock(dim // 2, dim // 2, self.head_dim, self.window_size, dpr[i + begin],
|
||||
'W' if not i % 2 else 'SW', input_resolution)
|
||||
for i in range(config[0])] + \
|
||||
[nn.Conv2d(dim, 2 * dim, 2, 2, 0, bias=False)]
|
||||
|
||||
begin += config[0]
|
||||
self.m_down2 = [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i + begin],
|
||||
'W' if not i % 2 else 'SW', input_resolution // 2)
|
||||
for i in range(config[1])] + \
|
||||
[nn.Conv2d(2 * dim, 4 * dim, 2, 2, 0, bias=False)]
|
||||
|
||||
begin += config[1]
|
||||
self.m_down3 = [ConvTransBlock(2 * dim, 2 * dim, self.head_dim, self.window_size, dpr[i + begin],
|
||||
'W' if not i % 2 else 'SW', input_resolution // 4)
|
||||
for i in range(config[2])] + \
|
||||
[nn.Conv2d(4 * dim, 8 * dim, 2, 2, 0, bias=False)]
|
||||
|
||||
begin += config[2]
|
||||
self.m_body = [ConvTransBlock(4 * dim, 4 * dim, self.head_dim, self.window_size, dpr[i + begin],
|
||||
'W' if not i % 2 else 'SW', input_resolution // 8)
|
||||
for i in range(config[3])]
|
||||
|
||||
begin += config[3]
|
||||
self.m_up3 = [nn.ConvTranspose2d(8 * dim, 4 * dim, 2, 2, 0, bias=False), ] + \
|
||||
[ConvTransBlock(2 * dim, 2 * dim, self.head_dim, self.window_size, dpr[i + begin],
|
||||
'W' if not i % 2 else 'SW', input_resolution // 4)
|
||||
for i in range(config[4])]
|
||||
|
||||
begin += config[4]
|
||||
self.m_up2 = [nn.ConvTranspose2d(4 * dim, 2 * dim, 2, 2, 0, bias=False), ] + \
|
||||
[ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i + begin],
|
||||
'W' if not i % 2 else 'SW', input_resolution // 2)
|
||||
for i in range(config[5])]
|
||||
|
||||
begin += config[5]
|
||||
self.m_up1 = [nn.ConvTranspose2d(2 * dim, dim, 2, 2, 0, bias=False), ] + \
|
||||
[ConvTransBlock(dim // 2, dim // 2, self.head_dim, self.window_size, dpr[i + begin],
|
||||
'W' if not i % 2 else 'SW', input_resolution)
|
||||
for i in range(config[6])]
|
||||
|
||||
self.m_tail = [nn.Conv2d(dim, in_nc, 3, 1, 1, bias=False)]
|
||||
|
||||
self.m_head = nn.Sequential(*self.m_head)
|
||||
self.m_down1 = nn.Sequential(*self.m_down1)
|
||||
self.m_down2 = nn.Sequential(*self.m_down2)
|
||||
self.m_down3 = nn.Sequential(*self.m_down3)
|
||||
self.m_body = nn.Sequential(*self.m_body)
|
||||
self.m_up3 = nn.Sequential(*self.m_up3)
|
||||
self.m_up2 = nn.Sequential(*self.m_up2)
|
||||
self.m_up1 = nn.Sequential(*self.m_up1)
|
||||
self.m_tail = nn.Sequential(*self.m_tail)
|
||||
# self.apply(self._init_weights)
|
||||
|
||||
def forward(self, x0):
|
||||
|
||||
h, w = x0.size()[-2:]
|
||||
paddingBottom = int(np.ceil(h / 64) * 64 - h)
|
||||
paddingRight = int(np.ceil(w / 64) * 64 - w)
|
||||
x0 = nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(x0)
|
||||
|
||||
x1 = self.m_head(x0)
|
||||
x2 = self.m_down1(x1)
|
||||
x3 = self.m_down2(x2)
|
||||
x4 = self.m_down3(x3)
|
||||
x = self.m_body(x4)
|
||||
x = self.m_up3(x + x4)
|
||||
x = self.m_up2(x + x3)
|
||||
x = self.m_up1(x + x2)
|
||||
x = self.m_tail(x + x1)
|
||||
|
||||
x = x[..., :h, :w]
|
||||
|
||||
return x
|
||||
|
||||
def _init_weights(self, m):
|
||||
if isinstance(m, nn.Linear):
|
||||
trunc_normal_(m.weight, std=.02)
|
||||
if m.bias is not None:
|
||||
nn.init.constant_(m.bias, 0)
|
||||
elif isinstance(m, nn.LayerNorm):
|
||||
nn.init.constant_(m.bias, 0)
|
||||
nn.init.constant_(m.weight, 1.0)
|
@ -1,30 +1,34 @@
|
||||
import logging
|
||||
import sys
|
||||
import os
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from PIL import Image
|
||||
from basicsr.utils.download_util import load_file_from_url
|
||||
from tqdm import tqdm
|
||||
|
||||
from modules import devices, modelloader, script_callbacks, shared, upscaler_utils
|
||||
from modules import modelloader, devices, script_callbacks, shared
|
||||
from modules.shared import opts, state
|
||||
from swinir_model_arch import SwinIR as net
|
||||
from swinir_model_arch_v2 import Swin2SR as net2
|
||||
from modules.upscaler import Upscaler, UpscalerData
|
||||
|
||||
SWINIR_MODEL_URL = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth"
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
device_swinir = devices.get_device_for('swinir')
|
||||
|
||||
|
||||
class UpscalerSwinIR(Upscaler):
|
||||
def __init__(self, dirname):
|
||||
self._cached_model = None # keep the model when SWIN_torch_compile is on to prevent re-compile every runs
|
||||
self._cached_model_config = None # to clear '_cached_model' when changing model (v1/v2) or settings
|
||||
self.name = "SwinIR"
|
||||
self.model_url = SWINIR_MODEL_URL
|
||||
self.model_url = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0" \
|
||||
"/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR" \
|
||||
"-L_x4_GAN.pth "
|
||||
self.model_name = "SwinIR 4x"
|
||||
self.user_path = dirname
|
||||
super().__init__()
|
||||
scalers = []
|
||||
model_files = self.find_models(ext_filter=[".pt", ".pth"])
|
||||
for model in model_files:
|
||||
if model.startswith("http"):
|
||||
if "http" in model:
|
||||
name = self.model_name
|
||||
else:
|
||||
name = modelloader.friendly_name(model)
|
||||
@ -32,56 +36,135 @@ class UpscalerSwinIR(Upscaler):
|
||||
scalers.append(model_data)
|
||||
self.scalers = scalers
|
||||
|
||||
def do_upscale(self, img: Image.Image, model_file: str) -> Image.Image:
|
||||
current_config = (model_file, shared.opts.SWIN_tile)
|
||||
|
||||
if self._cached_model_config == current_config:
|
||||
model = self._cached_model
|
||||
else:
|
||||
try:
|
||||
model = self.load_model(model_file)
|
||||
except Exception as e:
|
||||
print(f"Failed loading SwinIR model {model_file}: {e}", file=sys.stderr)
|
||||
return img
|
||||
self._cached_model = model
|
||||
self._cached_model_config = current_config
|
||||
|
||||
img = upscaler_utils.upscale_2(
|
||||
img,
|
||||
model,
|
||||
tile_size=shared.opts.SWIN_tile,
|
||||
tile_overlap=shared.opts.SWIN_tile_overlap,
|
||||
scale=model.scale,
|
||||
desc="SwinIR",
|
||||
)
|
||||
devices.torch_gc()
|
||||
def do_upscale(self, img, model_file):
|
||||
model = self.load_model(model_file)
|
||||
if model is None:
|
||||
return img
|
||||
model = model.to(device_swinir, dtype=devices.dtype)
|
||||
img = upscale(img, model)
|
||||
try:
|
||||
torch.cuda.empty_cache()
|
||||
except Exception:
|
||||
pass
|
||||
return img
|
||||
|
||||
def load_model(self, path, scale=4):
|
||||
if path.startswith("http"):
|
||||
filename = modelloader.load_file_from_url(
|
||||
url=path,
|
||||
model_dir=self.model_download_path,
|
||||
file_name=f"{self.model_name.replace(' ', '_')}.pth",
|
||||
)
|
||||
if "http" in path:
|
||||
dl_name = "%s%s" % (self.model_name.replace(" ", "_"), ".pth")
|
||||
filename = load_file_from_url(url=path, model_dir=self.model_download_path, file_name=dl_name, progress=True)
|
||||
else:
|
||||
filename = path
|
||||
if filename is None or not os.path.exists(filename):
|
||||
return None
|
||||
if filename.endswith(".v2.pth"):
|
||||
model = net2(
|
||||
upscale=scale,
|
||||
in_chans=3,
|
||||
img_size=64,
|
||||
window_size=8,
|
||||
img_range=1.0,
|
||||
depths=[6, 6, 6, 6, 6, 6],
|
||||
embed_dim=180,
|
||||
num_heads=[6, 6, 6, 6, 6, 6],
|
||||
mlp_ratio=2,
|
||||
upsampler="nearest+conv",
|
||||
resi_connection="1conv",
|
||||
)
|
||||
params = None
|
||||
else:
|
||||
model = net(
|
||||
upscale=scale,
|
||||
in_chans=3,
|
||||
img_size=64,
|
||||
window_size=8,
|
||||
img_range=1.0,
|
||||
depths=[6, 6, 6, 6, 6, 6, 6, 6, 6],
|
||||
embed_dim=240,
|
||||
num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
|
||||
mlp_ratio=2,
|
||||
upsampler="nearest+conv",
|
||||
resi_connection="3conv",
|
||||
)
|
||||
params = "params_ema"
|
||||
|
||||
model_descriptor = modelloader.load_spandrel_model(
|
||||
filename,
|
||||
device=self._get_device(),
|
||||
prefer_half=(devices.dtype == torch.float16),
|
||||
expected_architecture="SwinIR",
|
||||
)
|
||||
if getattr(shared.opts, 'SWIN_torch_compile', False):
|
||||
try:
|
||||
model_descriptor.model.compile()
|
||||
except Exception:
|
||||
logger.warning("Failed to compile SwinIR model, fallback to JIT", exc_info=True)
|
||||
return model_descriptor
|
||||
pretrained_model = torch.load(filename)
|
||||
if params is not None:
|
||||
model.load_state_dict(pretrained_model[params], strict=True)
|
||||
else:
|
||||
model.load_state_dict(pretrained_model, strict=True)
|
||||
return model
|
||||
|
||||
def _get_device(self):
|
||||
return devices.get_device_for('swinir')
|
||||
|
||||
def upscale(
|
||||
img,
|
||||
model,
|
||||
tile=None,
|
||||
tile_overlap=None,
|
||||
window_size=8,
|
||||
scale=4,
|
||||
):
|
||||
tile = tile or opts.SWIN_tile
|
||||
tile_overlap = tile_overlap or opts.SWIN_tile_overlap
|
||||
|
||||
|
||||
img = np.array(img)
|
||||
img = img[:, :, ::-1]
|
||||
img = np.moveaxis(img, 2, 0) / 255
|
||||
img = torch.from_numpy(img).float()
|
||||
img = img.unsqueeze(0).to(device_swinir, dtype=devices.dtype)
|
||||
with torch.no_grad(), devices.autocast():
|
||||
_, _, h_old, w_old = img.size()
|
||||
h_pad = (h_old // window_size + 1) * window_size - h_old
|
||||
w_pad = (w_old // window_size + 1) * window_size - w_old
|
||||
img = torch.cat([img, torch.flip(img, [2])], 2)[:, :, : h_old + h_pad, :]
|
||||
img = torch.cat([img, torch.flip(img, [3])], 3)[:, :, :, : w_old + w_pad]
|
||||
output = inference(img, model, tile, tile_overlap, window_size, scale)
|
||||
output = output[..., : h_old * scale, : w_old * scale]
|
||||
output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
|
||||
if output.ndim == 3:
|
||||
output = np.transpose(
|
||||
output[[2, 1, 0], :, :], (1, 2, 0)
|
||||
) # CHW-RGB to HCW-BGR
|
||||
output = (output * 255.0).round().astype(np.uint8) # float32 to uint8
|
||||
return Image.fromarray(output, "RGB")
|
||||
|
||||
|
||||
def inference(img, model, tile, tile_overlap, window_size, scale):
|
||||
# test the image tile by tile
|
||||
b, c, h, w = img.size()
|
||||
tile = min(tile, h, w)
|
||||
assert tile % window_size == 0, "tile size should be a multiple of window_size"
|
||||
sf = scale
|
||||
|
||||
stride = tile - tile_overlap
|
||||
h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
|
||||
w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
|
||||
E = torch.zeros(b, c, h * sf, w * sf, dtype=devices.dtype, device=device_swinir).type_as(img)
|
||||
W = torch.zeros_like(E, dtype=devices.dtype, device=device_swinir)
|
||||
|
||||
with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="SwinIR tiles") as pbar:
|
||||
for h_idx in h_idx_list:
|
||||
if state.interrupted or state.skipped:
|
||||
break
|
||||
|
||||
for w_idx in w_idx_list:
|
||||
if state.interrupted or state.skipped:
|
||||
break
|
||||
|
||||
in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
|
||||
out_patch = model(in_patch)
|
||||
out_patch_mask = torch.ones_like(out_patch)
|
||||
|
||||
E[
|
||||
..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
|
||||
].add_(out_patch)
|
||||
W[
|
||||
..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
|
||||
].add_(out_patch_mask)
|
||||
pbar.update(1)
|
||||
output = E.div_(W)
|
||||
|
||||
return output
|
||||
|
||||
|
||||
def on_ui_settings():
|
||||
@ -89,7 +172,6 @@ def on_ui_settings():
|
||||
|
||||
shared.opts.add_option("SWIN_tile", shared.OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")))
|
||||
shared.opts.add_option("SWIN_tile_overlap", shared.OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}, section=('upscaling', "Upscaling")))
|
||||
shared.opts.add_option("SWIN_torch_compile", shared.OptionInfo(False, "Use torch.compile to accelerate SwinIR.", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling")).info("Takes longer on first run"))
|
||||
|
||||
|
||||
script_callbacks.on_ui_settings(on_ui_settings)
|
||||
|
867
extensions-builtin/SwinIR/swinir_model_arch.py
Normal file
@ -0,0 +1,867 @@
|
||||
# -----------------------------------------------------------------------------------
|
||||
# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
|
||||
# Originally Written by Ze Liu, Modified by Jingyun Liang.
|
||||
# -----------------------------------------------------------------------------------
|
||||
|
||||
import math
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
import torch.utils.checkpoint as checkpoint
|
||||
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
|
||||
|
||||
|
||||
class Mlp(nn.Module):
|
||||
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
|
||||
super().__init__()
|
||||
out_features = out_features or in_features
|
||||
hidden_features = hidden_features or in_features
|
||||
self.fc1 = nn.Linear(in_features, hidden_features)
|
||||
self.act = act_layer()
|
||||
self.fc2 = nn.Linear(hidden_features, out_features)
|
||||
self.drop = nn.Dropout(drop)
|
||||
|
||||
def forward(self, x):
|
||||
x = self.fc1(x)
|
||||
x = self.act(x)
|
||||
x = self.drop(x)
|
||||
x = self.fc2(x)
|
||||
x = self.drop(x)
|
||||
return x
|
||||
|
||||
|
||||
def window_partition(x, window_size):
|
||||
"""
|
||||
Args:
|
||||
x: (B, H, W, C)
|
||||
window_size (int): window size
|
||||
|
||||
Returns:
|
||||
windows: (num_windows*B, window_size, window_size, C)
|
||||
"""
|
||||
B, H, W, C = x.shape
|
||||
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
|
||||
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
|
||||
return windows
|
||||
|
||||
|
||||
def window_reverse(windows, window_size, H, W):
|
||||
"""
|
||||
Args:
|
||||
windows: (num_windows*B, window_size, window_size, C)
|
||||
window_size (int): Window size
|
||||
H (int): Height of image
|
||||
W (int): Width of image
|
||||
|
||||
Returns:
|
||||
x: (B, H, W, C)
|
||||
"""
|
||||
B = int(windows.shape[0] / (H * W / window_size / window_size))
|
||||
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
|
||||
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
|
||||
return x
|
||||
|
||||
|
||||
class WindowAttention(nn.Module):
|
||||
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
|
||||
It supports both of shifted and non-shifted window.
|
||||
|
||||
Args:
|
||||
dim (int): Number of input channels.
|
||||
window_size (tuple[int]): The height and width of the window.
|
||||
num_heads (int): Number of attention heads.
|
||||
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
||||
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
|
||||
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
|
||||
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
|
||||
"""
|
||||
|
||||
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
|
||||
|
||||
super().__init__()
|
||||
self.dim = dim
|
||||
self.window_size = window_size # Wh, Ww
|
||||
self.num_heads = num_heads
|
||||
head_dim = dim // num_heads
|
||||
self.scale = qk_scale or head_dim ** -0.5
|
||||
|
||||
# define a parameter table of relative position bias
|
||||
self.relative_position_bias_table = nn.Parameter(
|
||||
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
|
||||
|
||||
# get pair-wise relative position index for each token inside the window
|
||||
coords_h = torch.arange(self.window_size[0])
|
||||
coords_w = torch.arange(self.window_size[1])
|
||||
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
|
||||
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
|
||||
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
|
||||
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
|
||||
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
|
||||
relative_coords[:, :, 1] += self.window_size[1] - 1
|
||||
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
|
||||
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
|
||||
self.register_buffer("relative_position_index", relative_position_index)
|
||||
|
||||
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
|
||||
self.attn_drop = nn.Dropout(attn_drop)
|
||||
self.proj = nn.Linear(dim, dim)
|
||||
|
||||
self.proj_drop = nn.Dropout(proj_drop)
|
||||
|
||||
trunc_normal_(self.relative_position_bias_table, std=.02)
|
||||
self.softmax = nn.Softmax(dim=-1)
|
||||
|
||||
def forward(self, x, mask=None):
|
||||
"""
|
||||
Args:
|
||||
x: input features with shape of (num_windows*B, N, C)
|
||||
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
|
||||
"""
|
||||
B_, N, C = x.shape
|
||||
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
|
||||
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
|
||||
|
||||
q = q * self.scale
|
||||
attn = (q @ k.transpose(-2, -1))
|
||||
|
||||
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
|
||||
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
|
||||
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
|
||||
attn = attn + relative_position_bias.unsqueeze(0)
|
||||
|
||||
if mask is not None:
|
||||
nW = mask.shape[0]
|
||||
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
|
||||
attn = attn.view(-1, self.num_heads, N, N)
|
||||
attn = self.softmax(attn)
|
||||
else:
|
||||
attn = self.softmax(attn)
|
||||
|
||||
attn = self.attn_drop(attn)
|
||||
|
||||
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
|
||||
x = self.proj(x)
|
||||
x = self.proj_drop(x)
|
||||
return x
|
||||
|
||||
def extra_repr(self) -> str:
|
||||
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
|
||||
|
||||
def flops(self, N):
|
||||
# calculate flops for 1 window with token length of N
|
||||
flops = 0
|
||||
# qkv = self.qkv(x)
|
||||
flops += N * self.dim * 3 * self.dim
|
||||
# attn = (q @ k.transpose(-2, -1))
|
||||
flops += self.num_heads * N * (self.dim // self.num_heads) * N
|
||||
# x = (attn @ v)
|
||||
flops += self.num_heads * N * N * (self.dim // self.num_heads)
|
||||
# x = self.proj(x)
|
||||
flops += N * self.dim * self.dim
|
||||
return flops
|
||||
|
||||
|
||||
class SwinTransformerBlock(nn.Module):
|
||||
r""" Swin Transformer Block.
|
||||
|
||||
Args:
|
||||
dim (int): Number of input channels.
|
||||
input_resolution (tuple[int]): Input resolution.
|
||||
num_heads (int): Number of attention heads.
|
||||
window_size (int): Window size.
|
||||
shift_size (int): Shift size for SW-MSA.
|
||||
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
||||
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
||||
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
|
||||
drop (float, optional): Dropout rate. Default: 0.0
|
||||
attn_drop (float, optional): Attention dropout rate. Default: 0.0
|
||||
drop_path (float, optional): Stochastic depth rate. Default: 0.0
|
||||
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
|
||||
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
||||
"""
|
||||
|
||||
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
|
||||
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
|
||||
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
|
||||
super().__init__()
|
||||
self.dim = dim
|
||||
self.input_resolution = input_resolution
|
||||
self.num_heads = num_heads
|
||||
self.window_size = window_size
|
||||
self.shift_size = shift_size
|
||||
self.mlp_ratio = mlp_ratio
|
||||
if min(self.input_resolution) <= self.window_size:
|
||||
# if window size is larger than input resolution, we don't partition windows
|
||||
self.shift_size = 0
|
||||
self.window_size = min(self.input_resolution)
|
||||
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
|
||||
|
||||
self.norm1 = norm_layer(dim)
|
||||
self.attn = WindowAttention(
|
||||
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
|
||||
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
|
||||
|
||||
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
||||
self.norm2 = norm_layer(dim)
|
||||
mlp_hidden_dim = int(dim * mlp_ratio)
|
||||
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
|
||||
|
||||
if self.shift_size > 0:
|
||||
attn_mask = self.calculate_mask(self.input_resolution)
|
||||
else:
|
||||
attn_mask = None
|
||||
|
||||
self.register_buffer("attn_mask", attn_mask)
|
||||
|
||||
def calculate_mask(self, x_size):
|
||||
# calculate attention mask for SW-MSA
|
||||
H, W = x_size
|
||||
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
|
||||
h_slices = (slice(0, -self.window_size),
|
||||
slice(-self.window_size, -self.shift_size),
|
||||
slice(-self.shift_size, None))
|
||||
w_slices = (slice(0, -self.window_size),
|
||||
slice(-self.window_size, -self.shift_size),
|
||||
slice(-self.shift_size, None))
|
||||
cnt = 0
|
||||
for h in h_slices:
|
||||
for w in w_slices:
|
||||
img_mask[:, h, w, :] = cnt
|
||||
cnt += 1
|
||||
|
||||
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
|
||||
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
|
||||
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
|
||||
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
|
||||
|
||||
return attn_mask
|
||||
|
||||
def forward(self, x, x_size):
|
||||
H, W = x_size
|
||||
B, L, C = x.shape
|
||||
# assert L == H * W, "input feature has wrong size"
|
||||
|
||||
shortcut = x
|
||||
x = self.norm1(x)
|
||||
x = x.view(B, H, W, C)
|
||||
|
||||
# cyclic shift
|
||||
if self.shift_size > 0:
|
||||
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
|
||||
else:
|
||||
shifted_x = x
|
||||
|
||||
# partition windows
|
||||
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
|
||||
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
|
||||
|
||||
# W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size
|
||||
if self.input_resolution == x_size:
|
||||
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
|
||||
else:
|
||||
attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
|
||||
|
||||
# merge windows
|
||||
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
|
||||
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
|
||||
|
||||
# reverse cyclic shift
|
||||
if self.shift_size > 0:
|
||||
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
|
||||
else:
|
||||
x = shifted_x
|
||||
x = x.view(B, H * W, C)
|
||||
|
||||
# FFN
|
||||
x = shortcut + self.drop_path(x)
|
||||
x = x + self.drop_path(self.mlp(self.norm2(x)))
|
||||
|
||||
return x
|
||||
|
||||
def extra_repr(self) -> str:
|
||||
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
|
||||
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
|
||||
|
||||
def flops(self):
|
||||
flops = 0
|
||||
H, W = self.input_resolution
|
||||
# norm1
|
||||
flops += self.dim * H * W
|
||||
# W-MSA/SW-MSA
|
||||
nW = H * W / self.window_size / self.window_size
|
||||
flops += nW * self.attn.flops(self.window_size * self.window_size)
|
||||
# mlp
|
||||
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
|
||||
# norm2
|
||||
flops += self.dim * H * W
|
||||
return flops
|
||||
|
||||
|
||||
class PatchMerging(nn.Module):
|
||||
r""" Patch Merging Layer.
|
||||
|
||||
Args:
|
||||
input_resolution (tuple[int]): Resolution of input feature.
|
||||
dim (int): Number of input channels.
|
||||
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
||||
"""
|
||||
|
||||
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
|
||||
super().__init__()
|
||||
self.input_resolution = input_resolution
|
||||
self.dim = dim
|
||||
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
|
||||
self.norm = norm_layer(4 * dim)
|
||||
|
||||
def forward(self, x):
|
||||
"""
|
||||
x: B, H*W, C
|
||||
"""
|
||||
H, W = self.input_resolution
|
||||
B, L, C = x.shape
|
||||
assert L == H * W, "input feature has wrong size"
|
||||
assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
|
||||
|
||||
x = x.view(B, H, W, C)
|
||||
|
||||
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
|
||||
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
|
||||
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
|
||||
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
|
||||
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
|
||||
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
|
||||
|
||||
x = self.norm(x)
|
||||
x = self.reduction(x)
|
||||
|
||||
return x
|
||||
|
||||
def extra_repr(self) -> str:
|
||||
return f"input_resolution={self.input_resolution}, dim={self.dim}"
|
||||
|
||||
def flops(self):
|
||||
H, W = self.input_resolution
|
||||
flops = H * W * self.dim
|
||||
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
|
||||
return flops
|
||||
|
||||
|
||||
class BasicLayer(nn.Module):
|
||||
""" A basic Swin Transformer layer for one stage.
|
||||
|
||||
Args:
|
||||
dim (int): Number of input channels.
|
||||
input_resolution (tuple[int]): Input resolution.
|
||||
depth (int): Number of blocks.
|
||||
num_heads (int): Number of attention heads.
|
||||
window_size (int): Local window size.
|
||||
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
||||
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
||||
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
|
||||
drop (float, optional): Dropout rate. Default: 0.0
|
||||
attn_drop (float, optional): Attention dropout rate. Default: 0.0
|
||||
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
|
||||
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
||||
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
|
||||
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
|
||||
"""
|
||||
|
||||
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
|
||||
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
|
||||
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
|
||||
|
||||
super().__init__()
|
||||
self.dim = dim
|
||||
self.input_resolution = input_resolution
|
||||
self.depth = depth
|
||||
self.use_checkpoint = use_checkpoint
|
||||
|
||||
# build blocks
|
||||
self.blocks = nn.ModuleList([
|
||||
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
|
||||
num_heads=num_heads, window_size=window_size,
|
||||
shift_size=0 if (i % 2 == 0) else window_size // 2,
|
||||
mlp_ratio=mlp_ratio,
|
||||
qkv_bias=qkv_bias, qk_scale=qk_scale,
|
||||
drop=drop, attn_drop=attn_drop,
|
||||
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
|
||||
norm_layer=norm_layer)
|
||||
for i in range(depth)])
|
||||
|
||||
# patch merging layer
|
||||
if downsample is not None:
|
||||
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
|
||||
else:
|
||||
self.downsample = None
|
||||
|
||||
def forward(self, x, x_size):
|
||||
for blk in self.blocks:
|
||||
if self.use_checkpoint:
|
||||
x = checkpoint.checkpoint(blk, x, x_size)
|
||||
else:
|
||||
x = blk(x, x_size)
|
||||
if self.downsample is not None:
|
||||
x = self.downsample(x)
|
||||
return x
|
||||
|
||||
def extra_repr(self) -> str:
|
||||
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
|
||||
|
||||
def flops(self):
|
||||
flops = 0
|
||||
for blk in self.blocks:
|
||||
flops += blk.flops()
|
||||
if self.downsample is not None:
|
||||
flops += self.downsample.flops()
|
||||
return flops
|
||||
|
||||
|
||||
class RSTB(nn.Module):
|
||||
"""Residual Swin Transformer Block (RSTB).
|
||||
|
||||
Args:
|
||||
dim (int): Number of input channels.
|
||||
input_resolution (tuple[int]): Input resolution.
|
||||
depth (int): Number of blocks.
|
||||
num_heads (int): Number of attention heads.
|
||||
window_size (int): Local window size.
|
||||
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
||||
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
||||
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
|
||||
drop (float, optional): Dropout rate. Default: 0.0
|
||||
attn_drop (float, optional): Attention dropout rate. Default: 0.0
|
||||
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
|
||||
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
||||
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
|
||||
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
|
||||
img_size: Input image size.
|
||||
patch_size: Patch size.
|
||||
resi_connection: The convolutional block before residual connection.
|
||||
"""
|
||||
|
||||
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
|
||||
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
|
||||
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
|
||||
img_size=224, patch_size=4, resi_connection='1conv'):
|
||||
super(RSTB, self).__init__()
|
||||
|
||||
self.dim = dim
|
||||
self.input_resolution = input_resolution
|
||||
|
||||
self.residual_group = BasicLayer(dim=dim,
|
||||
input_resolution=input_resolution,
|
||||
depth=depth,
|
||||
num_heads=num_heads,
|
||||
window_size=window_size,
|
||||
mlp_ratio=mlp_ratio,
|
||||
qkv_bias=qkv_bias, qk_scale=qk_scale,
|
||||
drop=drop, attn_drop=attn_drop,
|
||||
drop_path=drop_path,
|
||||
norm_layer=norm_layer,
|
||||
downsample=downsample,
|
||||
use_checkpoint=use_checkpoint)
|
||||
|
||||
if resi_connection == '1conv':
|
||||
self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
|
||||
elif resi_connection == '3conv':
|
||||
# to save parameters and memory
|
||||
self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
|
||||
nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
|
||||
nn.LeakyReLU(negative_slope=0.2, inplace=True),
|
||||
nn.Conv2d(dim // 4, dim, 3, 1, 1))
|
||||
|
||||
self.patch_embed = PatchEmbed(
|
||||
img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
|
||||
norm_layer=None)
|
||||
|
||||
self.patch_unembed = PatchUnEmbed(
|
||||
img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
|
||||
norm_layer=None)
|
||||
|
||||
def forward(self, x, x_size):
|
||||
return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x
|
||||
|
||||
def flops(self):
|
||||
flops = 0
|
||||
flops += self.residual_group.flops()
|
||||
H, W = self.input_resolution
|
||||
flops += H * W * self.dim * self.dim * 9
|
||||
flops += self.patch_embed.flops()
|
||||
flops += self.patch_unembed.flops()
|
||||
|
||||
return flops
|
||||
|
||||
|
||||
class PatchEmbed(nn.Module):
|
||||
r""" Image to Patch Embedding
|
||||
|
||||
Args:
|
||||
img_size (int): Image size. Default: 224.
|
||||
patch_size (int): Patch token size. Default: 4.
|
||||
in_chans (int): Number of input image channels. Default: 3.
|
||||
embed_dim (int): Number of linear projection output channels. Default: 96.
|
||||
norm_layer (nn.Module, optional): Normalization layer. Default: None
|
||||
"""
|
||||
|
||||
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
|
||||
super().__init__()
|
||||
img_size = to_2tuple(img_size)
|
||||
patch_size = to_2tuple(patch_size)
|
||||
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
|
||||
self.img_size = img_size
|
||||
self.patch_size = patch_size
|
||||
self.patches_resolution = patches_resolution
|
||||
self.num_patches = patches_resolution[0] * patches_resolution[1]
|
||||
|
||||
self.in_chans = in_chans
|
||||
self.embed_dim = embed_dim
|
||||
|
||||
if norm_layer is not None:
|
||||
self.norm = norm_layer(embed_dim)
|
||||
else:
|
||||
self.norm = None
|
||||
|
||||
def forward(self, x):
|
||||
x = x.flatten(2).transpose(1, 2) # B Ph*Pw C
|
||||
if self.norm is not None:
|
||||
x = self.norm(x)
|
||||
return x
|
||||
|
||||
def flops(self):
|
||||
flops = 0
|
||||
H, W = self.img_size
|
||||
if self.norm is not None:
|
||||
flops += H * W * self.embed_dim
|
||||
return flops
|
||||
|
||||
|
||||
class PatchUnEmbed(nn.Module):
|
||||
r""" Image to Patch Unembedding
|
||||
|
||||
Args:
|
||||
img_size (int): Image size. Default: 224.
|
||||
patch_size (int): Patch token size. Default: 4.
|
||||
in_chans (int): Number of input image channels. Default: 3.
|
||||
embed_dim (int): Number of linear projection output channels. Default: 96.
|
||||
norm_layer (nn.Module, optional): Normalization layer. Default: None
|
||||
"""
|
||||
|
||||
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
|
||||
super().__init__()
|
||||
img_size = to_2tuple(img_size)
|
||||
patch_size = to_2tuple(patch_size)
|
||||
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
|
||||
self.img_size = img_size
|
||||
self.patch_size = patch_size
|
||||
self.patches_resolution = patches_resolution
|
||||
self.num_patches = patches_resolution[0] * patches_resolution[1]
|
||||
|
||||
self.in_chans = in_chans
|
||||
self.embed_dim = embed_dim
|
||||
|
||||
def forward(self, x, x_size):
|
||||
B, HW, C = x.shape
|
||||
x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1])  # B C Ph Pw
|
||||
return x
|
||||
|
||||
def flops(self):
|
||||
flops = 0
|
||||
return flops
|
||||
|
||||
|
||||
class Upsample(nn.Sequential):
|
||||
"""Upsample module.
|
||||
|
||||
Args:
|
||||
scale (int): Scale factor. Supported scales: 2^n and 3.
|
||||
num_feat (int): Channel number of intermediate features.
|
||||
"""
|
||||
|
||||
def __init__(self, scale, num_feat):
|
||||
m = []
|
||||
if (scale & (scale - 1)) == 0: # scale = 2^n
|
||||
for _ in range(int(math.log(scale, 2))):
|
||||
m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
|
||||
m.append(nn.PixelShuffle(2))
|
||||
elif scale == 3:
|
||||
m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
|
||||
m.append(nn.PixelShuffle(3))
|
||||
else:
|
||||
raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
|
||||
super(Upsample, self).__init__(*m)
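# Illustrative sketch, not part of the original file (assumes this file's own torch import):
# with scale=4 the loop above runs int(math.log(4, 2)) = 2 times, so the module is
# (Conv2d num_feat -> 4*num_feat, PixelShuffle(2)) stacked twice, which turns an H x W
# feature map into a 4H x 4W map with the channel count unchanged.
_example_upsample = Upsample(scale=4, num_feat=64)
_example_out = _example_upsample(torch.randn(1, 64, 32, 32))
print(_example_out.shape)  # torch.Size([1, 64, 128, 128])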
|
||||
|
||||
|
||||
class UpsampleOneStep(nn.Sequential):
|
||||
"""UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
|
||||
Used in lightweight SR to save parameters.
|
||||
|
||||
Args:
|
||||
scale (int): Scale factor. Supported scales: 2^n and 3.
|
||||
num_feat (int): Channel number of intermediate features.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
|
||||
self.num_feat = num_feat
|
||||
self.input_resolution = input_resolution
|
||||
m = []
|
||||
m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1))
|
||||
m.append(nn.PixelShuffle(scale))
|
||||
super(UpsampleOneStep, self).__init__(*m)
|
||||
|
||||
def flops(self):
|
||||
H, W = self.input_resolution
|
||||
flops = H * W * self.num_feat * 3 * 9
|
||||
return flops
|
||||
|
||||
|
||||
class SwinIR(nn.Module):
|
||||
r""" SwinIR
|
||||
A PyTorch impl of: `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer.
|
||||
|
||||
Args:
|
||||
img_size (int | tuple(int)): Input image size. Default: 64
|
||||
patch_size (int | tuple(int)): Patch size. Default: 1
|
||||
in_chans (int): Number of input image channels. Default: 3
|
||||
embed_dim (int): Patch embedding dimension. Default: 96
|
||||
depths (tuple(int)): Depth of each Swin Transformer layer.
|
||||
num_heads (tuple(int)): Number of attention heads in different layers.
|
||||
window_size (int): Window size. Default: 7
|
||||
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
|
||||
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
|
||||
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
|
||||
drop_rate (float): Dropout rate. Default: 0
|
||||
attn_drop_rate (float): Attention dropout rate. Default: 0
|
||||
drop_path_rate (float): Stochastic depth rate. Default: 0.1
|
||||
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
|
||||
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
|
||||
patch_norm (bool): If True, add normalization after patch embedding. Default: True
|
||||
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
|
||||
upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compression artifact reduction
|
||||
img_range: Image range. 1. or 255.
|
||||
upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
|
||||
resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
|
||||
"""
|
||||
|
||||
def __init__(self, img_size=64, patch_size=1, in_chans=3,
|
||||
embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
|
||||
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
|
||||
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
|
||||
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
|
||||
use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
|
||||
**kwargs):
|
||||
super(SwinIR, self).__init__()
|
||||
num_in_ch = in_chans
|
||||
num_out_ch = in_chans
|
||||
num_feat = 64
|
||||
self.img_range = img_range
|
||||
if in_chans == 3:
|
||||
rgb_mean = (0.4488, 0.4371, 0.4040)
|
||||
self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
|
||||
else:
|
||||
self.mean = torch.zeros(1, 1, 1, 1)
|
||||
self.upscale = upscale
|
||||
self.upsampler = upsampler
|
||||
self.window_size = window_size
|
||||
|
||||
#####################################################################################################
|
||||
################################### 1, shallow feature extraction ###################################
|
||||
self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
|
||||
|
||||
#####################################################################################################
|
||||
################################### 2, deep feature extraction ######################################
|
||||
self.num_layers = len(depths)
|
||||
self.embed_dim = embed_dim
|
||||
self.ape = ape
|
||||
self.patch_norm = patch_norm
|
||||
self.num_features = embed_dim
|
||||
self.mlp_ratio = mlp_ratio
|
||||
|
||||
# split image into non-overlapping patches
|
||||
self.patch_embed = PatchEmbed(
|
||||
img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
|
||||
norm_layer=norm_layer if self.patch_norm else None)
|
||||
num_patches = self.patch_embed.num_patches
|
||||
patches_resolution = self.patch_embed.patches_resolution
|
||||
self.patches_resolution = patches_resolution
|
||||
|
||||
# merge non-overlapping patches into image
|
||||
self.patch_unembed = PatchUnEmbed(
|
||||
img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
|
||||
norm_layer=norm_layer if self.patch_norm else None)
|
||||
|
||||
# absolute position embedding
|
||||
if self.ape:
|
||||
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
|
||||
trunc_normal_(self.absolute_pos_embed, std=.02)
|
||||
|
||||
self.pos_drop = nn.Dropout(p=drop_rate)
|
||||
|
||||
# stochastic depth
|
||||
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
|
||||
|
||||
# build Residual Swin Transformer blocks (RSTB)
|
||||
self.layers = nn.ModuleList()
|
||||
for i_layer in range(self.num_layers):
|
||||
layer = RSTB(dim=embed_dim,
|
||||
input_resolution=(patches_resolution[0],
|
||||
patches_resolution[1]),
|
||||
depth=depths[i_layer],
|
||||
num_heads=num_heads[i_layer],
|
||||
window_size=window_size,
|
||||
mlp_ratio=self.mlp_ratio,
|
||||
qkv_bias=qkv_bias, qk_scale=qk_scale,
|
||||
drop=drop_rate, attn_drop=attn_drop_rate,
|
||||
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
|
||||
norm_layer=norm_layer,
|
||||
downsample=None,
|
||||
use_checkpoint=use_checkpoint,
|
||||
img_size=img_size,
|
||||
patch_size=patch_size,
|
||||
resi_connection=resi_connection
|
||||
|
||||
)
|
||||
self.layers.append(layer)
|
||||
self.norm = norm_layer(self.num_features)
|
||||
|
||||
# build the last conv layer in deep feature extraction
|
||||
if resi_connection == '1conv':
|
||||
self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
|
||||
elif resi_connection == '3conv':
|
||||
# to save parameters and memory
|
||||
self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
|
||||
nn.LeakyReLU(negative_slope=0.2, inplace=True),
|
||||
nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
|
||||
nn.LeakyReLU(negative_slope=0.2, inplace=True),
|
||||
nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
|
||||
|
||||
#####################################################################################################
|
||||
################################ 3, high quality image reconstruction ################################
|
||||
if self.upsampler == 'pixelshuffle':
|
||||
# for classical SR
|
||||
self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
|
||||
nn.LeakyReLU(inplace=True))
|
||||
self.upsample = Upsample(upscale, num_feat)
|
||||
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
|
||||
elif self.upsampler == 'pixelshuffledirect':
|
||||
# for lightweight SR (to save parameters)
|
||||
self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
|
||||
(patches_resolution[0], patches_resolution[1]))
|
||||
elif self.upsampler == 'nearest+conv':
|
||||
# for real-world SR (less artifacts)
|
||||
self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
|
||||
nn.LeakyReLU(inplace=True))
|
||||
self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
||||
if self.upscale == 4:
|
||||
self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
||||
self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
||||
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
|
||||
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
|
||||
else:
|
||||
# for image denoising and JPEG compression artifact reduction
|
||||
self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
|
||||
|
||||
self.apply(self._init_weights)
|
||||
|
||||
def _init_weights(self, m):
|
||||
if isinstance(m, nn.Linear):
|
||||
trunc_normal_(m.weight, std=.02)
|
||||
if isinstance(m, nn.Linear) and m.bias is not None:
|
||||
nn.init.constant_(m.bias, 0)
|
||||
elif isinstance(m, nn.LayerNorm):
|
||||
nn.init.constant_(m.bias, 0)
|
||||
nn.init.constant_(m.weight, 1.0)
|
||||
|
||||
@torch.jit.ignore
|
||||
def no_weight_decay(self):
|
||||
return {'absolute_pos_embed'}
|
||||
|
||||
@torch.jit.ignore
|
||||
def no_weight_decay_keywords(self):
|
||||
return {'relative_position_bias_table'}
|
||||
|
||||
def check_image_size(self, x):
|
||||
_, _, h, w = x.size()
|
||||
mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
|
||||
mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
|
||||
x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
|
||||
return x
|
||||
|
||||
def forward_features(self, x):
|
||||
x_size = (x.shape[2], x.shape[3])
|
||||
x = self.patch_embed(x)
|
||||
if self.ape:
|
||||
x = x + self.absolute_pos_embed
|
||||
x = self.pos_drop(x)
|
||||
|
||||
for layer in self.layers:
|
||||
x = layer(x, x_size)
|
||||
|
||||
x = self.norm(x) # B L C
|
||||
x = self.patch_unembed(x, x_size)
|
||||
|
||||
return x
|
||||
|
||||
def forward(self, x):
|
||||
H, W = x.shape[2:]
|
||||
x = self.check_image_size(x)
|
||||
|
||||
self.mean = self.mean.type_as(x)
|
||||
x = (x - self.mean) * self.img_range
|
||||
|
||||
if self.upsampler == 'pixelshuffle':
|
||||
# for classical SR
|
||||
x = self.conv_first(x)
|
||||
x = self.conv_after_body(self.forward_features(x)) + x
|
||||
x = self.conv_before_upsample(x)
|
||||
x = self.conv_last(self.upsample(x))
|
||||
elif self.upsampler == 'pixelshuffledirect':
|
||||
# for lightweight SR
|
||||
x = self.conv_first(x)
|
||||
x = self.conv_after_body(self.forward_features(x)) + x
|
||||
x = self.upsample(x)
|
||||
elif self.upsampler == 'nearest+conv':
|
||||
# for real-world SR
|
||||
x = self.conv_first(x)
|
||||
x = self.conv_after_body(self.forward_features(x)) + x
|
||||
x = self.conv_before_upsample(x)
|
||||
x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
|
||||
if self.upscale == 4:
|
||||
x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
|
||||
x = self.conv_last(self.lrelu(self.conv_hr(x)))
|
||||
else:
|
||||
# for image denoising and JPEG compression artifact reduction
|
||||
x_first = self.conv_first(x)
|
||||
res = self.conv_after_body(self.forward_features(x_first)) + x_first
|
||||
x = x + self.conv_last(res)
|
||||
|
||||
x = x / self.img_range + self.mean
|
||||
|
||||
return x[:, :, :H*self.upscale, :W*self.upscale]
|
||||
|
||||
def flops(self):
|
||||
flops = 0
|
||||
H, W = self.patches_resolution
|
||||
flops += H * W * 3 * self.embed_dim * 9
|
||||
flops += self.patch_embed.flops()
|
||||
for layer in self.layers:
|
||||
flops += layer.flops()
|
||||
flops += H * W * 3 * self.embed_dim * self.embed_dim
|
||||
flops += self.upsample.flops()
|
||||
return flops
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
upscale = 4
|
||||
window_size = 8
|
||||
height = (1024 // upscale // window_size + 1) * window_size
|
||||
width = (720 // upscale // window_size + 1) * window_size
|
||||
model = SwinIR(upscale=2, img_size=(height, width),
|
||||
window_size=window_size, img_range=1., depths=[6, 6, 6, 6],
|
||||
embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=2, upsampler='pixelshuffledirect')
|
||||
print(model)
|
||||
print(height, width, model.flops() / 1e9)
|
||||
|
||||
x = torch.randn((1, 3, height, width))
|
||||
x = model(x)
|
||||
print(x.shape)
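# Hedged extra check (not in the original file): check_image_size() pads the input to a
# multiple of window_size and forward() crops the result back, so sizes that are not
# multiples of the window still work at inference time.
model2 = SwinIR(upscale=4, img_size=64, window_size=window_size, img_range=1.,
                depths=[6, 6, 6, 6], embed_dim=60, num_heads=[6, 6, 6, 6],
                mlp_ratio=2, upsampler='pixelshuffledirect')
y = model2(torch.randn(1, 3, 70, 53))  # 70x53 is not a multiple of window_size=8
print(y.shape)  # torch.Size([1, 3, 280, 212])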
|
1017
extensions-builtin/SwinIR/swinir_model_arch_v2.py
Normal file
1017
extensions-builtin/SwinIR/swinir_model_arch_v2.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -1,995 +0,0 @@
|
||||
onUiLoaded(async() => {
|
||||
const elementIDs = {
|
||||
img2imgTabs: "#mode_img2img .tab-nav",
|
||||
inpaint: "#img2maskimg",
|
||||
inpaintSketch: "#inpaint_sketch",
|
||||
rangeGroup: "#img2img_column_size",
|
||||
sketch: "#img2img_sketch"
|
||||
};
|
||||
const tabNameToElementId = {
|
||||
"Inpaint sketch": elementIDs.inpaintSketch,
|
||||
"Inpaint": elementIDs.inpaint,
|
||||
"Sketch": elementIDs.sketch
|
||||
};
|
||||
|
||||
|
||||
// Helper functions
|
||||
// Get active tab
|
||||
|
||||
/**
|
||||
* Waits for an element to be present in the DOM.
|
||||
*/
|
||||
const waitForElement = (id) => new Promise(resolve => {
|
||||
const checkForElement = () => {
|
||||
const element = document.querySelector(id);
|
||||
if (element) return resolve(element);
|
||||
setTimeout(checkForElement, 100);
|
||||
};
|
||||
checkForElement();
|
||||
});
|
||||
|
||||
function getActiveTab(elements, all = false) {
|
||||
if (!elements.img2imgTabs) return null;
|
||||
const tabs = elements.img2imgTabs.querySelectorAll("button");
|
||||
|
||||
if (all) return tabs;
|
||||
|
||||
for (let tab of tabs) {
|
||||
if (tab.classList.contains("selected")) {
|
||||
return tab;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get tab ID
|
||||
function getTabId(elements) {
|
||||
const activeTab = getActiveTab(elements);
|
||||
if (!activeTab) return null;
|
||||
return tabNameToElementId[activeTab.innerText];
|
||||
}
|
||||
|
||||
// Wait until opts loaded
|
||||
async function waitForOpts() {
|
||||
for (; ;) {
|
||||
if (window.opts && Object.keys(window.opts).length) {
|
||||
return window.opts;
|
||||
}
|
||||
await new Promise(resolve => setTimeout(resolve, 100));
|
||||
}
|
||||
}
|
||||
|
||||
// Detect whether the element has a horizontal scroll bar
|
||||
function hasHorizontalScrollbar(element) {
|
||||
return element.scrollWidth > element.clientWidth;
|
||||
}
|
||||
|
||||
// Check whether the given modifier key ("Ctrl", "Shift" or "Alt") is pressed in the event
|
||||
function isModifierKey(event, key) {
|
||||
switch (key) {
|
||||
case "Ctrl":
|
||||
return event.ctrlKey;
|
||||
case "Shift":
|
||||
return event.shiftKey;
|
||||
case "Alt":
|
||||
return event.altKey;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if hotkey is valid
|
||||
function isValidHotkey(value) {
|
||||
const specialKeys = ["Ctrl", "Alt", "Shift", "Disable"];
|
||||
return (
|
||||
(typeof value === "string" &&
|
||||
value.length === 1 &&
|
||||
/[a-z]/i.test(value)) ||
|
||||
specialKeys.includes(value)
|
||||
);
|
||||
}
|
||||
|
||||
// Normalize hotkey
|
||||
function normalizeHotkey(hotkey) {
|
||||
return hotkey.length === 1 ? "Key" + hotkey.toUpperCase() : hotkey;
|
||||
}
|
||||
|
||||
// Format hotkey for display
|
||||
function formatHotkeyForDisplay(hotkey) {
|
||||
return hotkey.startsWith("Key") ? hotkey.slice(3) : hotkey;
|
||||
}
|
||||
|
||||
// Create hotkey configuration with the provided options
|
||||
function createHotkeyConfig(defaultHotkeysConfig, hotkeysConfigOpts) {
|
||||
const result = {}; // Resulting hotkey configuration
|
||||
const usedKeys = new Set(); // Set of used hotkeys
|
||||
|
||||
// Iterate through defaultHotkeysConfig keys
|
||||
for (const key in defaultHotkeysConfig) {
|
||||
const userValue = hotkeysConfigOpts[key]; // User-provided hotkey value
|
||||
const defaultValue = defaultHotkeysConfig[key]; // Default hotkey value
|
||||
|
||||
// Apply appropriate value for undefined, boolean, or object userValue
|
||||
if (
|
||||
userValue === undefined ||
|
||||
typeof userValue === "boolean" ||
|
||||
typeof userValue === "object" ||
|
||||
userValue === "disable"
|
||||
) {
|
||||
result[key] =
|
||||
userValue === undefined ? defaultValue : userValue;
|
||||
} else if (isValidHotkey(userValue)) {
|
||||
const normalizedUserValue = normalizeHotkey(userValue);
|
||||
|
||||
// Check for conflicting hotkeys
|
||||
if (!usedKeys.has(normalizedUserValue)) {
|
||||
usedKeys.add(normalizedUserValue);
|
||||
result[key] = normalizedUserValue;
|
||||
} else {
|
||||
console.error(
|
||||
`Hotkey: ${formatHotkeyForDisplay(
|
||||
userValue
|
||||
)} for ${key} is repeated and conflicts with another hotkey. The default hotkey is used: ${formatHotkeyForDisplay(
|
||||
defaultValue
|
||||
)}`
|
||||
);
|
||||
result[key] = defaultValue;
|
||||
}
|
||||
} else {
|
||||
console.error(
|
||||
`Hotkey: ${formatHotkeyForDisplay(
|
||||
userValue
|
||||
)} for ${key} is not valid. The default hotkey is used: ${formatHotkeyForDisplay(
|
||||
defaultValue
|
||||
)}`
|
||||
);
|
||||
result[key] = defaultValue;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
// Disables functions in the config object based on the provided list of function names
|
||||
function disableFunctions(config, disabledFunctions) {
|
||||
// Bind the hasOwnProperty method to the functionMap object to avoid errors
|
||||
const hasOwnProperty =
|
||||
Object.prototype.hasOwnProperty.bind(functionMap);
|
||||
|
||||
// Loop through the disabledFunctions array and disable the corresponding functions in the config object
|
||||
disabledFunctions.forEach(funcName => {
|
||||
if (hasOwnProperty(funcName)) {
|
||||
const key = functionMap[funcName];
|
||||
config[key] = "disable";
|
||||
}
|
||||
});
|
||||
|
||||
// Return the updated config object
|
||||
return config;
|
||||
}
|
||||
|
||||
/**
|
||||
* The restoreImgRedMask function displays a red mask around an image to indicate the aspect ratio.
|
||||
* If the image display property is set to 'none', the mask breaks. To fix this, the function
|
||||
* temporarily sets the display property to 'block' and then hides the mask again after 400 milliseconds
|
||||
* to avoid breaking the canvas. Additionally, the function adjusts the mask to work correctly on
|
||||
* very long images.
|
||||
*/
|
||||
function restoreImgRedMask(elements) {
|
||||
const mainTabId = getTabId(elements);
|
||||
|
||||
if (!mainTabId) return;
|
||||
|
||||
const mainTab = gradioApp().querySelector(mainTabId);
|
||||
const img = mainTab.querySelector("img");
|
||||
const imageARPreview = gradioApp().querySelector("#imageARPreview");
|
||||
|
||||
if (!img || !imageARPreview) return;
|
||||
|
||||
imageARPreview.style.transform = "";
|
||||
if (parseFloat(mainTab.style.width) > 865) {
|
||||
const transformString = mainTab.style.transform;
|
||||
const scaleMatch = transformString.match(
|
||||
/scale\(([-+]?[0-9]*\.?[0-9]+)\)/
|
||||
);
|
||||
let zoom = 1; // default zoom
|
||||
|
||||
if (scaleMatch && scaleMatch[1]) {
|
||||
zoom = Number(scaleMatch[1]);
|
||||
}
|
||||
|
||||
imageARPreview.style.transformOrigin = "0 0";
|
||||
imageARPreview.style.transform = `scale(${zoom})`;
|
||||
}
|
||||
|
||||
if (img.style.display !== "none") return;
|
||||
|
||||
img.style.display = "block";
|
||||
|
||||
setTimeout(() => {
|
||||
img.style.display = "none";
|
||||
}, 400);
|
||||
}
|
||||
|
||||
const hotkeysConfigOpts = await waitForOpts();
|
||||
|
||||
// Default config
|
||||
const defaultHotkeysConfig = {
|
||||
canvas_hotkey_zoom: "Alt",
|
||||
canvas_hotkey_adjust: "Ctrl",
|
||||
canvas_hotkey_reset: "KeyR",
|
||||
canvas_hotkey_fullscreen: "KeyS",
|
||||
canvas_hotkey_move: "KeyF",
|
||||
canvas_hotkey_overlap: "KeyO",
|
||||
canvas_hotkey_shrink_brush: "KeyQ",
|
||||
canvas_hotkey_grow_brush: "KeyW",
|
||||
canvas_disabled_functions: [],
|
||||
canvas_show_tooltip: true,
|
||||
canvas_auto_expand: true,
|
||||
canvas_blur_prompt: false,
|
||||
};
|
||||
|
||||
const functionMap = {
|
||||
"Zoom": "canvas_hotkey_zoom",
|
||||
"Adjust brush size": "canvas_hotkey_adjust",
|
||||
"Hotkey shrink brush": "canvas_hotkey_shrink_brush",
|
||||
"Hotkey enlarge brush": "canvas_hotkey_grow_brush",
|
||||
"Moving canvas": "canvas_hotkey_move",
|
||||
"Fullscreen": "canvas_hotkey_fullscreen",
|
||||
"Reset Zoom": "canvas_hotkey_reset",
|
||||
"Overlap": "canvas_hotkey_overlap"
|
||||
};
|
||||
|
||||
// Loading the configuration from opts
|
||||
const preHotkeysConfig = createHotkeyConfig(
|
||||
defaultHotkeysConfig,
|
||||
hotkeysConfigOpts
|
||||
);
|
||||
|
||||
// Disable functions that are not needed by the user
|
||||
const hotkeysConfig = disableFunctions(
|
||||
preHotkeysConfig,
|
||||
preHotkeysConfig.canvas_disabled_functions
|
||||
);
|
||||
|
||||
let isMoving = false;
|
||||
let mouseX, mouseY;
|
||||
let activeElement;
|
||||
let interactedWithAltKey = false;
|
||||
|
||||
const elements = Object.fromEntries(
|
||||
Object.keys(elementIDs).map(id => [
|
||||
id,
|
||||
gradioApp().querySelector(elementIDs[id])
|
||||
])
|
||||
);
|
||||
const elemData = {};
|
||||
|
||||
// Apply functionality to the range inputs. Restore the red mask and correct for long images.
|
||||
const rangeInputs = elements.rangeGroup ?
|
||||
Array.from(elements.rangeGroup.querySelectorAll("input")) :
|
||||
[
|
||||
gradioApp().querySelector("#img2img_width input[type='range']"),
|
||||
gradioApp().querySelector("#img2img_height input[type='range']")
|
||||
];
|
||||
|
||||
for (const input of rangeInputs) {
|
||||
input?.addEventListener("input", () => restoreImgRedMask(elements));
|
||||
}
|
||||
|
||||
function applyZoomAndPan(elemId, isExtension = true) {
|
||||
const targetElement = gradioApp().querySelector(elemId);
|
||||
|
||||
if (!targetElement) {
|
||||
console.log("Element not found", elemId);
|
||||
return;
|
||||
}
|
||||
|
||||
targetElement.style.transformOrigin = "0 0";
|
||||
|
||||
elemData[elemId] = {
|
||||
zoom: 1,
|
||||
panX: 0,
|
||||
panY: 0
|
||||
};
|
||||
let fullScreenMode = false;
|
||||
|
||||
// Create tooltip
|
||||
function createTooltip() {
|
||||
const toolTipElement =
|
||||
targetElement.querySelector(".image-container");
|
||||
const tooltip = document.createElement("div");
|
||||
tooltip.className = "canvas-tooltip";
|
||||
|
||||
// Create the info icon element
|
||||
const info = document.createElement("i");
|
||||
info.className = "canvas-tooltip-info";
|
||||
info.textContent = "";
|
||||
|
||||
// Create a container for the contents of the tooltip
|
||||
const tooltipContent = document.createElement("div");
|
||||
tooltipContent.className = "canvas-tooltip-content";
|
||||
|
||||
// Define an array with hotkey information and their actions
|
||||
const hotkeysInfo = [
|
||||
{
|
||||
configKey: "canvas_hotkey_zoom",
|
||||
action: "Zoom canvas",
|
||||
keySuffix: " + wheel"
|
||||
},
|
||||
{
|
||||
configKey: "canvas_hotkey_adjust",
|
||||
action: "Adjust brush size",
|
||||
keySuffix: " + wheel"
|
||||
},
|
||||
{configKey: "canvas_hotkey_reset", action: "Reset zoom"},
|
||||
{
|
||||
configKey: "canvas_hotkey_fullscreen",
|
||||
action: "Fullscreen mode"
|
||||
},
|
||||
{configKey: "canvas_hotkey_move", action: "Move canvas"},
|
||||
{configKey: "canvas_hotkey_overlap", action: "Overlap"}
|
||||
];
|
||||
|
||||
// Create hotkeys array with disabled property based on the config values
|
||||
const hotkeys = hotkeysInfo.map(info => {
|
||||
const configValue = hotkeysConfig[info.configKey];
|
||||
const key = info.keySuffix ?
|
||||
`${configValue}${info.keySuffix}` :
|
||||
configValue.charAt(configValue.length - 1);
|
||||
return {
|
||||
key,
|
||||
action: info.action,
|
||||
disabled: configValue === "disable"
|
||||
};
|
||||
});
|
||||
|
||||
for (const hotkey of hotkeys) {
|
||||
if (hotkey.disabled) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const p = document.createElement("p");
|
||||
p.innerHTML = `<b>${hotkey.key}</b> - ${hotkey.action}`;
|
||||
tooltipContent.appendChild(p);
|
||||
}
|
||||
|
||||
// Add information and content elements to the tooltip element
|
||||
tooltip.appendChild(info);
|
||||
tooltip.appendChild(tooltipContent);
|
||||
|
||||
// Add a hint element to the target element
|
||||
toolTipElement.appendChild(tooltip);
|
||||
}
|
||||
|
||||
// Show the tooltip if the setting is enabled
|
||||
if (hotkeysConfig.canvas_show_tooltip) {
|
||||
createTooltip();
|
||||
}
|
||||
|
||||
// Testing showed that the img tag interferes badly with zooming and can leave blank white canvases. Hiding it works around the problem and has no effect on the webui.
|
||||
function fixCanvas() {
|
||||
const activeTab = getActiveTab(elements)?.textContent.trim();
|
||||
|
||||
if (activeTab && activeTab !== "img2img") {
|
||||
const img = targetElement.querySelector(`${elemId} img`);
|
||||
|
||||
if (img && img.style.display !== "none") {
|
||||
img.style.display = "none";
|
||||
img.style.visibility = "hidden";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reset the zoom level and pan position of the target element to their initial values
|
||||
function resetZoom() {
|
||||
elemData[elemId] = {
|
||||
zoomLevel: 1,
|
||||
panX: 0,
|
||||
panY: 0
|
||||
};
|
||||
|
||||
if (isExtension) {
|
||||
targetElement.style.overflow = "hidden";
|
||||
}
|
||||
|
||||
targetElement.isZoomed = false;
|
||||
|
||||
fixCanvas();
|
||||
targetElement.style.transform = `scale(${elemData[elemId].zoomLevel}) translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px)`;
|
||||
|
||||
const canvas = gradioApp().querySelector(
|
||||
`${elemId} canvas[key="interface"]`
|
||||
);
|
||||
|
||||
toggleOverlap("off");
|
||||
fullScreenMode = false;
|
||||
|
||||
const closeBtn = targetElement.querySelector("button[aria-label='Remove Image']");
|
||||
if (closeBtn) {
|
||||
closeBtn.addEventListener("click", resetZoom);
|
||||
}
|
||||
|
||||
if (canvas && isExtension) {
|
||||
const parentElement = targetElement.closest('[id^="component-"]');
|
||||
if (
|
||||
canvas &&
|
||||
parseFloat(canvas.style.width) > parentElement.offsetWidth &&
|
||||
parseFloat(targetElement.style.width) > parentElement.offsetWidth
|
||||
) {
|
||||
fitToElement();
|
||||
return;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (
|
||||
canvas &&
|
||||
!isExtension &&
|
||||
parseFloat(canvas.style.width) > 865 &&
|
||||
parseFloat(targetElement.style.width) > 865
|
||||
) {
|
||||
fitToElement();
|
||||
return;
|
||||
}
|
||||
|
||||
targetElement.style.width = "";
|
||||
}
|
||||
|
||||
// Toggle the zIndex of the target element between two values, allowing it to overlap or be overlapped by other elements
|
||||
function toggleOverlap(forced = "") {
|
||||
const zIndex1 = "0";
|
||||
const zIndex2 = "998";
|
||||
|
||||
targetElement.style.zIndex =
|
||||
targetElement.style.zIndex !== zIndex2 ? zIndex2 : zIndex1;
|
||||
|
||||
if (forced === "off") {
|
||||
targetElement.style.zIndex = zIndex1;
|
||||
} else if (forced === "on") {
|
||||
targetElement.style.zIndex = zIndex2;
|
||||
}
|
||||
}
|
||||
|
||||
// Adjust the brush size based on the deltaY value from a mouse wheel event
|
||||
function adjustBrushSize(
|
||||
elemId,
|
||||
deltaY,
|
||||
withoutValue = false,
|
||||
percentage = 5
|
||||
) {
|
||||
const input =
|
||||
gradioApp().querySelector(
|
||||
`${elemId} input[aria-label='Brush radius']`
|
||||
) ||
|
||||
gradioApp().querySelector(
|
||||
`${elemId} button[aria-label="Use brush"]`
|
||||
);
|
||||
|
||||
if (input) {
|
||||
input.click();
|
||||
if (!withoutValue) {
|
||||
const maxValue =
|
||||
parseFloat(input.getAttribute("max")) || 100;
|
||||
const changeAmount = maxValue * (percentage / 100);
|
||||
const newValue =
|
||||
parseFloat(input.value) +
|
||||
(deltaY > 0 ? -changeAmount : changeAmount);
|
||||
input.value = Math.min(Math.max(newValue, 0), maxValue);
|
||||
input.dispatchEvent(new Event("change"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reset zoom when uploading a new image
|
||||
const fileInput = gradioApp().querySelector(
|
||||
`${elemId} input[type="file"][accept="image/*"].svelte-116rqfv`
|
||||
);
|
||||
fileInput.addEventListener("click", resetZoom);
|
||||
|
||||
// Update the zoom level and pan position of the target element based on the values of the zoomLevel, panX and panY variables
|
||||
function updateZoom(newZoomLevel, mouseX, mouseY) {
|
||||
newZoomLevel = Math.max(0.1, Math.min(newZoomLevel, 15));
|
||||
|
||||
elemData[elemId].panX +=
|
||||
mouseX - (mouseX * newZoomLevel) / elemData[elemId].zoomLevel;
|
||||
elemData[elemId].panY +=
|
||||
mouseY - (mouseY * newZoomLevel) / elemData[elemId].zoomLevel;
|
||||
|
||||
targetElement.style.transformOrigin = "0 0";
|
||||
targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${newZoomLevel})`;
|
||||
|
||||
toggleOverlap("on");
|
||||
if (isExtension) {
|
||||
targetElement.style.overflow = "visible";
|
||||
}
|
||||
|
||||
return newZoomLevel;
|
||||
}
|
||||
|
||||
// Change the zoom level based on user interaction
|
||||
function changeZoomLevel(operation, e) {
|
||||
if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) {
|
||||
e.preventDefault();
|
||||
|
||||
if (hotkeysConfig.canvas_hotkey_zoom === "Alt") {
|
||||
interactedWithAltKey = true;
|
||||
}
|
||||
|
||||
let zoomPosX, zoomPosY;
|
||||
let delta = 0.2;
|
||||
if (elemData[elemId].zoomLevel > 7) {
|
||||
delta = 0.9;
|
||||
} else if (elemData[elemId].zoomLevel > 2) {
|
||||
delta = 0.6;
|
||||
}
|
||||
|
||||
zoomPosX = e.clientX;
|
||||
zoomPosY = e.clientY;
|
||||
|
||||
fullScreenMode = false;
|
||||
elemData[elemId].zoomLevel = updateZoom(
|
||||
elemData[elemId].zoomLevel +
|
||||
(operation === "+" ? delta : -delta),
|
||||
zoomPosX - targetElement.getBoundingClientRect().left,
|
||||
zoomPosY - targetElement.getBoundingClientRect().top
|
||||
);
|
||||
|
||||
targetElement.isZoomed = true;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This function fits the target element to its parent container by calculating
|
||||
* the required scale and offsets. It also updates the global variables
|
||||
* zoomLevel, panX, and panY to reflect the new state.
|
||||
*/
|
||||
|
||||
function fitToElement() {
|
||||
//Reset Zoom
|
||||
targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
|
||||
|
||||
let parentElement;
|
||||
|
||||
if (isExtension) {
|
||||
parentElement = targetElement.closest('[id^="component-"]');
|
||||
} else {
|
||||
parentElement = targetElement.parentElement;
|
||||
}
|
||||
|
||||
|
||||
// Get element and screen dimensions
|
||||
const elementWidth = targetElement.offsetWidth;
|
||||
const elementHeight = targetElement.offsetHeight;
|
||||
|
||||
const screenWidth = parentElement.clientWidth;
|
||||
const screenHeight = parentElement.clientHeight;
|
||||
|
||||
// Get element's coordinates relative to the parent element
|
||||
const elementRect = targetElement.getBoundingClientRect();
|
||||
const parentRect = parentElement.getBoundingClientRect();
|
||||
const elementX = elementRect.x - parentRect.x;
|
||||
|
||||
// Calculate scale and offsets
|
||||
const scaleX = screenWidth / elementWidth;
|
||||
const scaleY = screenHeight / elementHeight;
|
||||
const scale = Math.min(scaleX, scaleY);
|
||||
|
||||
const transformOrigin =
|
||||
window.getComputedStyle(targetElement).transformOrigin;
|
||||
const [originX, originY] = transformOrigin.split(" ");
|
||||
const originXValue = parseFloat(originX);
|
||||
const originYValue = parseFloat(originY);
|
||||
|
||||
const offsetX =
|
||||
(screenWidth - elementWidth * scale) / 2 -
|
||||
originXValue * (1 - scale);
|
||||
const offsetY =
|
||||
(screenHeight - elementHeight * scale) / 2.5 -
|
||||
originYValue * (1 - scale);
|
||||
|
||||
// Apply scale and offsets to the element
|
||||
targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
|
||||
|
||||
// Update global variables
|
||||
elemData[elemId].zoomLevel = scale;
|
||||
elemData[elemId].panX = offsetX;
|
||||
elemData[elemId].panY = offsetY;
|
||||
|
||||
fullScreenMode = false;
|
||||
toggleOverlap("off");
|
||||
}
|
||||
|
||||
/**
|
||||
* This function fits the target element to the screen by calculating
|
||||
* the required scale and offsets. It also updates the global variables
|
||||
* zoomLevel, panX, and panY to reflect the new state.
|
||||
*/
|
||||
|
||||
// Fullscreen mode
|
||||
function fitToScreen() {
|
||||
const canvas = gradioApp().querySelector(
|
||||
`${elemId} canvas[key="interface"]`
|
||||
);
|
||||
|
||||
if (!canvas) return;
|
||||
|
||||
if (canvas.offsetWidth > 862 || isExtension) {
|
||||
targetElement.style.width = (canvas.offsetWidth + 2) + "px";
|
||||
}
|
||||
|
||||
if (isExtension) {
|
||||
targetElement.style.overflow = "visible";
|
||||
}
|
||||
|
||||
if (fullScreenMode) {
|
||||
resetZoom();
|
||||
fullScreenMode = false;
|
||||
return;
|
||||
}
|
||||
|
||||
//Reset Zoom
|
||||
targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
|
||||
|
||||
// Get scrollbar width to right-align the image
|
||||
const scrollbarWidth =
|
||||
window.innerWidth - document.documentElement.clientWidth;
|
||||
|
||||
// Get element and screen dimensions
|
||||
const elementWidth = targetElement.offsetWidth;
|
||||
const elementHeight = targetElement.offsetHeight;
|
||||
const screenWidth = window.innerWidth - scrollbarWidth;
|
||||
const screenHeight = window.innerHeight;
|
||||
|
||||
// Get element's coordinates relative to the page
|
||||
const elementRect = targetElement.getBoundingClientRect();
|
||||
const elementY = elementRect.y;
|
||||
const elementX = elementRect.x;
|
||||
|
||||
// Calculate scale and offsets
|
||||
const scaleX = screenWidth / elementWidth;
|
||||
const scaleY = screenHeight / elementHeight;
|
||||
const scale = Math.min(scaleX, scaleY);
|
||||
|
||||
// Get the current transformOrigin
|
||||
const computedStyle = window.getComputedStyle(targetElement);
|
||||
const transformOrigin = computedStyle.transformOrigin;
|
||||
const [originX, originY] = transformOrigin.split(" ");
|
||||
const originXValue = parseFloat(originX);
|
||||
const originYValue = parseFloat(originY);
|
||||
|
||||
// Calculate offsets with respect to the transformOrigin
|
||||
const offsetX =
|
||||
(screenWidth - elementWidth * scale) / 2 -
|
||||
elementX -
|
||||
originXValue * (1 - scale);
|
||||
const offsetY =
|
||||
(screenHeight - elementHeight * scale) / 2 -
|
||||
elementY -
|
||||
originYValue * (1 - scale);
|
||||
|
||||
// Apply scale and offsets to the element
|
||||
targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
|
||||
|
||||
// Update global variables
|
||||
elemData[elemId].zoomLevel = scale;
|
||||
elemData[elemId].panX = offsetX;
|
||||
elemData[elemId].panY = offsetY;
|
||||
|
||||
fullScreenMode = true;
|
||||
toggleOverlap("on");
|
||||
}
|
||||
|
||||
// Handle keydown events
|
||||
function handleKeyDown(event) {
|
||||
// Do not intercept Ctrl+V / Ctrl+C / F5 so that clipboard copy/paste and page refresh keep working
|
||||
if ((event.ctrlKey && event.code === 'KeyV') || (event.ctrlKey && event.code === 'KeyC') || event.code === "F5") {
|
||||
return;
|
||||
}
|
||||
|
||||
// before activating shortcut, ensure user is not actively typing in an input field
|
||||
if (!hotkeysConfig.canvas_blur_prompt) {
|
||||
if (event.target.nodeName === 'TEXTAREA' || event.target.nodeName === 'INPUT') {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
const hotkeyActions = {
|
||||
[hotkeysConfig.canvas_hotkey_reset]: resetZoom,
|
||||
[hotkeysConfig.canvas_hotkey_overlap]: toggleOverlap,
|
||||
[hotkeysConfig.canvas_hotkey_fullscreen]: fitToScreen,
|
||||
[hotkeysConfig.canvas_hotkey_shrink_brush]: () => adjustBrushSize(elemId, 10),
|
||||
[hotkeysConfig.canvas_hotkey_grow_brush]: () => adjustBrushSize(elemId, -10)
|
||||
};
|
||||
|
||||
const action = hotkeyActions[event.code];
|
||||
if (action) {
|
||||
event.preventDefault();
|
||||
action(event);
|
||||
}
|
||||
|
||||
if (
|
||||
isModifierKey(event, hotkeysConfig.canvas_hotkey_zoom) ||
|
||||
isModifierKey(event, hotkeysConfig.canvas_hotkey_adjust)
|
||||
) {
|
||||
event.preventDefault();
|
||||
}
|
||||
}
|
||||
|
||||
// Get Mouse position
|
||||
function getMousePosition(e) {
|
||||
mouseX = e.offsetX;
|
||||
mouseY = e.offsetY;
|
||||
}
|
||||
|
||||
// Auto-expand: fit a long image into the visible area.
|
||||
// If the image has a horizontal scroll bar, briefly switch to fullscreen to reveal it, then shrink it back to fit the element.
|
||||
// The element is hidden during the adjustment and shown to the user again once it is ready.
|
||||
|
||||
targetElement.isExpanded = false;
|
||||
function autoExpand() {
|
||||
const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
|
||||
if (canvas) {
|
||||
if (hasHorizontalScrollbar(targetElement) && targetElement.isExpanded === false) {
|
||||
targetElement.style.visibility = "hidden";
|
||||
setTimeout(() => {
|
||||
fitToScreen();
|
||||
resetZoom();
|
||||
targetElement.style.visibility = "visible";
|
||||
targetElement.isExpanded = true;
|
||||
}, 10);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
targetElement.addEventListener("mousemove", getMousePosition);
|
||||
|
||||
//observers
|
||||
// Creating an observer with a callback function to handle DOM changes
|
||||
const observer = new MutationObserver((mutationsList, observer) => {
|
||||
for (let mutation of mutationsList) {
|
||||
// If the canvas's style attribute has changed (observed to happen only when the picture changes)
|
||||
if (mutation.type === 'attributes' && mutation.attributeName === 'style' &&
|
||||
mutation.target.tagName.toLowerCase() === 'canvas') {
|
||||
targetElement.isExpanded = false;
|
||||
setTimeout(resetZoom, 10);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Apply auto expand if enabled
|
||||
if (hotkeysConfig.canvas_auto_expand) {
|
||||
targetElement.addEventListener("mousemove", autoExpand);
|
||||
// Set up an observer to track attribute changes
|
||||
observer.observe(targetElement, {attributes: true, childList: true, subtree: true});
|
||||
}
|
||||
|
||||
// Handle events only inside the targetElement
|
||||
let isKeyDownHandlerAttached = false;
|
||||
|
||||
function handleMouseMove() {
|
||||
if (!isKeyDownHandlerAttached) {
|
||||
document.addEventListener("keydown", handleKeyDown);
|
||||
isKeyDownHandlerAttached = true;
|
||||
|
||||
activeElement = elemId;
|
||||
}
|
||||
}
|
||||
|
||||
function handleMouseLeave() {
|
||||
if (isKeyDownHandlerAttached) {
|
||||
document.removeEventListener("keydown", handleKeyDown);
|
||||
isKeyDownHandlerAttached = false;
|
||||
|
||||
activeElement = null;
|
||||
}
|
||||
}
|
||||
|
||||
// Add mouse event handlers
|
||||
targetElement.addEventListener("mousemove", handleMouseMove);
|
||||
targetElement.addEventListener("mouseleave", handleMouseLeave);
|
||||
|
||||
// Reset zoom when clicking on another tab
|
||||
if (elements.img2imgTabs) {
|
||||
elements.img2imgTabs.addEventListener("click", resetZoom);
|
||||
elements.img2imgTabs.addEventListener("click", () => {
|
||||
// targetElement.style.width = "";
|
||||
if (parseInt(targetElement.style.width) > 865) {
|
||||
setTimeout(fitToElement, 0);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
targetElement.addEventListener("wheel", e => {
|
||||
// change zoom level
|
||||
const operation = (e.deltaY || -e.wheelDelta) > 0 ? "-" : "+";
|
||||
changeZoomLevel(operation, e);
|
||||
|
||||
// Handle brush size adjustment with ctrl key pressed
|
||||
if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) {
|
||||
e.preventDefault();
|
||||
|
||||
if (hotkeysConfig.canvas_hotkey_adjust === "Alt") {
|
||||
interactedWithAltKey = true;
|
||||
}
|
||||
|
||||
// Increase or decrease brush size based on scroll direction
|
||||
adjustBrushSize(elemId, e.deltaY);
|
||||
}
|
||||
});
|
||||
|
||||
// Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
|
||||
function handleMoveKeyDown(e) {
|
||||
|
||||
// Do not intercept Ctrl+V / Ctrl+C / F5 so that clipboard copy/paste and page refresh keep working
|
||||
if ((e.ctrlKey && e.code === 'KeyV') || (e.ctrlKey && e.code === 'KeyC') || e.code === "F5") {
|
||||
return;
|
||||
}
|
||||
|
||||
// before activating shortcut, ensure user is not actively typing in an input field
|
||||
if (!hotkeysConfig.canvas_blur_prompt) {
|
||||
if (e.target.nodeName === 'TEXTAREA' || e.target.nodeName === 'INPUT') {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (e.code === hotkeysConfig.canvas_hotkey_move) {
|
||||
if (!e.ctrlKey && !e.metaKey && isKeyDownHandlerAttached) {
|
||||
e.preventDefault();
|
||||
document.activeElement.blur();
|
||||
isMoving = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function handleMoveKeyUp(e) {
|
||||
if (e.code === hotkeysConfig.canvas_hotkey_move) {
|
||||
isMoving = false;
|
||||
}
|
||||
}
|
||||
|
||||
document.addEventListener("keydown", handleMoveKeyDown);
|
||||
document.addEventListener("keyup", handleMoveKeyUp);
|
||||
|
||||
|
||||
// Prevent Firefox from opening its main menu when Alt is used as a hotkey for zoom or brush size
|
||||
function handleAltKeyUp(e) {
|
||||
if (e.key !== "Alt" || !interactedWithAltKey) {
|
||||
return;
|
||||
}
|
||||
|
||||
e.preventDefault();
|
||||
interactedWithAltKey = false;
|
||||
}
|
||||
|
||||
document.addEventListener("keyup", handleAltKeyUp);
|
||||
|
||||
|
||||
// Detect zoom level and update the pan speed.
|
||||
function updatePanPosition(movementX, movementY) {
|
||||
let panSpeed = 2;
|
||||
|
||||
if (elemData[elemId].zoomLevel > 8) {
|
||||
panSpeed = 3.5;
|
||||
}
|
||||
|
||||
elemData[elemId].panX += movementX * panSpeed;
|
||||
elemData[elemId].panY += movementY * panSpeed;
|
||||
|
||||
// Delayed redraw of an element
|
||||
requestAnimationFrame(() => {
|
||||
targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${elemData[elemId].zoomLevel})`;
|
||||
toggleOverlap("on");
|
||||
});
|
||||
}
|
||||
|
||||
function handleMoveByKey(e) {
|
||||
if (isMoving && elemId === activeElement) {
|
||||
updatePanPosition(e.movementX, e.movementY);
|
||||
targetElement.style.pointerEvents = "none";
|
||||
|
||||
if (isExtension) {
|
||||
targetElement.style.overflow = "visible";
|
||||
}
|
||||
|
||||
} else {
|
||||
targetElement.style.pointerEvents = "auto";
|
||||
}
|
||||
}
|
||||
|
||||
// Prevent the canvas from sticking to the mouse when the window loses focus
|
||||
window.onblur = function() {
|
||||
isMoving = false;
|
||||
};
|
||||
|
||||
// Extension-only check: reset the zoom when the canvas overflows its parent container
|
||||
function checkForOutBox() {
|
||||
const parentElement = targetElement.closest('[id^="component-"]');
|
||||
if (parentElement.offsetWidth < targetElement.offsetWidth && !targetElement.isExpanded) {
|
||||
resetZoom();
|
||||
targetElement.isExpanded = true;
|
||||
}
|
||||
|
||||
if (parentElement.offsetWidth < targetElement.offsetWidth && elemData[elemId].zoomLevel == 1) {
|
||||
resetZoom();
|
||||
}
|
||||
|
||||
if (parentElement.offsetWidth < targetElement.offsetWidth && targetElement.offsetWidth * elemData[elemId].zoomLevel > parentElement.offsetWidth && elemData[elemId].zoomLevel < 1 && !targetElement.isZoomed) {
|
||||
resetZoom();
|
||||
}
|
||||
}
|
||||
|
||||
if (isExtension) {
|
||||
targetElement.addEventListener("mousemove", checkForOutBox);
|
||||
}
|
||||
|
||||
|
||||
window.addEventListener('resize', (e) => {
|
||||
resetZoom();
|
||||
|
||||
if (isExtension) {
|
||||
targetElement.isExpanded = false;
|
||||
targetElement.isZoomed = false;
|
||||
}
|
||||
});
|
||||
|
||||
gradioApp().addEventListener("mousemove", handleMoveByKey);
|
||||
|
||||
|
||||
}
|
||||
|
||||
applyZoomAndPan(elementIDs.sketch, false);
|
||||
applyZoomAndPan(elementIDs.inpaint, false);
|
||||
applyZoomAndPan(elementIDs.inpaintSketch, false);
|
||||
|
||||
// Make the function global so that other extensions can take advantage of this solution
|
||||
const applyZoomAndPanIntegration = async(id, elementIDs) => {
|
||||
const mainEl = document.querySelector(id);
|
||||
if (id.toLocaleLowerCase() === "none") {
|
||||
for (const elementID of elementIDs) {
|
||||
const el = await waitForElement(elementID);
|
||||
if (!el) break;
|
||||
applyZoomAndPan(elementID);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (!mainEl) return;
|
||||
mainEl.addEventListener("click", async() => {
|
||||
for (const elementID of elementIDs) {
|
||||
const el = await waitForElement(elementID);
|
||||
if (!el) break;
|
||||
applyZoomAndPan(elementID);
|
||||
}
|
||||
}, {once: true});
|
||||
};
|
||||
|
||||
window.applyZoomAndPan = applyZoomAndPan; // Takes a single element ID as its argument, for example applyZoomAndPan("#txt2img_controlnet_ControlNet_input_image")
|
||||
|
||||
window.applyZoomAndPanIntegration = applyZoomAndPanIntegration; // for any extension
|
||||
|
||||
/*
|
||||
The function `applyZoomAndPanIntegration` takes two arguments:
|
||||
|
||||
1. `id`: A string identifier for the element to which zoom and pan functionality will be applied on click.
|
||||
If the `id` value is "none", the functionality will be applied to all elements specified in the second argument without a click event.
|
||||
|
||||
2. `elementIDs`: An array of string identifiers for elements. Zoom and pan functionality will be applied to each of these elements on click of the element specified by the first argument.
|
||||
If "none" is specified in the first argument, the functionality will be applied to each of these elements without a click event.
|
||||
|
||||
Example usage:
|
||||
applyZoomAndPanIntegration("#txt2img_controlnet", ["#txt2img_controlnet_ControlNet_input_image"]);
|
||||
In this example, zoom and pan functionality will be applied to the element with the identifier "txt2img_controlnet_ControlNet_input_image" upon clicking the element with the identifier "txt2img_controlnet".
|
||||
*/
|
||||
|
||||
// More examples
|
||||
// Add integration with ControlNet txt2img One TAB
|
||||
// applyZoomAndPanIntegration("#txt2img_controlnet", ["#txt2img_controlnet_ControlNet_input_image"]);
|
||||
|
||||
// Add integration with ControlNet txt2img Tabs
|
||||
// applyZoomAndPanIntegration("#txt2img_controlnet",Array.from({ length: 10 }, (_, i) => `#txt2img_controlnet_ControlNet-${i}_input_image`));
|
||||
|
||||
// Add integration with Inpaint Anything
|
||||
// applyZoomAndPanIntegration("None", ["#ia_sam_image", "#ia_sel_mask"]);
|
||||
});
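// Hedged usage sketch (the element IDs below are hypothetical, not part of this file): another
// extension's script can reuse the globals registered above once the UI has loaded.
onUiLoaded(() => {
    if (typeof window.applyZoomAndPanIntegration === "function") {
        // Attach zoom/pan to this extension's own image components when its tab is first clicked.
        window.applyZoomAndPanIntegration("#myext_tab", ["#myext_input_image", "#myext_mask_image"]);
    }
});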
|
@ -1,17 +0,0 @@
|
||||
import gradio as gr
|
||||
from modules import shared
|
||||
|
||||
shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), {
|
||||
"canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"),
|
||||
"canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"),
|
||||
"canvas_hotkey_shrink_brush": shared.OptionInfo("Q", "Shrink the brush size"),
|
||||
"canvas_hotkey_grow_brush": shared.OptionInfo("W", "Enlarge the brush size"),
|
||||
"canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in firefox, turn off 'Automatically search the page text when typing' in the browser settings"),
|
||||
"canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width "),
|
||||
"canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"),
|
||||
"canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"),
|
||||
"canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
|
||||
"canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
|
||||
"canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
|
||||
"canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable function that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size","Hotkey enlarge brush","Hotkey shrink brush","Moving canvas","Fullscreen","Reset Zoom","Overlap"]}),
|
||||
}))
|
@ -1,66 +0,0 @@
|
||||
.canvas-tooltip-info {
|
||||
position: absolute;
|
||||
top: 10px;
|
||||
left: 10px;
|
||||
cursor: help;
|
||||
background-color: rgba(0, 0, 0, 0.3);
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
border-radius: 50%;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
flex-direction: column;
|
||||
|
||||
z-index: 100;
|
||||
}
|
||||
|
||||
.canvas-tooltip-info::after {
|
||||
content: '';
|
||||
display: block;
|
||||
width: 2px;
|
||||
height: 7px;
|
||||
background-color: white;
|
||||
margin-top: 2px;
|
||||
}
|
||||
|
||||
.canvas-tooltip-info::before {
|
||||
content: '';
|
||||
display: block;
|
||||
width: 2px;
|
||||
height: 2px;
|
||||
background-color: white;
|
||||
}
|
||||
|
||||
.canvas-tooltip-content {
|
||||
display: none;
|
||||
background-color: #f9f9f9;
|
||||
color: #333;
|
||||
border: 1px solid #ddd;
|
||||
padding: 15px;
|
||||
position: absolute;
|
||||
top: 40px;
|
||||
left: 10px;
|
||||
width: 250px;
|
||||
font-size: 16px;
|
||||
opacity: 0;
|
||||
border-radius: 8px;
|
||||
box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
|
||||
|
||||
z-index: 100;
|
||||
}
|
||||
|
||||
.canvas-tooltip:hover .canvas-tooltip-content {
|
||||
display: block;
|
||||
animation: fadeIn 0.5s;
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
@keyframes fadeIn {
|
||||
from {opacity: 0;}
|
||||
to {opacity: 1;}
|
||||
}
|
||||
|
||||
.styler {
|
||||
overflow:inherit !important;
|
||||
}
|
@ -1,82 +0,0 @@
|
||||
import math
|
||||
|
||||
import gradio as gr
|
||||
from modules import scripts, shared, ui_components, ui_settings, infotext_utils, errors
|
||||
from modules.ui_components import FormColumn
|
||||
|
||||
|
||||
class ExtraOptionsSection(scripts.Script):
|
||||
section = "extra_options"
|
||||
|
||||
def __init__(self):
|
||||
self.comps = None
|
||||
self.setting_names = None
|
||||
|
||||
def title(self):
|
||||
return "Extra options"
|
||||
|
||||
def show(self, is_img2img):
|
||||
return scripts.AlwaysVisible
|
||||
|
||||
def ui(self, is_img2img):
|
||||
self.comps = []
|
||||
self.setting_names = []
|
||||
self.infotext_fields = []
|
||||
extra_options = shared.opts.extra_options_img2img if is_img2img else shared.opts.extra_options_txt2img
|
||||
elem_id_tabname = "extra_options_" + ("img2img" if is_img2img else "txt2img")
|
||||
|
||||
mapping = {k: v for v, k in infotext_utils.infotext_to_setting_name_mapping}
|
||||
|
||||
with gr.Blocks() as interface:
|
||||
with gr.Accordion("Options", open=False, elem_id=elem_id_tabname) if shared.opts.extra_options_accordion and extra_options else gr.Group(elem_id=elem_id_tabname):
|
||||
|
||||
row_count = math.ceil(len(extra_options) / shared.opts.extra_options_cols)
|
||||
|
||||
for row in range(row_count):
|
||||
with gr.Row():
|
||||
for col in range(shared.opts.extra_options_cols):
|
||||
index = row * shared.opts.extra_options_cols + col
|
||||
if index >= len(extra_options):
|
||||
break
|
||||
|
||||
setting_name = extra_options[index]
|
||||
|
||||
with FormColumn():
|
||||
try:
|
||||
comp = ui_settings.create_setting_component(setting_name)
|
||||
except KeyError:
|
||||
errors.report(f"Can't add extra options for {setting_name} in ui")
|
||||
continue
|
||||
|
||||
self.comps.append(comp)
|
||||
self.setting_names.append(setting_name)
|
||||
|
||||
setting_infotext_name = mapping.get(setting_name)
|
||||
if setting_infotext_name is not None:
|
||||
self.infotext_fields.append((comp, setting_infotext_name))
|
||||
|
||||
def get_settings_values():
|
||||
res = [ui_settings.get_value_for_setting(key) for key in self.setting_names]
|
||||
return res[0] if len(res) == 1 else res
|
||||
|
||||
interface.load(fn=get_settings_values, inputs=[], outputs=self.comps, queue=False, show_progress=False)
|
||||
|
||||
return self.comps
|
||||
|
||||
def before_process(self, p, *args):
|
||||
for name, value in zip(self.setting_names, args):
|
||||
if name not in p.override_settings:
|
||||
p.override_settings[name] = value
|
||||
|
||||
|
||||
shared.options_templates.update(shared.options_section(('settings_in_ui', "Settings in UI", "ui"), {
|
||||
"settings_in_ui": shared.OptionHTML("""
|
||||
This page allows you to add some settings to the main interface of txt2img and img2img tabs.
|
||||
"""),
|
||||
"extra_options_txt2img": shared.OptionInfo([], "Settings for txt2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img interfaces").needs_reload_ui(),
|
||||
"extra_options_img2img": shared.OptionInfo([], "Settings for img2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in img2img interfaces").needs_reload_ui(),
|
||||
"extra_options_cols": shared.OptionInfo(1, "Number of columns for added settings", gr.Slider, {"step": 1, "minimum": 1, "maximum": 20}).info("displayed amount will depend on the actual browser window width").needs_reload_ui(),
|
||||
"extra_options_accordion": shared.OptionInfo(False, "Place added settings into an accordion").needs_reload_ui()
|
||||
}))
|
||||
|
||||
|
@ -1,351 +0,0 @@
|
||||
"""
|
||||
Hypertile module for splitting attention layers in SD-1.5 U-Net and SD-1.5 VAE
|
||||
Warn: The patch works well only if the input image has a width and height that are multiples of 128
|
||||
Original author: @tfernd Github: https://github.com/tfernd/HyperTile
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Callable
|
||||
|
||||
from functools import wraps, cache
|
||||
|
||||
import math
|
||||
import torch.nn as nn
|
||||
import random
|
||||
|
||||
from einops import rearrange
|
||||
|
||||
|
||||
@dataclass
|
||||
class HypertileParams:
|
||||
depth = 0
|
||||
layer_name = ""
|
||||
tile_size: int = 0
|
||||
swap_size: int = 0
|
||||
aspect_ratio: float = 1.0
|
||||
forward = None
|
||||
enabled = False
|
||||
|
||||
|
||||
|
||||
# TODO add SD-XL layers
|
||||
DEPTH_LAYERS = {
|
||||
0: [
|
||||
# SD 1.5 U-Net (diffusers)
|
||||
"down_blocks.0.attentions.0.transformer_blocks.0.attn1",
|
||||
"down_blocks.0.attentions.1.transformer_blocks.0.attn1",
|
||||
"up_blocks.3.attentions.0.transformer_blocks.0.attn1",
|
||||
"up_blocks.3.attentions.1.transformer_blocks.0.attn1",
|
||||
"up_blocks.3.attentions.2.transformer_blocks.0.attn1",
|
||||
# SD 1.5 U-Net (ldm)
|
||||
"input_blocks.1.1.transformer_blocks.0.attn1",
|
||||
"input_blocks.2.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.9.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.10.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.11.1.transformer_blocks.0.attn1",
|
||||
# SD 1.5 VAE
|
||||
"decoder.mid_block.attentions.0",
|
||||
"decoder.mid.attn_1",
|
||||
],
|
||||
1: [
|
||||
# SD 1.5 U-Net (diffusers)
|
||||
"down_blocks.1.attentions.0.transformer_blocks.0.attn1",
|
||||
"down_blocks.1.attentions.1.transformer_blocks.0.attn1",
|
||||
"up_blocks.2.attentions.0.transformer_blocks.0.attn1",
|
||||
"up_blocks.2.attentions.1.transformer_blocks.0.attn1",
|
||||
"up_blocks.2.attentions.2.transformer_blocks.0.attn1",
|
||||
# SD 1.5 U-Net (ldm)
|
||||
"input_blocks.4.1.transformer_blocks.0.attn1",
|
||||
"input_blocks.5.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.6.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.7.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.8.1.transformer_blocks.0.attn1",
|
||||
],
|
||||
2: [
|
||||
# SD 1.5 U-Net (diffusers)
|
||||
"down_blocks.2.attentions.0.transformer_blocks.0.attn1",
|
||||
"down_blocks.2.attentions.1.transformer_blocks.0.attn1",
|
||||
"up_blocks.1.attentions.0.transformer_blocks.0.attn1",
|
||||
"up_blocks.1.attentions.1.transformer_blocks.0.attn1",
|
||||
"up_blocks.1.attentions.2.transformer_blocks.0.attn1",
|
||||
# SD 1.5 U-Net (ldm)
|
||||
"input_blocks.7.1.transformer_blocks.0.attn1",
|
||||
"input_blocks.8.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.3.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.4.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.5.1.transformer_blocks.0.attn1",
|
||||
],
|
||||
3: [
|
||||
# SD 1.5 U-Net (diffusers)
|
||||
"mid_block.attentions.0.transformer_blocks.0.attn1",
|
||||
# SD 1.5 U-Net (ldm)
|
||||
"middle_block.1.transformer_blocks.0.attn1",
|
||||
],
|
||||
}
|
||||
# XL layers, thanks for GitHub@gel-crabs for the help
|
||||
DEPTH_LAYERS_XL = {
|
||||
0: [
|
||||
# SD 1.5 U-Net (diffusers)
|
||||
"down_blocks.0.attentions.0.transformer_blocks.0.attn1",
|
||||
"down_blocks.0.attentions.1.transformer_blocks.0.attn1",
|
||||
"up_blocks.3.attentions.0.transformer_blocks.0.attn1",
|
||||
"up_blocks.3.attentions.1.transformer_blocks.0.attn1",
|
||||
"up_blocks.3.attentions.2.transformer_blocks.0.attn1",
|
||||
# SD 1.5 U-Net (ldm)
|
||||
"input_blocks.4.1.transformer_blocks.0.attn1",
|
||||
"input_blocks.5.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.3.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.4.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.5.1.transformer_blocks.0.attn1",
|
||||
# SD 1.5 VAE
|
||||
"decoder.mid_block.attentions.0",
|
||||
"decoder.mid.attn_1",
|
||||
],
|
||||
1: [
|
||||
# SD 1.5 U-Net (diffusers)
|
||||
#"down_blocks.1.attentions.0.transformer_blocks.0.attn1",
|
||||
#"down_blocks.1.attentions.1.transformer_blocks.0.attn1",
|
||||
#"up_blocks.2.attentions.0.transformer_blocks.0.attn1",
|
||||
#"up_blocks.2.attentions.1.transformer_blocks.0.attn1",
|
||||
#"up_blocks.2.attentions.2.transformer_blocks.0.attn1",
|
||||
# SD 1.5 U-Net (ldm)
|
||||
"input_blocks.4.1.transformer_blocks.1.attn1",
|
||||
"input_blocks.5.1.transformer_blocks.1.attn1",
|
||||
"output_blocks.3.1.transformer_blocks.1.attn1",
|
||||
"output_blocks.4.1.transformer_blocks.1.attn1",
|
||||
"output_blocks.5.1.transformer_blocks.1.attn1",
|
||||
"input_blocks.7.1.transformer_blocks.0.attn1",
|
||||
"input_blocks.8.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.0.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.1.1.transformer_blocks.0.attn1",
|
||||
"output_blocks.2.1.transformer_blocks.0.attn1",
|
||||
"input_blocks.7.1.transformer_blocks.1.attn1",
|
||||
"input_blocks.8.1.transformer_blocks.1.attn1",
|
||||
"output_blocks.0.1.transformer_blocks.1.attn1",
|
||||
"output_blocks.1.1.transformer_blocks.1.attn1",
|
||||
"output_blocks.2.1.transformer_blocks.1.attn1",
|
||||
"input_blocks.7.1.transformer_blocks.2.attn1",
|
||||
"input_blocks.8.1.transformer_blocks.2.attn1",
|
||||
"output_blocks.0.1.transformer_blocks.2.attn1",
|
||||
"output_blocks.1.1.transformer_blocks.2.attn1",
|
||||
"output_blocks.2.1.transformer_blocks.2.attn1",
|
||||
"input_blocks.7.1.transformer_blocks.3.attn1",
|
||||
"input_blocks.8.1.transformer_blocks.3.attn1",
|
||||
"output_blocks.0.1.transformer_blocks.3.attn1",
|
||||
"output_blocks.1.1.transformer_blocks.3.attn1",
|
||||
"output_blocks.2.1.transformer_blocks.3.attn1",
|
||||
"input_blocks.7.1.transformer_blocks.4.attn1",
|
||||
"input_blocks.8.1.transformer_blocks.4.attn1",
|
||||
"output_blocks.0.1.transformer_blocks.4.attn1",
|
||||
"output_blocks.1.1.transformer_blocks.4.attn1",
|
||||
"output_blocks.2.1.transformer_blocks.4.attn1",
|
||||
"input_blocks.7.1.transformer_blocks.5.attn1",
|
||||
"input_blocks.8.1.transformer_blocks.5.attn1",
|
||||
"output_blocks.0.1.transformer_blocks.5.attn1",
|
||||
"output_blocks.1.1.transformer_blocks.5.attn1",
|
||||
"output_blocks.2.1.transformer_blocks.5.attn1",
|
||||
"input_blocks.7.1.transformer_blocks.6.attn1",
|
||||
"input_blocks.8.1.transformer_blocks.6.attn1",
|
||||
"output_blocks.0.1.transformer_blocks.6.attn1",
|
||||
"output_blocks.1.1.transformer_blocks.6.attn1",
|
||||
"output_blocks.2.1.transformer_blocks.6.attn1",
|
||||
"input_blocks.7.1.transformer_blocks.7.attn1",
|
||||
"input_blocks.8.1.transformer_blocks.7.attn1",
|
||||
"output_blocks.0.1.transformer_blocks.7.attn1",
|
||||
"output_blocks.1.1.transformer_blocks.7.attn1",
|
||||
"output_blocks.2.1.transformer_blocks.7.attn1",
|
||||
"input_blocks.7.1.transformer_blocks.8.attn1",
|
||||
"input_blocks.8.1.transformer_blocks.8.attn1",
|
||||
"output_blocks.0.1.transformer_blocks.8.attn1",
|
||||
"output_blocks.1.1.transformer_blocks.8.attn1",
|
||||
"output_blocks.2.1.transformer_blocks.8.attn1",
|
||||
"input_blocks.7.1.transformer_blocks.9.attn1",
|
||||
"input_blocks.8.1.transformer_blocks.9.attn1",
|
||||
"output_blocks.0.1.transformer_blocks.9.attn1",
|
||||
"output_blocks.1.1.transformer_blocks.9.attn1",
|
||||
"output_blocks.2.1.transformer_blocks.9.attn1",
|
||||
],
|
||||
2: [
|
||||
# SD 1.5 U-Net (diffusers)
|
||||
"mid_block.attentions.0.transformer_blocks.0.attn1",
|
||||
# SD 1.5 U-Net (ldm)
|
||||
"middle_block.1.transformer_blocks.0.attn1",
|
||||
"middle_block.1.transformer_blocks.1.attn1",
|
||||
"middle_block.1.transformer_blocks.2.attn1",
|
||||
"middle_block.1.transformer_blocks.3.attn1",
|
||||
"middle_block.1.transformer_blocks.4.attn1",
|
||||
"middle_block.1.transformer_blocks.5.attn1",
|
||||
"middle_block.1.transformer_blocks.6.attn1",
|
||||
"middle_block.1.transformer_blocks.7.attn1",
|
||||
"middle_block.1.transformer_blocks.8.attn1",
|
||||
"middle_block.1.transformer_blocks.9.attn1",
|
||||
],
|
||||
3 : [] # TODO - separate layers for SD-XL
|
||||
}
|
||||
|
||||
|
||||
RNG_INSTANCE = random.Random()
|
||||
|
||||
@cache
|
||||
def get_divisors(value: int, min_value: int, /, max_options: int = 1) -> list[int]:
|
||||
"""
|
||||
Returns divisors of value that
|
||||
x * min_value <= value
|
||||
in big -> small order, amount of divisors is limited by max_options
|
||||
"""
|
||||
max_options = max(1, max_options) # at least 1 option should be returned
|
||||
min_value = min(min_value, value)
|
||||
divisors = [i for i in range(min_value, value + 1) if value % i == 0] # divisors in small -> big order
|
||||
ns = [value // i for i in divisors[:max_options]] # has at least 1 element # big -> small order
|
||||
return ns
|
||||
|
||||
|
||||
def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int:
|
||||
"""
|
||||
Returns a random divisor of value that
|
||||
x * min_value <= value
|
||||
if max_options is 1, the behavior is deterministic
|
||||
"""
|
||||
ns = get_divisors(value, min_value, max_options=max_options) # get cached divisors
|
||||
idx = RNG_INSTANCE.randint(0, len(ns) - 1)
|
||||
|
||||
return ns[idx]
|
||||
|
||||
|
||||
def set_hypertile_seed(seed: int) -> None:
|
||||
RNG_INSTANCE.seed(seed)
|
||||
|
||||
|
||||
@cache
|
||||
def largest_tile_size_available(width: int, height: int) -> int:
|
||||
"""
|
||||
Calculates the largest tile size available for a given width and height
|
||||
Tile size is always a power of 2
|
||||
"""
|
||||
gcd = math.gcd(width, height)
|
||||
largest_tile_size_available = 1
|
||||
while gcd % (largest_tile_size_available * 2) == 0:
|
||||
largest_tile_size_available *= 2
|
||||
return largest_tile_size_available
|
||||
|
||||
|
||||
def iterative_closest_divisors(hw:int, aspect_ratio:float) -> tuple[int, int]:
|
||||
"""
|
||||
Finds h and w such that h*w = hw and h/w = aspect_ratio
|
||||
We check all possible divisors of hw and return the closest to the aspect ratio
|
||||
"""
|
||||
divisors = [i for i in range(2, hw + 1) if hw % i == 0] # all divisors of hw
|
||||
pairs = [(i, hw // i) for i in divisors] # all pairs of divisors of hw
|
||||
ratios = [w/h for h, w in pairs] # all ratios of pairs of divisors of hw
|
||||
closest_ratio = min(ratios, key=lambda x: abs(x - aspect_ratio)) # closest ratio to aspect_ratio
|
||||
closest_pair = pairs[ratios.index(closest_ratio)] # closest pair of divisors to aspect_ratio
|
||||
return closest_pair
|
||||
|
||||
|
||||
@cache
|
||||
def find_hw_candidates(hw:int, aspect_ratio:float) -> tuple[int, int]:
|
||||
"""
|
||||
Finds h and w such that h*w = hw and h/w = aspect_ratio
|
||||
"""
|
||||
h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))
|
||||
# find h and w such that h*w = hw and h/w = aspect_ratio
|
||||
if h * w != hw:
|
||||
w_candidate = hw / h
|
||||
# check if w is an integer
|
||||
if not w_candidate.is_integer():
|
||||
h_candidate = hw / w
|
||||
# check if h is an integer
|
||||
if not h_candidate.is_integer():
|
||||
return iterative_closest_divisors(hw, aspect_ratio)
|
||||
else:
|
||||
h = int(h_candidate)
|
||||
else:
|
||||
w = int(w_candidate)
|
||||
return h, w
|
||||
|
||||
|
||||
def self_attn_forward(params: HypertileParams, scale_depth=True) -> Callable:
|
||||
|
||||
@wraps(params.forward)
|
||||
def wrapper(*args, **kwargs):
|
||||
if not params.enabled:
|
||||
return params.forward(*args, **kwargs)
|
||||
|
||||
latent_tile_size = max(128, params.tile_size) // 8
|
||||
x = args[0]
|
||||
|
||||
# VAE
|
||||
if x.ndim == 4:
|
||||
b, c, h, w = x.shape
|
||||
|
||||
nh = random_divisor(h, latent_tile_size, params.swap_size)
|
||||
nw = random_divisor(w, latent_tile_size, params.swap_size)
|
||||
|
||||
if nh * nw > 1:
|
||||
x = rearrange(x, "b c (nh h) (nw w) -> (b nh nw) c h w", nh=nh, nw=nw) # split into nh * nw tiles
|
||||
|
||||
out = params.forward(x, *args[1:], **kwargs)
|
||||
|
||||
if nh * nw > 1:
|
||||
out = rearrange(out, "(b nh nw) c h w -> b c (nh h) (nw w)", nh=nh, nw=nw)
|
||||
|
||||
# U-Net
|
||||
else:
|
||||
hw: int = x.size(1)
|
||||
h, w = find_hw_candidates(hw, params.aspect_ratio)
|
||||
assert h * w == hw, f"Invalid aspect ratio {params.aspect_ratio} for input of shape {x.shape}, hw={hw}, h={h}, w={w}"
|
||||
|
||||
factor = 2 ** params.depth if scale_depth else 1
|
||||
nh = random_divisor(h, latent_tile_size * factor, params.swap_size)
|
||||
nw = random_divisor(w, latent_tile_size * factor, params.swap_size)
|
||||
|
||||
if nh * nw > 1:
|
||||
x = rearrange(x, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw)
|
||||
|
||||
out = params.forward(x, *args[1:], **kwargs)
|
||||
|
||||
if nh * nw > 1:
|
||||
out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw)
|
||||
out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw)
|
||||
|
||||
return out
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def hypertile_hook_model(model: nn.Module, width, height, *, enable=False, tile_size_max=128, swap_size=1, max_depth=3, is_sdxl=False):
|
||||
hypertile_layers = getattr(model, "__webui_hypertile_layers", None)
|
||||
if hypertile_layers is None:
|
||||
if not enable:
|
||||
return
|
||||
|
||||
hypertile_layers = {}
|
||||
layers = DEPTH_LAYERS_XL if is_sdxl else DEPTH_LAYERS
|
||||
|
||||
for depth in range(4):
|
||||
for layer_name, module in model.named_modules():
|
||||
if any(layer_name.endswith(try_name) for try_name in layers[depth]):
|
||||
params = HypertileParams()
|
||||
module.__webui_hypertile_params = params
|
||||
params.forward = module.forward
|
||||
params.depth = depth
|
||||
params.layer_name = layer_name
|
||||
module.forward = self_attn_forward(params)
|
||||
|
||||
hypertile_layers[layer_name] = 1
|
||||
|
||||
model.__webui_hypertile_layers = hypertile_layers
|
||||
|
||||
aspect_ratio = width / height
|
||||
tile_size = min(largest_tile_size_available(width, height), tile_size_max)
|
||||
|
||||
for layer_name, module in model.named_modules():
|
||||
if layer_name in hypertile_layers:
|
||||
params = module.__webui_hypertile_params
|
||||
|
||||
params.tile_size = tile_size
|
||||
params.swap_size = swap_size
|
||||
params.aspect_ratio = aspect_ratio
|
||||
params.enabled = enable and params.depth <= max_depth
|
@ -1,122 +0,0 @@
|
||||
import hypertile
|
||||
from modules import scripts, script_callbacks, shared
|
||||
|
||||
|
||||
class ScriptHypertile(scripts.Script):
|
||||
name = "Hypertile"
|
||||
|
||||
def title(self):
|
||||
return self.name
|
||||
|
||||
def show(self, is_img2img):
|
||||
return scripts.AlwaysVisible
|
||||
|
||||
def process(self, p, *args):
|
||||
hypertile.set_hypertile_seed(p.all_seeds[0])
|
||||
|
||||
configure_hypertile(p.width, p.height, enable_unet=shared.opts.hypertile_enable_unet)
|
||||
|
||||
self.add_infotext(p)
|
||||
|
||||
def before_hr(self, p, *args):
|
||||
|
||||
enable = shared.opts.hypertile_enable_unet_secondpass or shared.opts.hypertile_enable_unet
|
||||
|
||||
# exclusive hypertile seed for the second pass
|
||||
if enable:
|
||||
hypertile.set_hypertile_seed(p.all_seeds[0])
|
||||
|
||||
configure_hypertile(p.hr_upscale_to_x, p.hr_upscale_to_y, enable_unet=enable)
|
||||
|
||||
if enable and not shared.opts.hypertile_enable_unet:
|
||||
p.extra_generation_params["Hypertile U-Net second pass"] = True
|
||||
|
||||
self.add_infotext(p, add_unet_params=True)
|
||||
|
||||
def add_infotext(self, p, add_unet_params=False):
|
||||
def option(name):
|
||||
value = getattr(shared.opts, name)
|
||||
default_value = shared.opts.get_default(name)
|
||||
return None if value == default_value else value
|
||||
|
||||
if shared.opts.hypertile_enable_unet:
|
||||
p.extra_generation_params["Hypertile U-Net"] = True
|
||||
|
||||
if shared.opts.hypertile_enable_unet or add_unet_params:
|
||||
p.extra_generation_params["Hypertile U-Net max depth"] = option('hypertile_max_depth_unet')
|
||||
p.extra_generation_params["Hypertile U-Net max tile size"] = option('hypertile_max_tile_unet')
|
||||
p.extra_generation_params["Hypertile U-Net swap size"] = option('hypertile_swap_size_unet')
|
||||
|
||||
if shared.opts.hypertile_enable_vae:
|
||||
p.extra_generation_params["Hypertile VAE"] = True
|
||||
p.extra_generation_params["Hypertile VAE max depth"] = option('hypertile_max_depth_vae')
|
||||
p.extra_generation_params["Hypertile VAE max tile size"] = option('hypertile_max_tile_vae')
|
||||
p.extra_generation_params["Hypertile VAE swap size"] = option('hypertile_swap_size_vae')
|
||||
|
||||
|
||||
def configure_hypertile(width, height, enable_unet=True):
|
||||
hypertile.hypertile_hook_model(
|
||||
shared.sd_model.first_stage_model,
|
||||
width,
|
||||
height,
|
||||
swap_size=shared.opts.hypertile_swap_size_vae,
|
||||
max_depth=shared.opts.hypertile_max_depth_vae,
|
||||
tile_size_max=shared.opts.hypertile_max_tile_vae,
|
||||
enable=shared.opts.hypertile_enable_vae,
|
||||
)
|
||||
|
||||
hypertile.hypertile_hook_model(
|
||||
shared.sd_model.model,
|
||||
width,
|
||||
height,
|
||||
swap_size=shared.opts.hypertile_swap_size_unet,
|
||||
max_depth=shared.opts.hypertile_max_depth_unet,
|
||||
tile_size_max=shared.opts.hypertile_max_tile_unet,
|
||||
enable=enable_unet,
|
||||
is_sdxl=shared.sd_model.is_sdxl
|
||||
)
|
||||
|
||||
|
||||
def on_ui_settings():
|
||||
import gradio as gr
|
||||
|
||||
options = {
|
||||
"hypertile_explanation": shared.OptionHTML("""
|
||||
<a href='https://github.com/tfernd/HyperTile'>Hypertile</a> optimizes the self-attention layer within U-Net and VAE models,
|
||||
resulting in a reduction in computation time ranging from 1 to 4 times. The larger the generated image is, the greater the
|
||||
benefit.
|
||||
"""),
|
||||
|
||||
"hypertile_enable_unet": shared.OptionInfo(False, "Enable Hypertile U-Net", infotext="Hypertile U-Net").info("enables hypertile for all modes, including hires fix second pass; noticeable change in details of the generated picture"),
|
||||
"hypertile_enable_unet_secondpass": shared.OptionInfo(False, "Enable Hypertile U-Net for hires fix second pass", infotext="Hypertile U-Net second pass").info("enables hypertile just for hires fix second pass - regardless of whether the above setting is enabled"),
|
||||
"hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile U-Net max depth").info("larger = more neural network layers affected; minor effect on performance"),
|
||||
"hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-Net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile U-Net max tile size").info("larger = worse performance"),
|
||||
"hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-Net swap size", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, infotext="Hypertile U-Net swap size"),
|
||||
"hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE", infotext="Hypertile VAE").info("minimal change in the generated picture"),
|
||||
"hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile VAE max depth"),
|
||||
"hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile VAE max tile size"),
|
||||
"hypertile_swap_size_vae": shared.OptionInfo(3, "Hypertile VAE swap size ", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, infotext="Hypertile VAE swap size"),
|
||||
}
|
||||
|
||||
for name, opt in options.items():
|
||||
opt.section = ('hypertile', "Hypertile")
|
||||
shared.opts.add_option(name, opt)
|
||||
|
||||
|
||||
def add_axis_options():
|
||||
xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module
|
||||
xyz_grid.axis_options.extend([
|
||||
xyz_grid.AxisOption("[Hypertile] Unet First pass Enabled", str, xyz_grid.apply_override('hypertile_enable_unet', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)),
|
||||
xyz_grid.AxisOption("[Hypertile] Unet Second pass Enabled", str, xyz_grid.apply_override('hypertile_enable_unet_secondpass', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)),
|
||||
xyz_grid.AxisOption("[Hypertile] Unet Max Depth", int, xyz_grid.apply_override("hypertile_max_depth_unet"), confirm=xyz_grid.confirm_range(0, 3, '[Hypertile] Unet Max Depth'), choices=lambda: [str(x) for x in range(4)]),
|
||||
xyz_grid.AxisOption("[Hypertile] Unet Max Tile Size", int, xyz_grid.apply_override("hypertile_max_tile_unet"), confirm=xyz_grid.confirm_range(0, 512, '[Hypertile] Unet Max Tile Size')),
|
||||
xyz_grid.AxisOption("[Hypertile] Unet Swap Size", int, xyz_grid.apply_override("hypertile_swap_size_unet"), confirm=xyz_grid.confirm_range(0, 64, '[Hypertile] Unet Swap Size')),
|
||||
xyz_grid.AxisOption("[Hypertile] VAE Enabled", str, xyz_grid.apply_override('hypertile_enable_vae', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)),
|
||||
xyz_grid.AxisOption("[Hypertile] VAE Max Depth", int, xyz_grid.apply_override("hypertile_max_depth_vae"), confirm=xyz_grid.confirm_range(0, 3, '[Hypertile] VAE Max Depth'), choices=lambda: [str(x) for x in range(4)]),
|
||||
xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, xyz_grid.apply_override("hypertile_max_tile_vae"), confirm=xyz_grid.confirm_range(0, 512, '[Hypertile] VAE Max Tile Size')),
|
||||
xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, xyz_grid.apply_override("hypertile_swap_size_vae"), confirm=xyz_grid.confirm_range(0, 64, '[Hypertile] VAE Swap Size')),
|
||||
])
|
||||
|
||||
|
||||
script_callbacks.on_ui_settings(on_ui_settings)
|
||||
script_callbacks.on_before_ui(add_axis_options)
|
@ -1,34 +0,0 @@
|
||||
var isSetupForMobile = false;
|
||||
|
||||
function isMobile() {
|
||||
for (var tab of ["txt2img", "img2img"]) {
|
||||
var imageTab = gradioApp().getElementById(tab + '_results');
|
||||
if (imageTab && imageTab.offsetParent && imageTab.offsetLeft == 0) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
function reportWindowSize() {
|
||||
if (gradioApp().querySelector('.toprow-compact-tools')) return; // not applicable for compact prompt layout
|
||||
|
||||
var currentlyMobile = isMobile();
|
||||
if (currentlyMobile == isSetupForMobile) return;
|
||||
isSetupForMobile = currentlyMobile;
|
||||
|
||||
for (var tab of ["txt2img", "img2img"]) {
|
||||
var button = gradioApp().getElementById(tab + '_generate_box');
|
||||
var target = gradioApp().getElementById(currentlyMobile ? tab + '_results' : tab + '_actions_column');
|
||||
target.insertBefore(button, target.firstElementChild);
|
||||
|
||||
gradioApp().getElementById(tab + '_results').classList.toggle('mobile', currentlyMobile);
|
||||
}
|
||||
}
|
||||
|
||||
window.addEventListener("resize", reportWindowSize);
|
||||
|
||||
onUiLoaded(function() {
|
||||
reportWindowSize();
|
||||
});
|
@ -1,64 +0,0 @@
|
||||
from PIL import Image
|
||||
|
||||
from modules import scripts_postprocessing, ui_components
|
||||
import gradio as gr
|
||||
|
||||
|
||||
def center_crop(image: Image, w: int, h: int):
|
||||
iw, ih = image.size
|
||||
if ih / h < iw / w:
|
||||
sw = w * ih / h
|
||||
box = (iw - sw) / 2, 0, iw - (iw - sw) / 2, ih
|
||||
else:
|
||||
sh = h * iw / w
|
||||
box = 0, (ih - sh) / 2, iw, ih - (ih - sh) / 2
|
||||
return image.resize((w, h), Image.Resampling.LANCZOS, box)
|
||||
|
||||
|
||||
def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, threshold):
|
||||
iw, ih = image.size
|
||||
err = lambda w, h: 1 - (lambda x: x if x < 1 else 1 / x)(iw / ih / (w / h))
|
||||
wh = max(((w, h) for w in range(mindim, maxdim + 1, 64) for h in range(mindim, maxdim + 1, 64)
|
||||
if minarea <= w * h <= maxarea and err(w, h) <= threshold),
|
||||
key=lambda wh: (wh[0] * wh[1], -err(*wh))[::1 if objective == 'Maximize area' else -1],
|
||||
default=None
|
||||
)
|
||||
return wh and center_crop(image, *wh)
|
||||
|
||||
|
||||
class ScriptPostprocessingAutosizedCrop(scripts_postprocessing.ScriptPostprocessing):
|
||||
name = "Auto-sized crop"
|
||||
order = 4020
|
||||
|
||||
def ui(self):
|
||||
with ui_components.InputAccordion(False, label="Auto-sized crop") as enable:
|
||||
gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
|
||||
with gr.Row():
|
||||
mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="postprocess_multicrop_mindim")
|
||||
maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="postprocess_multicrop_maxdim")
|
||||
with gr.Row():
|
||||
minarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area lower bound", value=64 * 64, elem_id="postprocess_multicrop_minarea")
|
||||
maxarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area upper bound", value=640 * 640, elem_id="postprocess_multicrop_maxarea")
|
||||
with gr.Row():
|
||||
objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="postprocess_multicrop_objective")
|
||||
threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="postprocess_multicrop_threshold")
|
||||
|
||||
return {
|
||||
"enable": enable,
|
||||
"mindim": mindim,
|
||||
"maxdim": maxdim,
|
||||
"minarea": minarea,
|
||||
"maxarea": maxarea,
|
||||
"objective": objective,
|
||||
"threshold": threshold,
|
||||
}
|
||||
|
||||
def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, mindim, maxdim, minarea, maxarea, objective, threshold):
|
||||
if not enable:
|
||||
return
|
||||
|
||||
cropped = multicrop_pic(pp.image, mindim, maxdim, minarea, maxarea, objective, threshold)
|
||||
if cropped is not None:
|
||||
pp.image = cropped
|
||||
else:
|
||||
print(f"skipped {pp.image.width}x{pp.image.height} image (can't find suitable size within error threshold)")
|
@ -1,30 +0,0 @@
|
||||
from modules import scripts_postprocessing, ui_components, deepbooru, shared
|
||||
import gradio as gr
|
||||
|
||||
|
||||
class ScriptPostprocessingCeption(scripts_postprocessing.ScriptPostprocessing):
|
||||
name = "Caption"
|
||||
order = 4040
|
||||
|
||||
def ui(self):
|
||||
with ui_components.InputAccordion(False, label="Caption") as enable:
|
||||
option = gr.CheckboxGroup(value=["Deepbooru"], choices=["Deepbooru", "BLIP"], show_label=False)
|
||||
|
||||
return {
|
||||
"enable": enable,
|
||||
"option": option,
|
||||
}
|
||||
|
||||
def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, option):
|
||||
if not enable:
|
||||
return
|
||||
|
||||
captions = [pp.caption]
|
||||
|
||||
if "Deepbooru" in option:
|
||||
captions.append(deepbooru.model.tag(pp.image))
|
||||
|
||||
if "BLIP" in option:
|
||||
captions.append(shared.interrogator.interrogate(pp.image.convert("RGB")))
|
||||
|
||||
pp.caption = ", ".join([x for x in captions if x])
|
@ -1,32 +0,0 @@
|
||||
from PIL import ImageOps, Image
|
||||
|
||||
from modules import scripts_postprocessing, ui_components
|
||||
import gradio as gr
|
||||
|
||||
|
||||
class ScriptPostprocessingCreateFlippedCopies(scripts_postprocessing.ScriptPostprocessing):
|
||||
name = "Create flipped copies"
|
||||
order = 4030
|
||||
|
||||
def ui(self):
|
||||
with ui_components.InputAccordion(False, label="Create flipped copies") as enable:
|
||||
with gr.Row():
|
||||
option = gr.CheckboxGroup(value=["Horizontal"], choices=["Horizontal", "Vertical", "Both"], show_label=False)
|
||||
|
||||
return {
|
||||
"enable": enable,
|
||||
"option": option,
|
||||
}
|
||||
|
||||
def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, option):
|
||||
if not enable:
|
||||
return
|
||||
|
||||
if "Horizontal" in option:
|
||||
pp.extra_images.append(ImageOps.mirror(pp.image))
|
||||
|
||||
if "Vertical" in option:
|
||||
pp.extra_images.append(pp.image.transpose(Image.Transpose.FLIP_TOP_BOTTOM))
|
||||
|
||||
if "Both" in option:
|
||||
pp.extra_images.append(pp.image.transpose(Image.Transpose.FLIP_TOP_BOTTOM).transpose(Image.Transpose.FLIP_LEFT_RIGHT))
|
@ -1,54 +0,0 @@
|
||||
|
||||
from modules import scripts_postprocessing, ui_components, errors
|
||||
import gradio as gr
|
||||
|
||||
from modules.textual_inversion import autocrop
|
||||
|
||||
|
||||
class ScriptPostprocessingFocalCrop(scripts_postprocessing.ScriptPostprocessing):
|
||||
name = "Auto focal point crop"
|
||||
order = 4010
|
||||
|
||||
def ui(self):
|
||||
with ui_components.InputAccordion(False, label="Auto focal point crop") as enable:
|
||||
face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_face_weight")
|
||||
entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_entropy_weight")
|
||||
edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_edges_weight")
|
||||
debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
|
||||
|
||||
return {
|
||||
"enable": enable,
|
||||
"face_weight": face_weight,
|
||||
"entropy_weight": entropy_weight,
|
||||
"edges_weight": edges_weight,
|
||||
"debug": debug,
|
||||
}
|
||||
|
||||
def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, face_weight, entropy_weight, edges_weight, debug):
|
||||
if not enable:
|
||||
return
|
||||
|
||||
if not pp.shared.target_width or not pp.shared.target_height:
|
||||
return
|
||||
|
||||
dnn_model_path = None
|
||||
try:
|
||||
dnn_model_path = autocrop.download_and_cache_models()
|
||||
except Exception:
|
||||
errors.report("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", exc_info=True)
|
||||
|
||||
autocrop_settings = autocrop.Settings(
|
||||
crop_width=pp.shared.target_width,
|
||||
crop_height=pp.shared.target_height,
|
||||
face_points_weight=face_weight,
|
||||
entropy_points_weight=entropy_weight,
|
||||
corner_points_weight=edges_weight,
|
||||
annotate_image=debug,
|
||||
dnn_model_path=dnn_model_path,
|
||||
)
|
||||
|
||||
result, *others = autocrop.crop_image(pp.image, autocrop_settings)
|
||||
|
||||
pp.image = result
|
||||
pp.extra_images = [pp.create_copy(x, nametags=["focal-crop-debug"], disable_processing=True) for x in others]
|
||||
|
@ -1,71 +0,0 @@
|
||||
import math
|
||||
|
||||
from modules import scripts_postprocessing, ui_components
|
||||
import gradio as gr
|
||||
|
||||
|
||||
def split_pic(image, inverse_xy, width, height, overlap_ratio):
|
||||
if inverse_xy:
|
||||
from_w, from_h = image.height, image.width
|
||||
to_w, to_h = height, width
|
||||
else:
|
||||
from_w, from_h = image.width, image.height
|
||||
to_w, to_h = width, height
|
||||
h = from_h * to_w // from_w
|
||||
if inverse_xy:
|
||||
image = image.resize((h, to_w))
|
||||
else:
|
||||
image = image.resize((to_w, h))
|
||||
|
||||
split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio)))
|
||||
y_step = (h - to_h) / (split_count - 1)
|
||||
for i in range(split_count):
|
||||
y = int(y_step * i)
|
||||
if inverse_xy:
|
||||
splitted = image.crop((y, 0, y + to_h, to_w))
|
||||
else:
|
||||
splitted = image.crop((0, y, to_w, y + to_h))
|
||||
yield splitted
|
||||
|
||||
|
||||
class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPostprocessing):
|
||||
name = "Split oversized images"
|
||||
order = 4000
|
||||
|
||||
def ui(self):
|
||||
with ui_components.InputAccordion(False, label="Split oversized images") as enable:
|
||||
with gr.Row():
|
||||
split_threshold = gr.Slider(label='Threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_split_threshold")
|
||||
overlap_ratio = gr.Slider(label='Overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="postprocess_overlap_ratio")
|
||||
|
||||
return {
|
||||
"enable": enable,
|
||||
"split_threshold": split_threshold,
|
||||
"overlap_ratio": overlap_ratio,
|
||||
}
|
||||
|
||||
def process(self, pp: scripts_postprocessing.PostprocessedImage, enable, split_threshold, overlap_ratio):
|
||||
if not enable:
|
||||
return
|
||||
|
||||
width = pp.shared.target_width
|
||||
height = pp.shared.target_height
|
||||
|
||||
if not width or not height:
|
||||
return
|
||||
|
||||
if pp.image.height > pp.image.width:
|
||||
ratio = (pp.image.width * height) / (pp.image.height * width)
|
||||
inverse_xy = False
|
||||
else:
|
||||
ratio = (pp.image.height * width) / (pp.image.width * height)
|
||||
inverse_xy = True
|
||||
|
||||
if ratio >= 1.0 or ratio > split_threshold:
|
||||
return
|
||||
|
||||
result, *others = split_pic(pp.image, inverse_xy, width, height, overlap_ratio)
|
||||
|
||||
pp.image = result
|
||||
pp.extra_images = [pp.create_copy(x) for x in others]
|
||||
|
@ -1,760 +0,0 @@
|
||||
import numpy as np
|
||||
import gradio as gr
|
||||
import math
|
||||
from modules.ui_components import InputAccordion
|
||||
import modules.scripts as scripts
|
||||
from modules.torch_utils import float64
|
||||
|
||||
|
||||
class SoftInpaintingSettings:
|
||||
def __init__(self,
|
||||
mask_blend_power,
|
||||
mask_blend_scale,
|
||||
inpaint_detail_preservation,
|
||||
composite_mask_influence,
|
||||
composite_difference_threshold,
|
||||
composite_difference_contrast):
|
||||
self.mask_blend_power = mask_blend_power
|
||||
self.mask_blend_scale = mask_blend_scale
|
||||
self.inpaint_detail_preservation = inpaint_detail_preservation
|
||||
self.composite_mask_influence = composite_mask_influence
|
||||
self.composite_difference_threshold = composite_difference_threshold
|
||||
self.composite_difference_contrast = composite_difference_contrast
|
||||
|
||||
def add_generation_params(self, dest):
|
||||
dest[enabled_gen_param_label] = True
|
||||
dest[gen_param_labels.mask_blend_power] = self.mask_blend_power
|
||||
dest[gen_param_labels.mask_blend_scale] = self.mask_blend_scale
|
||||
dest[gen_param_labels.inpaint_detail_preservation] = self.inpaint_detail_preservation
|
||||
dest[gen_param_labels.composite_mask_influence] = self.composite_mask_influence
|
||||
dest[gen_param_labels.composite_difference_threshold] = self.composite_difference_threshold
|
||||
dest[gen_param_labels.composite_difference_contrast] = self.composite_difference_contrast
|
||||
|
||||
|
||||
# ------------------- Methods -------------------
|
||||
|
||||
def processing_uses_inpainting(p):
|
||||
# TODO: Figure out a better way to determine if inpainting is being used by p
|
||||
if getattr(p, "image_mask", None) is not None:
|
||||
return True
|
||||
|
||||
if getattr(p, "mask", None) is not None:
|
||||
return True
|
||||
|
||||
if getattr(p, "nmask", None) is not None:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def latent_blend(settings, a, b, t):
|
||||
"""
|
||||
Interpolates two latent image representations according to the parameter t,
|
||||
where the interpolated vectors' magnitudes are also interpolated separately.
|
||||
The "detail_preservation" factor biases the magnitude interpolation towards
|
||||
the larger of the two magnitudes.
|
||||
"""
|
||||
import torch
|
||||
|
||||
# NOTE: We use inplace operations wherever possible.
|
||||
|
||||
if len(t.shape) == 3:
|
||||
# [4][w][h] to [1][4][w][h]
|
||||
t2 = t.unsqueeze(0)
|
||||
# [4][w][h] to [1][1][w][h] - the [4] seem redundant.
|
||||
t3 = t[0].unsqueeze(0).unsqueeze(0)
|
||||
else:
|
||||
t2 = t
|
||||
t3 = t[:, 0][:, None]
|
||||
|
||||
one_minus_t2 = 1 - t2
|
||||
one_minus_t3 = 1 - t3
|
||||
|
||||
# Linearly interpolate the image vectors.
|
||||
a_scaled = a * one_minus_t2
|
||||
b_scaled = b * t2
|
||||
image_interp = a_scaled
|
||||
image_interp.add_(b_scaled)
|
||||
result_type = image_interp.dtype
|
||||
del a_scaled, b_scaled, t2, one_minus_t2
|
||||
|
||||
# Calculate the magnitude of the interpolated vectors. (We will remove this magnitude.)
|
||||
# 64-bit operations are used here to allow large exponents.
|
||||
current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).to(float64(image_interp)).add_(0.00001)
|
||||
|
||||
# Interpolate the powered magnitudes, then un-power them (bring them back to a power of 1).
|
||||
a_magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(float64(a)).pow_(settings.inpaint_detail_preservation) * one_minus_t3
|
||||
b_magnitude = torch.norm(b, p=2, dim=1, keepdim=True).to(float64(b)).pow_(settings.inpaint_detail_preservation) * t3
|
||||
desired_magnitude = a_magnitude
|
||||
desired_magnitude.add_(b_magnitude).pow_(1 / settings.inpaint_detail_preservation)
|
||||
del a_magnitude, b_magnitude, t3, one_minus_t3
|
||||
|
||||
# Change the linearly interpolated image vectors' magnitudes to the value we want.
|
||||
# This is the last 64-bit operation.
|
||||
image_interp_scaling_factor = desired_magnitude
|
||||
image_interp_scaling_factor.div_(current_magnitude)
|
||||
image_interp_scaling_factor = image_interp_scaling_factor.to(result_type)
|
||||
image_interp_scaled = image_interp
|
||||
image_interp_scaled.mul_(image_interp_scaling_factor)
|
||||
del current_magnitude
|
||||
del desired_magnitude
|
||||
del image_interp
|
||||
del image_interp_scaling_factor
|
||||
del result_type
|
||||
|
||||
return image_interp_scaled
|
||||
|
||||
|
||||
def get_modified_nmask(settings, nmask, sigma):
|
||||
"""
|
||||
Converts a negative mask representing the transparency of the original latent vectors being overlaid
|
||||
to a mask that is scaled according to the denoising strength for this step.
|
||||
|
||||
Where:
|
||||
0 = fully opaque, infinite density, fully masked
|
||||
1 = fully transparent, zero density, fully unmasked
|
||||
|
||||
We bring this transparency to a power, as this allows one to simulate N number of blending operations
|
||||
where N can be any positive real value. Using this one can control the balance of influence between
|
||||
the denoiser and the original latents according to the sigma value.
|
||||
|
||||
NOTE: "mask" is not used
|
||||
"""
|
||||
import torch
|
||||
return torch.pow(nmask, (sigma ** settings.mask_blend_power) * settings.mask_blend_scale)
|
||||
|
||||
|
||||
def apply_adaptive_masks(
|
||||
settings: SoftInpaintingSettings,
|
||||
nmask,
|
||||
latent_orig,
|
||||
latent_processed,
|
||||
overlay_images,
|
||||
width, height,
|
||||
paste_to):
|
||||
import torch
|
||||
import modules.processing as proc
|
||||
import modules.images as images
|
||||
from PIL import Image, ImageOps, ImageFilter
|
||||
|
||||
# TODO: Bias the blending according to the latent mask, add adjustable parameter for bias control.
|
||||
if len(nmask.shape) == 3:
|
||||
latent_mask = nmask[0].float()
|
||||
else:
|
||||
latent_mask = nmask[:, 0].float()
|
||||
# convert the original mask into a form we use to scale distances for thresholding
|
||||
mask_scalar = 1 - (torch.clamp(latent_mask, min=0, max=1) ** (settings.mask_blend_scale / 2))
|
||||
mask_scalar = (0.5 * (1 - settings.composite_mask_influence)
|
||||
+ mask_scalar * settings.composite_mask_influence)
|
||||
mask_scalar = mask_scalar / (1.00001 - mask_scalar)
|
||||
mask_scalar = mask_scalar.cpu().numpy()
|
||||
|
||||
latent_distance = torch.norm(latent_processed - latent_orig, p=2, dim=1)
|
||||
|
||||
kernel, kernel_center = get_gaussian_kernel(stddev_radius=1.5, max_radius=2)
|
||||
|
||||
masks_for_overlay = []
|
||||
|
||||
for i, (distance_map, overlay_image) in enumerate(zip(latent_distance, overlay_images)):
|
||||
converted_mask = distance_map.float().cpu().numpy()
|
||||
converted_mask = weighted_histogram_filter(converted_mask, kernel, kernel_center,
|
||||
percentile_min=0.9, percentile_max=1, min_width=1)
|
||||
converted_mask = weighted_histogram_filter(converted_mask, kernel, kernel_center,
|
||||
percentile_min=0.25, percentile_max=0.75, min_width=1)
|
||||
|
||||
# The distance at which opacity of original decreases to 50%
|
||||
if len(mask_scalar.shape) == 3:
|
||||
if mask_scalar.shape[0] > i:
|
||||
half_weighted_distance = settings.composite_difference_threshold * mask_scalar[i]
|
||||
else:
|
||||
half_weighted_distance = settings.composite_difference_threshold * mask_scalar[0]
|
||||
else:
|
||||
half_weighted_distance = settings.composite_difference_threshold * mask_scalar
|
||||
|
||||
converted_mask = converted_mask / half_weighted_distance
|
||||
|
||||
converted_mask = 1 / (1 + converted_mask ** settings.composite_difference_contrast)
|
||||
converted_mask = smootherstep(converted_mask)
|
||||
converted_mask = 1 - converted_mask
|
||||
converted_mask = 255. * converted_mask
|
||||
converted_mask = converted_mask.astype(np.uint8)
|
||||
converted_mask = Image.fromarray(converted_mask)
|
||||
converted_mask = images.resize_image(2, converted_mask, width, height)
|
||||
converted_mask = proc.create_binary_mask(converted_mask, round=False)
|
||||
|
||||
# Remove aliasing artifacts using a gaussian blur.
|
||||
converted_mask = converted_mask.filter(ImageFilter.GaussianBlur(radius=4))
|
||||
|
||||
# Expand the mask to fit the whole image if needed.
|
||||
if paste_to is not None:
|
||||
converted_mask = proc.uncrop(converted_mask,
|
||||
(overlay_image.width, overlay_image.height),
|
||||
paste_to)
|
||||
|
||||
masks_for_overlay.append(converted_mask)
|
||||
|
||||
image_masked = Image.new('RGBa', (overlay_image.width, overlay_image.height))
|
||||
image_masked.paste(overlay_image.convert("RGBA").convert("RGBa"),
|
||||
mask=ImageOps.invert(converted_mask.convert('L')))
|
||||
|
||||
overlay_images[i] = image_masked.convert('RGBA')
|
||||
|
||||
return masks_for_overlay
|
||||
|
||||
|
||||
def apply_masks(
|
||||
settings,
|
||||
nmask,
|
||||
overlay_images,
|
||||
width, height,
|
||||
paste_to):
|
||||
import torch
|
||||
import modules.processing as proc
|
||||
import modules.images as images
|
||||
from PIL import Image, ImageOps, ImageFilter
|
||||
|
||||
converted_mask = nmask[0].float()
|
||||
converted_mask = torch.clamp(converted_mask, min=0, max=1).pow_(settings.mask_blend_scale / 2)
|
||||
converted_mask = 255. * converted_mask
|
||||
converted_mask = converted_mask.cpu().numpy().astype(np.uint8)
|
||||
converted_mask = Image.fromarray(converted_mask)
|
||||
converted_mask = images.resize_image(2, converted_mask, width, height)
|
||||
converted_mask = proc.create_binary_mask(converted_mask, round=False)
|
||||
|
||||
# Remove aliasing artifacts using a gaussian blur.
|
||||
converted_mask = converted_mask.filter(ImageFilter.GaussianBlur(radius=4))
|
||||
|
||||
# Expand the mask to fit the whole image if needed.
|
||||
if paste_to is not None:
|
||||
converted_mask = proc.uncrop(converted_mask,
|
||||
(width, height),
|
||||
paste_to)
|
||||
|
||||
masks_for_overlay = []
|
||||
|
||||
for i, overlay_image in enumerate(overlay_images):
|
||||
masks_for_overlay[i] = converted_mask
|
||||
|
||||
image_masked = Image.new('RGBa', (overlay_image.width, overlay_image.height))
|
||||
image_masked.paste(overlay_image.convert("RGBA").convert("RGBa"),
|
||||
mask=ImageOps.invert(converted_mask.convert('L')))
|
||||
|
||||
overlay_images[i] = image_masked.convert('RGBA')
|
||||
|
||||
return masks_for_overlay
|
||||
|
||||
|
||||
def weighted_histogram_filter(img, kernel, kernel_center, percentile_min=0.0, percentile_max=1.0, min_width=1.0):
|
||||
"""
|
||||
Generalization convolution filter capable of applying
|
||||
weighted mean, median, maximum, and minimum filters
|
||||
parametrically using an arbitrary kernel.
|
||||
|
||||
Args:
|
||||
img (nparray):
|
||||
The image, a 2-D array of floats, to which the filter is being applied.
|
||||
kernel (nparray):
|
||||
The kernel, a 2-D array of floats.
|
||||
kernel_center (nparray):
|
||||
The kernel center coordinate, a 1-D array with two elements.
|
||||
percentile_min (float):
|
||||
The lower bound of the histogram window used by the filter,
|
||||
from 0 to 1.
|
||||
percentile_max (float):
|
||||
The upper bound of the histogram window used by the filter,
|
||||
from 0 to 1.
|
||||
min_width (float):
|
||||
The minimum size of the histogram window bounds, in weight units.
|
||||
Must be greater than 0.
|
||||
|
||||
Returns:
|
||||
(nparray): A filtered copy of the input image "img", a 2-D array of floats.
|
||||
"""
|
||||
|
||||
# Converts an index tuple into a vector.
|
||||
def vec(x):
|
||||
return np.array(x)
|
||||
|
||||
kernel_min = -kernel_center
|
||||
kernel_max = vec(kernel.shape) - kernel_center
|
||||
|
||||
def weighted_histogram_filter_single(idx):
|
||||
idx = vec(idx)
|
||||
min_index = np.maximum(0, idx + kernel_min)
|
||||
max_index = np.minimum(vec(img.shape), idx + kernel_max)
|
||||
window_shape = max_index - min_index
|
||||
|
||||
class WeightedElement:
|
||||
"""
|
||||
An element of the histogram, its weight
|
||||
and bounds.
|
||||
"""
|
||||
|
||||
def __init__(self, value, weight):
|
||||
self.value: float = value
|
||||
self.weight: float = weight
|
||||
self.window_min: float = 0.0
|
||||
self.window_max: float = 1.0
|
||||
|
||||
# Collect the values in the image as WeightedElements,
|
||||
# weighted by their corresponding kernel values.
|
||||
values = []
|
||||
for window_tup in np.ndindex(tuple(window_shape)):
|
||||
window_index = vec(window_tup)
|
||||
image_index = window_index + min_index
|
||||
centered_kernel_index = image_index - idx
|
||||
kernel_index = centered_kernel_index + kernel_center
|
||||
element = WeightedElement(img[tuple(image_index)], kernel[tuple(kernel_index)])
|
||||
values.append(element)
|
||||
|
||||
def sort_key(x: WeightedElement):
|
||||
return x.value
|
||||
|
||||
values.sort(key=sort_key)
|
||||
|
||||
# Calculate the height of the stack (sum)
|
||||
# and each sample's range they occupy in the stack
|
||||
sum = 0
|
||||
for i in range(len(values)):
|
||||
values[i].window_min = sum
|
||||
sum += values[i].weight
|
||||
values[i].window_max = sum
|
||||
|
||||
# Calculate what range of this stack ("window")
|
||||
# we want to get the weighted average across.
|
||||
window_min = sum * percentile_min
|
||||
window_max = sum * percentile_max
|
||||
window_width = window_max - window_min
|
||||
|
||||
# Ensure the window is within the stack and at least a certain size.
|
||||
if window_width < min_width:
|
||||
window_center = (window_min + window_max) / 2
|
||||
window_min = window_center - min_width / 2
|
||||
window_max = window_center + min_width / 2
|
||||
|
||||
if window_max > sum:
|
||||
window_max = sum
|
||||
window_min = sum - min_width
|
||||
|
||||
if window_min < 0:
|
||||
window_min = 0
|
||||
window_max = min_width
|
||||
|
||||
value = 0
|
||||
value_weight = 0
|
||||
|
||||
# Get the weighted average of all the samples
|
||||
# that overlap with the window, weighted
|
||||
# by the size of their overlap.
|
||||
for i in range(len(values)):
|
||||
if window_min >= values[i].window_max:
|
||||
continue
|
||||
if window_max <= values[i].window_min:
|
||||
break
|
||||
|
||||
s = max(window_min, values[i].window_min)
|
||||
e = min(window_max, values[i].window_max)
|
||||
w = e - s
|
||||
|
||||
value += values[i].value * w
|
||||
value_weight += w
|
||||
|
||||
return value / value_weight if value_weight != 0 else 0
|
||||
|
||||
img_out = img.copy()
|
||||
|
||||
# Apply the kernel operation over each pixel.
|
||||
for index in np.ndindex(img.shape):
|
||||
img_out[index] = weighted_histogram_filter_single(index)
|
||||
|
||||
return img_out
|
||||
|
||||
|
||||
def smoothstep(x):
|
||||
"""
|
||||
The smoothstep function, input should be clamped to 0-1 range.
|
||||
Turns a diagonal line (f(x) = x) into a sigmoid-like curve.
|
||||
"""
|
||||
return x * x * (3 - 2 * x)
|
||||
|
||||
|
||||
def smootherstep(x):
|
||||
"""
|
||||
The smootherstep function, input should be clamped to 0-1 range.
|
||||
Turns a diagonal line (f(x) = x) into a sigmoid-like curve.
|
||||
"""
|
||||
return x * x * x * (x * (6 * x - 15) + 10)
|
||||
|
||||
|
||||
def get_gaussian_kernel(stddev_radius=1.0, max_radius=2):
|
||||
"""
|
||||
Creates a Gaussian kernel with thresholded edges.
|
||||
|
||||
Args:
|
||||
stddev_radius (float):
|
||||
Standard deviation of the gaussian kernel, in pixels.
|
||||
max_radius (int):
|
||||
The size of the filter kernel. The number of pixels is (max_radius*2+1) ** 2.
|
||||
The kernel is thresholded so that any values one pixel beyond this radius
|
||||
is weighted at 0.
|
||||
|
||||
Returns:
|
||||
(nparray, nparray): A kernel array (shape: (N, N)), its center coordinate (shape: (2))
|
||||
"""
|
||||
|
||||
# Evaluates a 0-1 normalized gaussian function for a given square distance from the mean.
|
||||
def gaussian(sqr_mag):
|
||||
return math.exp(-sqr_mag / (stddev_radius * stddev_radius))
|
||||
|
||||
# Helper function for converting a tuple to an array.
|
||||
def vec(x):
|
||||
return np.array(x)
|
||||
|
||||
"""
|
||||
Since a gaussian is unbounded, we need to limit ourselves
|
||||
to a finite range.
|
||||
We taper the ends off at the end of that range so they equal zero
|
||||
while preserving the maximum value of 1 at the mean.
|
||||
"""
|
||||
zero_radius = max_radius + 1.0
|
||||
gauss_zero = gaussian(zero_radius * zero_radius)
|
||||
gauss_kernel_scale = 1 / (1 - gauss_zero)
|
||||
|
||||
def gaussian_kernel_func(coordinate):
|
||||
x = coordinate[0] ** 2.0 + coordinate[1] ** 2.0
|
||||
x = gaussian(x)
|
||||
x -= gauss_zero
|
||||
x *= gauss_kernel_scale
|
||||
x = max(0.0, x)
|
||||
return x
|
||||
|
||||
size = max_radius * 2 + 1
|
||||
kernel_center = max_radius
|
||||
kernel = np.zeros((size, size))
|
||||
|
||||
for index in np.ndindex(kernel.shape):
|
||||
kernel[index] = gaussian_kernel_func(vec(index) - kernel_center)
|
||||
|
||||
return kernel, kernel_center
|
||||
|
||||
|
||||
# ------------------- Constants -------------------
|
||||
|
||||
|
||||
default = SoftInpaintingSettings(1, 0.5, 4, 0, 0.5, 2)
|
||||
|
||||
enabled_ui_label = "Soft inpainting"
|
||||
enabled_gen_param_label = "Soft inpainting enabled"
|
||||
enabled_el_id = "soft_inpainting_enabled"
|
||||
|
||||
ui_labels = SoftInpaintingSettings(
|
||||
"Schedule bias",
|
||||
"Preservation strength",
|
||||
"Transition contrast boost",
|
||||
"Mask influence",
|
||||
"Difference threshold",
|
||||
"Difference contrast")
|
||||
|
||||
ui_info = SoftInpaintingSettings(
|
||||
"Shifts when preservation of original content occurs during denoising.",
|
||||
"How strongly partially masked content should be preserved.",
|
||||
"Amplifies the contrast that may be lost in partially masked regions.",
|
||||
"How strongly the original mask should bias the difference threshold.",
|
||||
"How much an image region can change before the original pixels are not blended in anymore.",
|
||||
"How sharp the transition should be between blended and not blended.")
|
||||
|
||||
gen_param_labels = SoftInpaintingSettings(
|
||||
"Soft inpainting schedule bias",
|
||||
"Soft inpainting preservation strength",
|
||||
"Soft inpainting transition contrast boost",
|
||||
"Soft inpainting mask influence",
|
||||
"Soft inpainting difference threshold",
|
||||
"Soft inpainting difference contrast")
|
||||
|
||||
el_ids = SoftInpaintingSettings(
|
||||
"mask_blend_power",
|
||||
"mask_blend_scale",
|
||||
"inpaint_detail_preservation",
|
||||
"composite_mask_influence",
|
||||
"composite_difference_threshold",
|
||||
"composite_difference_contrast")
|
||||
|
||||
|
||||
# ------------------- Script -------------------
|
||||
|
||||
|
||||
class Script(scripts.Script):
|
||||
def __init__(self):
|
||||
self.section = "inpaint"
|
||||
self.masks_for_overlay = None
|
||||
self.overlay_images = None
|
||||
|
||||
def title(self):
|
||||
return "Soft Inpainting"
|
||||
|
||||
def show(self, is_img2img):
|
||||
return scripts.AlwaysVisible if is_img2img else False
|
||||
|
||||
def ui(self, is_img2img):
|
||||
if not is_img2img:
|
||||
return
|
||||
|
||||
with InputAccordion(False, label=enabled_ui_label, elem_id=enabled_el_id) as soft_inpainting_enabled:
|
||||
with gr.Group():
|
||||
gr.Markdown(
|
||||
"""
|
||||
Soft inpainting allows you to **seamlessly blend original content with inpainted content** according to the mask opacity.
|
||||
**High _Mask blur_** values are recommended!
|
||||
""")
|
||||
|
||||
power = \
|
||||
gr.Slider(label=ui_labels.mask_blend_power,
|
||||
info=ui_info.mask_blend_power,
|
||||
minimum=0,
|
||||
maximum=8,
|
||||
step=0.1,
|
||||
value=default.mask_blend_power,
|
||||
elem_id=el_ids.mask_blend_power)
|
||||
scale = \
|
||||
gr.Slider(label=ui_labels.mask_blend_scale,
|
||||
info=ui_info.mask_blend_scale,
|
||||
minimum=0,
|
||||
maximum=8,
|
||||
step=0.05,
|
||||
value=default.mask_blend_scale,
|
||||
elem_id=el_ids.mask_blend_scale)
|
||||
detail = \
|
||||
gr.Slider(label=ui_labels.inpaint_detail_preservation,
|
||||
info=ui_info.inpaint_detail_preservation,
|
||||
minimum=1,
|
||||
maximum=32,
|
||||
step=0.5,
|
||||
value=default.inpaint_detail_preservation,
|
||||
elem_id=el_ids.inpaint_detail_preservation)
|
||||
|
||||
gr.Markdown(
|
||||
"""
|
||||
### Pixel Composite Settings
|
||||
""")
|
||||
|
||||
mask_inf = \
|
||||
gr.Slider(label=ui_labels.composite_mask_influence,
|
||||
info=ui_info.composite_mask_influence,
|
||||
minimum=0,
|
||||
maximum=1,
|
||||
step=0.05,
|
||||
value=default.composite_mask_influence,
|
||||
elem_id=el_ids.composite_mask_influence)
|
||||
|
||||
dif_thresh = \
|
||||
gr.Slider(label=ui_labels.composite_difference_threshold,
|
||||
info=ui_info.composite_difference_threshold,
|
||||
minimum=0,
|
||||
maximum=8,
|
||||
step=0.25,
|
||||
value=default.composite_difference_threshold,
|
||||
elem_id=el_ids.composite_difference_threshold)
|
||||
|
||||
dif_contr = \
|
||||
gr.Slider(label=ui_labels.composite_difference_contrast,
|
||||
info=ui_info.composite_difference_contrast,
|
||||
minimum=0,
|
||||
maximum=8,
|
||||
step=0.25,
|
||||
value=default.composite_difference_contrast,
|
||||
elem_id=el_ids.composite_difference_contrast)
|
||||
|
||||
with gr.Accordion("Help", open=False):
|
||||
gr.Markdown(
|
||||
f"""
|
||||
### {ui_labels.mask_blend_power}
|
||||
|
||||
The blending strength of the original content is scaled in proportion to the decreasing noise level (sigma) at each step.
|
||||
This ensures that the influence of the denoiser and original content preservation is roughly balanced at each step.
|
||||
This balance can be shifted using this parameter, controlling whether earlier or later steps have stronger preservation.
|
||||
|
||||
- **Below 1**: Stronger preservation near the end (with low sigma)
|
||||
- **1**: Balanced (proportional to sigma)
|
||||
- **Above 1**: Stronger preservation in the beginning (with high sigma)
|
||||
""")
|
||||
gr.Markdown(
|
||||
f"""
|
||||
### {ui_labels.mask_blend_scale}
|
||||
|
||||
Skews whether partially masked image regions should be more likely to preserve the original content or favor inpainted content.
|
||||
This may need to be adjusted depending on the {ui_labels.mask_blend_power}, CFG Scale, prompt and Denoising strength.
|
||||
|
||||
- **Low values**: Favors generated content.
|
||||
- **High values**: Favors original content.
|
||||
""")
|
||||
gr.Markdown(
|
||||
f"""
|
||||
### {ui_labels.inpaint_detail_preservation}
|
||||
|
||||
This parameter controls how the original latent vectors and denoised latent vectors are interpolated.
|
||||
With higher values, the magnitude of the resulting blended vector will be closer to the maximum of the two interpolated vectors.
|
||||
This can prevent the loss of contrast that occurs with linear interpolation.
|
||||
|
||||
- **Low values**: Softer blending, details may fade.
|
||||
- **High values**: Stronger contrast, may over-saturate colors.
|
||||
""")
|
||||
|
||||
gr.Markdown(
|
||||
"""
|
||||
## Pixel Composite Settings
|
||||
|
||||
Masks are generated based on how much a part of the image changed after denoising.
|
||||
These masks are used to blend the original and final images together.
|
||||
If the difference is low, the original pixels are used instead of the pixels returned by the inpainting process.
|
||||
""")
|
||||
|
||||
gr.Markdown(
|
||||
f"""
|
||||
### {ui_labels.composite_mask_influence}
|
||||
|
||||
This parameter controls how much the mask should bias this sensitivity to difference.
|
||||
|
||||
- **0**: Ignore the mask, only consider differences in image content.
|
||||
- **1**: Follow the mask closely despite image content changes.
|
||||
""")
|
||||
|
||||
gr.Markdown(
|
||||
f"""
|
||||
### {ui_labels.composite_difference_threshold}
|
||||
|
||||
This value represents the difference at which the original pixels will have less than 50% opacity.
|
||||
|
||||
- **Low values**: Two image patches must be almost the same in order to retain original pixels.
|
||||
- **High values**: Two image patches can be very different and still retain original pixels.
|
||||
""")
|
||||
|
||||
gr.Markdown(
|
||||
f"""
|
||||
### {ui_labels.composite_difference_contrast}
|
||||
|
||||
This value represents the contrast between the opacity of the original and inpainted content.
|
||||
|
||||
- **Low values**: The blend will be more gradual and have longer transitions, but may cause ghosting.
|
||||
- **High values**: Ghosting will be less common, but transitions may be very sudden.
|
||||
""")
|
||||
|
||||
self.infotext_fields = [(soft_inpainting_enabled, enabled_gen_param_label),
|
||||
(power, gen_param_labels.mask_blend_power),
|
||||
(scale, gen_param_labels.mask_blend_scale),
|
||||
(detail, gen_param_labels.inpaint_detail_preservation),
|
||||
(mask_inf, gen_param_labels.composite_mask_influence),
|
||||
(dif_thresh, gen_param_labels.composite_difference_threshold),
|
||||
(dif_contr, gen_param_labels.composite_difference_contrast)]
|
||||
|
||||
self.paste_field_names = []
|
||||
for _, field_name in self.infotext_fields:
|
||||
self.paste_field_names.append(field_name)
|
||||
|
||||
return [soft_inpainting_enabled,
|
||||
power,
|
||||
scale,
|
||||
detail,
|
||||
mask_inf,
|
||||
dif_thresh,
|
||||
dif_contr]
|
||||
|
||||
def process(self, p, enabled, power, scale, detail_preservation, mask_inf, dif_thresh, dif_contr):
|
||||
if not enabled:
|
||||
return
|
||||
|
||||
if not processing_uses_inpainting(p):
|
||||
return
|
||||
|
||||
# Shut off the rounding it normally does.
|
||||
p.mask_round = False
|
||||
|
||||
settings = SoftInpaintingSettings(power, scale, detail_preservation, mask_inf, dif_thresh, dif_contr)
|
||||
|
||||
# p.extra_generation_params["Mask rounding"] = False
|
||||
settings.add_generation_params(p.extra_generation_params)
|
||||
|
||||
def on_mask_blend(self, p, mba: scripts.MaskBlendArgs, enabled, power, scale, detail_preservation, mask_inf,
|
||||
dif_thresh, dif_contr):
|
||||
if not enabled:
|
||||
return
|
||||
|
||||
if not processing_uses_inpainting(p):
|
||||
return
|
||||
|
||||
if mba.is_final_blend:
|
||||
mba.blended_latent = mba.current_latent
|
||||
return
|
||||
|
||||
settings = SoftInpaintingSettings(power, scale, detail_preservation, mask_inf, dif_thresh, dif_contr)
|
||||
|
||||
# todo: Why is sigma 2D? Both values are the same.
|
||||
mba.blended_latent = latent_blend(settings,
|
||||
mba.init_latent,
|
||||
mba.current_latent,
|
||||
get_modified_nmask(settings, mba.nmask, mba.sigma[0]))
|
||||
|
||||
def post_sample(self, p, ps: scripts.PostSampleArgs, enabled, power, scale, detail_preservation, mask_inf,
|
||||
dif_thresh, dif_contr):
|
||||
if not enabled:
|
||||
return
|
||||
|
||||
if not processing_uses_inpainting(p):
|
||||
return
|
||||
|
||||
nmask = getattr(p, "nmask", None)
|
||||
if nmask is None:
|
||||
return
|
||||
|
||||
from modules import images
|
||||
from modules.shared import opts
|
||||
|
||||
settings = SoftInpaintingSettings(power, scale, detail_preservation, mask_inf, dif_thresh, dif_contr)
|
||||
|
||||
# since the original code puts holes in the existing overlay images,
|
||||
# we have to rebuild them.
|
||||
self.overlay_images = []
|
||||
for img in p.init_images:
|
||||
|
||||
image = images.flatten(img, opts.img2img_background_color)
|
||||
|
||||
if p.paste_to is None and p.resize_mode != 3:
|
||||
image = images.resize_image(p.resize_mode, image, p.width, p.height)
|
||||
|
||||
self.overlay_images.append(image.convert('RGBA'))
|
||||
|
||||
if len(p.init_images) == 1:
|
||||
self.overlay_images = self.overlay_images * p.batch_size
|
||||
|
||||
if getattr(ps.samples, 'already_decoded', False):
|
||||
self.masks_for_overlay = apply_masks(settings=settings,
|
||||
nmask=nmask,
|
||||
overlay_images=self.overlay_images,
|
||||
width=p.width,
|
||||
height=p.height,
|
||||
paste_to=p.paste_to)
|
||||
else:
|
||||
self.masks_for_overlay = apply_adaptive_masks(settings=settings,
|
||||
nmask=nmask,
|
||||
latent_orig=p.init_latent,
|
||||
latent_processed=ps.samples,
|
||||
overlay_images=self.overlay_images,
|
||||
width=p.width,
|
||||
height=p.height,
|
||||
paste_to=p.paste_to)
|
||||
|
||||
def postprocess_maskoverlay(self, p, ppmo: scripts.PostProcessMaskOverlayArgs, enabled, power, scale,
|
||||
detail_preservation, mask_inf, dif_thresh, dif_contr):
|
||||
if not enabled:
|
||||
return
|
||||
|
||||
if not processing_uses_inpainting(p):
|
||||
return
|
||||
|
||||
if self.masks_for_overlay is None:
|
||||
return
|
||||
|
||||
if self.overlay_images is None:
|
||||
return
|
||||
|
||||
ppmo.mask_for_overlay = self.masks_for_overlay[ppmo.index]
|
||||
ppmo.overlay_image = self.overlay_images[ppmo.index]
|
@ -1,9 +1,14 @@
|
||||
<div class="card" style="{style}" onclick="{card_clicked}" data-name="{name}" {sort_keys}>
|
||||
<div class='card' style={style} onclick={card_clicked}>
|
||||
{background_image}
|
||||
<div class="button-row">{copy_path_button}{metadata_button}{edit_button}</div>
|
||||
<div class="actions">
|
||||
<div class="additional">{search_terms}</div>
|
||||
<span class="name">{name}</span>
|
||||
<span class="description">{description}</span>
|
||||
{metadata_button}
|
||||
<div class='actions'>
|
||||
<div class='additional'>
|
||||
<ul>
|
||||
<a href="#" title="replace preview image with currently selected in gallery" onclick={save_card_preview}>replace preview</a>
|
||||
</ul>
|
||||
<span style="display:none" class='search_term{search_only}'>{search_term}</span>
|
||||
</div>
|
||||
<span class='name'>{name}</span>
|
||||
<span class='description'>{description}</span>
|
||||
</div>
|
||||
</div>
|
||||
|
@ -1,5 +0,0 @@
|
||||
<div class="copy-path-button card-button"
|
||||
title="Copy path to clipboard"
|
||||
onclick="extraNetworksCopyCardPath(event)"
|
||||
data-clipboard-text="{filename}">
|
||||
</div>
|
@ -1,4 +0,0 @@
|
||||
<div class="edit-button card-button"
|
||||
title="Edit metadata"
|
||||
onclick="extraNetworksEditUserMetadata(event, '{tabname}', '{extra_networks_tabname}')">
|
||||
</div>
|
@ -1,4 +0,0 @@
|
||||
<div class="metadata-button card-button"
|
||||
title="Show internal metadata"
|
||||
onclick="extraNetworksRequestMetadata(event, '{extra_networks_tabname}')">
|
||||
</div>
|
@ -1,8 +0,0 @@
|
||||
<div class="extra-network-pane-content-dirs">
|
||||
<div id='{tabname}_{extra_networks_tabname}_dirs' class='extra-network-dirs'>
|
||||
{dirs_html}
|
||||
</div>
|
||||
<div id='{tabname}_{extra_networks_tabname}_cards' class='extra-network-cards'>
|
||||
{items_html}
|
||||
</div>
|
||||
</div>
|
@ -1,8 +0,0 @@
|
||||
<div class="extra-network-pane-content-tree resize-handle-row">
|
||||
<div id='{tabname}_{extra_networks_tabname}_tree' class='extra-network-tree' style='flex-basis: {extra_networks_tree_view_default_width}px'>
|
||||
{tree_html}
|
||||
</div>
|
||||
<div id='{tabname}_{extra_networks_tabname}_cards' class='extra-network-cards' style='flex-grow: 1;'>
|
||||
{items_html}
|
||||
</div>
|
||||
</div>
|
@ -1,81 +0,0 @@
|
||||
<div id='{tabname}_{extra_networks_tabname}_pane' class='extra-network-pane {tree_view_div_default_display_class}'>
|
||||
<div class="extra-network-control" id="{tabname}_{extra_networks_tabname}_controls" style="display:none" >
|
||||
<div class="extra-network-control--search">
|
||||
<input
|
||||
id="{tabname}_{extra_networks_tabname}_extra_search"
|
||||
class="extra-network-control--search-text"
|
||||
type="search"
|
||||
placeholder="Search"
|
||||
>
|
||||
</div>
|
||||
|
||||
<small>Sort: </small>
|
||||
<div
|
||||
id="{tabname}_{extra_networks_tabname}_extra_sort_path"
|
||||
class="extra-network-control--sort{sort_path_active}"
|
||||
data-sortkey="default"
|
||||
title="Sort by path"
|
||||
onclick="extraNetworksControlSortOnClick(event, '{tabname}', '{extra_networks_tabname}');"
|
||||
>
|
||||
<i class="extra-network-control--icon extra-network-control--sort-icon"></i>
|
||||
</div>
|
||||
<div
|
||||
id="{tabname}_{extra_networks_tabname}_extra_sort_name"
|
||||
class="extra-network-control--sort{sort_name_active}"
|
||||
data-sortkey="name"
|
||||
title="Sort by name"
|
||||
onclick="extraNetworksControlSortOnClick(event, '{tabname}', '{extra_networks_tabname}');"
|
||||
>
|
||||
<i class="extra-network-control--icon extra-network-control--sort-icon"></i>
|
||||
</div>
|
||||
<div
|
||||
id="{tabname}_{extra_networks_tabname}_extra_sort_date_created"
|
||||
class="extra-network-control--sort{sort_date_created_active}"
|
||||
data-sortkey="date_created"
|
||||
title="Sort by date created"
|
||||
onclick="extraNetworksControlSortOnClick(event, '{tabname}', '{extra_networks_tabname}');"
|
||||
>
|
||||
<i class="extra-network-control--icon extra-network-control--sort-icon"></i>
|
||||
</div>
|
||||
<div
|
||||
id="{tabname}_{extra_networks_tabname}_extra_sort_date_modified"
|
||||
class="extra-network-control--sort{sort_date_modified_active}"
|
||||
data-sortkey="date_modified"
|
||||
title="Sort by date modified"
|
||||
onclick="extraNetworksControlSortOnClick(event, '{tabname}', '{extra_networks_tabname}');"
|
||||
>
|
||||
<i class="extra-network-control--icon extra-network-control--sort-icon"></i>
|
||||
</div>
|
||||
|
||||
<small> </small>
|
||||
<div
|
||||
id="{tabname}_{extra_networks_tabname}_extra_sort_dir"
|
||||
class="extra-network-control--sort-dir"
|
||||
data-sortdir="{data_sortdir}"
|
||||
title="Sort ascending"
|
||||
onclick="extraNetworksControlSortDirOnClick(event, '{tabname}', '{extra_networks_tabname}');"
|
||||
>
|
||||
<i class="extra-network-control--icon extra-network-control--sort-dir-icon"></i>
|
||||
</div>
|
||||
|
||||
|
||||
<small> </small>
|
||||
<div
|
||||
id="{tabname}_{extra_networks_tabname}_extra_tree_view"
|
||||
class="extra-network-control--tree-view {tree_view_btn_extra_class}"
|
||||
title="Enable Tree View"
|
||||
onclick="extraNetworksControlTreeViewOnClick(event, '{tabname}', '{extra_networks_tabname}');"
|
||||
>
|
||||
<i class="extra-network-control--icon extra-network-control--tree-view-icon"></i>
|
||||
</div>
|
||||
<div
|
||||
id="{tabname}_{extra_networks_tabname}_extra_refresh"
|
||||
class="extra-network-control--refresh"
|
||||
title="Refresh page"
|
||||
onclick="extraNetworksControlRefreshOnClick(event, '{tabname}', '{extra_networks_tabname}');"
|
||||
>
|
||||
<i class="extra-network-control--icon extra-network-control--refresh-icon"></i>
|
||||
</div>
|
||||
</div>
|
||||
{pane_content}
|
||||
</div>
|
@ -1,23 +0,0 @@
|
||||
<span data-filterable-item-text hidden>{search_terms}</span>
|
||||
<div class="tree-list-content {subclass}"
|
||||
type="button"
|
||||
onclick="extraNetworksTreeOnClick(event, '{tabname}', '{extra_networks_tabname}');{onclick_extra}"
|
||||
data-path="{data_path}"
|
||||
data-hash="{data_hash}"
|
||||
>
|
||||
<span class='tree-list-item-action tree-list-item-action--leading'>
|
||||
{action_list_item_action_leading}
|
||||
</span>
|
||||
<span class="tree-list-item-visual tree-list-item-visual--leading">
|
||||
{action_list_item_visual_leading}
|
||||
</span>
|
||||
<span class="tree-list-item-label tree-list-item-label--truncate">
|
||||
{action_list_item_label}
|
||||
</span>
|
||||
<span class="tree-list-item-visual tree-list-item-visual--trailing">
|
||||
{action_list_item_visual_trailing}
|
||||
</span>
|
||||
<span class="tree-list-item-action tree-list-item-action--trailing">
|
||||
{action_list_item_action_trailing}
|
||||
</span>
|
||||
</div>
|
@ -1,12 +1,10 @@
|
||||
<div>
|
||||
<a href="{api_docs}">API</a>
|
||||
<a href="/docs">API</a>
|
||||
•
|
||||
<a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui">Github</a>
|
||||
•
|
||||
<a href="https://gradio.app">Gradio</a>
|
||||
•
|
||||
<a href="#" onclick="showProfile('./internal/profile-startup'); return false;">Startup profile</a>
|
||||
•
|
||||
<a href="/" onclick="javascript:gradioApp().getElementById('settings_restart_gradio').click(); return false">Reload UI</a>
|
||||
</div>
|
||||
<br />
|
||||
|
7
html/image-update.svg
Normal file
7
html/image-update.svg
Normal file
@ -0,0 +1,7 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
|
||||
<filter id='shadow' color-interpolation-filters="sRGB">
|
||||
<feDropShadow flood-color="black" dx="0" dy="0" flood-opacity="0.9" stdDeviation="0.5"/>
|
||||
<feDropShadow flood-color="black" dx="0" dy="0" flood-opacity="0.9" stdDeviation="0.5"/>
|
||||
</filter>
|
||||
<path style="filter:url(#shadow);" fill="#FFFFFF" d="M13.18 19C13.35 19.72 13.64 20.39 14.03 21H5C3.9 21 3 20.11 3 19V5C3 3.9 3.9 3 5 3H19C20.11 3 21 3.9 21 5V11.18C20.5 11.07 20 11 19.5 11C19.33 11 19.17 11 19 11.03V5H5V19H13.18M11.21 15.83L9.25 13.47L6.5 17H13.03C13.14 15.54 13.73 14.22 14.64 13.19L13.96 12.29L11.21 15.83M19 13.5V12L16.75 14.25L19 16.5V15C20.38 15 21.5 16.12 21.5 17.5C21.5 17.9 21.41 18.28 21.24 18.62L22.33 19.71C22.75 19.08 23 18.32 23 17.5C23 15.29 21.21 13.5 19 13.5M19 20C17.62 20 16.5 18.88 16.5 17.5C16.5 17.1 16.59 16.72 16.76 16.38L15.67 15.29C15.25 15.92 15 16.68 15 17.5C15 19.71 16.79 21.5 19 21.5V23L21.25 20.75L19 18.5V20Z" />
|
||||
</svg>
|
@ -4,6 +4,107 @@
|
||||
#licenses pre { margin: 1em 0 2em 0;}
|
||||
</style>
|
||||
|
||||
<h2><a href="https://github.com/sczhou/CodeFormer/blob/master/LICENSE">CodeFormer</a></h2>
|
||||
<small>Parts of CodeFormer code had to be copied to be compatible with GFPGAN.</small>
|
||||
<pre>
|
||||
S-Lab License 1.0
|
||||
|
||||
Copyright 2022 S-Lab
|
||||
|
||||
Redistribution and use for non-commercial purpose in source and
|
||||
binary forms, with or without modification, are permitted provided
|
||||
that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
In the event that redistribution and/or use for commercial purpose in
|
||||
source or binary forms, with or without modification is required,
|
||||
please contact the contributor(s) of the work.
|
||||
</pre>
|
||||
|
||||
|
||||
<h2><a href="https://github.com/victorca25/iNNfer/blob/main/LICENSE">ESRGAN</a></h2>
|
||||
<small>Code for architecture and reading models copied.</small>
|
||||
<pre>
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2021 victorca25
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
</pre>
|
||||
|
||||
<h2><a href="https://github.com/xinntao/Real-ESRGAN/blob/master/LICENSE">Real-ESRGAN</a></h2>
|
||||
<small>Some code is copied to support ESRGAN models.</small>
|
||||
<pre>
|
||||
BSD 3-Clause License
|
||||
|
||||
Copyright (c) 2021, Xintao Wang
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
</pre>
|
||||
|
||||
<h2><a href="https://github.com/invoke-ai/InvokeAI/blob/main/LICENSE">InvokeAI</a></h2>
|
||||
<small>Some code for compatibility with OSX is taken from lstein's repository.</small>
|
||||
<pre>
|
||||
@ -82,6 +183,213 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
</pre>
|
||||
|
||||
<h2><a href="https://github.com/JingyunLiang/SwinIR/blob/main/LICENSE">SwinIR</a></h2>
|
||||
<small>Code added by contributors, most likely copied from this repository.</small>
|
||||
|
||||
<pre>
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [2021] [SwinIR Authors]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
</pre>
|
||||
|
||||
<h2><a href="https://github.com/AminRezaei0x443/memory-efficient-attention/blob/main/LICENSE">Memory Efficient Attention</a></h2>
|
||||
<small>The sub-quadratic cross attention optimization uses modified code from the Memory Efficient Attention package that Alex Birch optimized for 3D tensors. This license is updated to reflect that.</small>
|
||||
<pre>
|
||||
@ -379,4 +687,4 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
</pre>
|
||||
</pre>
|
@ -50,17 +50,17 @@ function dimensionChange(e, is_width, is_height) {
|
||||
var scaledx = targetElement.naturalWidth * viewportscale;
|
||||
var scaledy = targetElement.naturalHeight * viewportscale;
|
||||
|
||||
var clientRectTop = (viewportOffset.top + window.scrollY);
|
||||
var clientRectLeft = (viewportOffset.left + window.scrollX);
|
||||
var clientRectCentreY = clientRectTop + (targetElement.clientHeight / 2);
|
||||
var clientRectCentreX = clientRectLeft + (targetElement.clientWidth / 2);
|
||||
var cleintRectTop = (viewportOffset.top + window.scrollY);
|
||||
var cleintRectLeft = (viewportOffset.left + window.scrollX);
|
||||
var cleintRectCentreY = cleintRectTop + (targetElement.clientHeight / 2);
|
||||
var cleintRectCentreX = cleintRectLeft + (targetElement.clientWidth / 2);
|
||||
|
||||
var arscale = Math.min(scaledx / currentWidth, scaledy / currentHeight);
|
||||
var arscaledx = currentWidth * arscale;
|
||||
var arscaledy = currentHeight * arscale;
|
||||
|
||||
var arRectTop = clientRectCentreY - (arscaledy / 2);
|
||||
var arRectLeft = clientRectCentreX - (arscaledx / 2);
|
||||
var arRectTop = cleintRectCentreY - (arscaledy / 2);
|
||||
var arRectLeft = cleintRectCentreX - (arscaledx / 2);
|
||||
var arRectWidth = arscaledx;
|
||||
var arRectHeight = arscaledy;
|
||||
|
||||
@ -81,7 +81,7 @@ function dimensionChange(e, is_width, is_height) {
|
||||
}
|
||||
|
||||
|
||||
onAfterUiUpdate(function() {
|
||||
onUiUpdate(function() {
|
||||
var arPreviewRect = gradioApp().querySelector('#imageARPreview');
|
||||
if (arPreviewRect) {
|
||||
arPreviewRect.style.display = 'none';
|
||||
|
@ -8,6 +8,9 @@ var contextMenuInit = function() {
|
||||
};
|
||||
|
||||
function showContextMenu(event, element, menuEntries) {
|
||||
let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft;
|
||||
let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop;
|
||||
|
||||
let oldMenu = gradioApp().querySelector('#context-menu');
|
||||
if (oldMenu) {
|
||||
oldMenu.remove();
|
||||
@ -20,8 +23,10 @@ var contextMenuInit = function() {
|
||||
contextMenu.style.background = baseStyle.background;
|
||||
contextMenu.style.color = baseStyle.color;
|
||||
contextMenu.style.fontFamily = baseStyle.fontFamily;
|
||||
contextMenu.style.top = event.pageY + 'px';
|
||||
contextMenu.style.left = event.pageX + 'px';
|
||||
contextMenu.style.top = posy + 'px';
|
||||
contextMenu.style.left = posx + 'px';
|
||||
|
||||
|
||||
|
||||
const contextMenuList = document.createElement('ul');
|
||||
contextMenuList.className = 'context-menu-items';
|
||||
@ -38,6 +43,21 @@ var contextMenuInit = function() {
|
||||
});
|
||||
|
||||
gradioApp().appendChild(contextMenu);
|
||||
|
||||
let menuWidth = contextMenu.offsetWidth + 4;
|
||||
let menuHeight = contextMenu.offsetHeight + 4;
|
||||
|
||||
let windowWidth = window.innerWidth;
|
||||
let windowHeight = window.innerHeight;
|
||||
|
||||
if ((windowWidth - posx) < menuWidth) {
|
||||
contextMenu.style.left = windowWidth - menuWidth + "px";
|
||||
}
|
||||
|
||||
if ((windowHeight - posy) < menuHeight) {
|
||||
contextMenu.style.top = windowHeight - menuHeight + "px";
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
function appendContextMenuOption(targetElementSelector, entryName, entryFunction) {
|
||||
@ -87,23 +107,16 @@ var contextMenuInit = function() {
|
||||
oldMenu.remove();
|
||||
}
|
||||
});
|
||||
['contextmenu', 'touchstart'].forEach((eventType) => {
|
||||
gradioApp().addEventListener(eventType, function(e) {
|
||||
let ev = e;
|
||||
if (eventType.startsWith('touch')) {
|
||||
if (e.touches.length !== 2) return;
|
||||
ev = e.touches[0];
|
||||
gradioApp().addEventListener("contextmenu", function(e) {
|
||||
let oldMenu = gradioApp().querySelector('#context-menu');
|
||||
if (oldMenu) {
|
||||
oldMenu.remove();
|
||||
}
|
||||
menuSpecs.forEach(function(v, k) {
|
||||
if (e.composedPath()[0].matches(k)) {
|
||||
showContextMenu(e, e.composedPath()[0], v);
|
||||
e.preventDefault();
|
||||
}
|
||||
let oldMenu = gradioApp().querySelector('#context-menu');
|
||||
if (oldMenu) {
|
||||
oldMenu.remove();
|
||||
}
|
||||
menuSpecs.forEach(function(v, k) {
|
||||
if (e.composedPath()[0].matches(k)) {
|
||||
showContextMenu(ev, e.composedPath()[0], v);
|
||||
e.preventDefault();
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
eventListenerApplied = true;
|
||||
@ -135,18 +148,12 @@ var addContextMenuEventListener = initResponse[2];
|
||||
500);
|
||||
};
|
||||
|
||||
let generateOnRepeat_txt2img = function() {
|
||||
appendContextMenuOption('#txt2img_generate', 'Generate forever', function() {
|
||||
generateOnRepeat('#txt2img_generate', '#txt2img_interrupt');
|
||||
};
|
||||
|
||||
let generateOnRepeat_img2img = function() {
|
||||
});
|
||||
appendContextMenuOption('#img2img_generate', 'Generate forever', function() {
|
||||
generateOnRepeat('#img2img_generate', '#img2img_interrupt');
|
||||
};
|
||||
|
||||
appendContextMenuOption('#txt2img_generate', 'Generate forever', generateOnRepeat_txt2img);
|
||||
appendContextMenuOption('#txt2img_interrupt', 'Generate forever', generateOnRepeat_txt2img);
|
||||
appendContextMenuOption('#img2img_generate', 'Generate forever', generateOnRepeat_img2img);
|
||||
appendContextMenuOption('#img2img_interrupt', 'Generate forever', generateOnRepeat_img2img);
|
||||
});
|
||||
|
||||
let cancelGenerateForever = function() {
|
||||
clearInterval(window.generateOnRepeatInterval);
|
||||
@ -160,4 +167,6 @@ var addContextMenuEventListener = initResponse[2];
|
||||
})();
|
||||
//End example Context Menu Items
|
||||
|
||||
onAfterUiUpdate(addContextMenuEventListener);
|
||||
onUiUpdate(function() {
|
||||
addContextMenuEventListener();
|
||||
});
|
||||
|
85
javascript/dragdrop.js
vendored
85
javascript/dragdrop.js
vendored
@ -48,85 +48,30 @@ function dropReplaceImage(imgWrap, files) {
|
||||
}
|
||||
}
|
||||
|
||||
function eventHasFiles(e) {
|
||||
if (!e.dataTransfer || !e.dataTransfer.files) return false;
|
||||
if (e.dataTransfer.files.length > 0) return true;
|
||||
if (e.dataTransfer.items.length > 0 && e.dataTransfer.items[0].kind == "file") return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
function isURL(url) {
|
||||
try {
|
||||
const _ = new URL(url);
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function dragDropTargetIsPrompt(target) {
|
||||
if (target?.placeholder && target?.placeholder.indexOf("Prompt") >= 0) return true;
|
||||
if (target?.parentNode?.parentNode?.className?.indexOf("prompt") > 0) return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
window.document.addEventListener('dragover', e => {
|
||||
const target = e.composedPath()[0];
|
||||
if (!eventHasFiles(e)) return;
|
||||
|
||||
var targetImage = target.closest('[data-testid="image"]');
|
||||
if (!dragDropTargetIsPrompt(target) && !targetImage) return;
|
||||
|
||||
const imgWrap = target.closest('[data-testid="image"]');
|
||||
if (!imgWrap && target.placeholder && target.placeholder.indexOf("Prompt") == -1) {
|
||||
return;
|
||||
}
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
e.dataTransfer.dropEffect = 'copy';
|
||||
});
|
||||
|
||||
window.document.addEventListener('drop', async e => {
|
||||
window.document.addEventListener('drop', e => {
|
||||
const target = e.composedPath()[0];
|
||||
const url = e.dataTransfer.getData('text/uri-list') || e.dataTransfer.getData('text/plain');
|
||||
if (!eventHasFiles(e) && !isURL(url)) return;
|
||||
|
||||
if (dragDropTargetIsPrompt(target)) {
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
|
||||
const isImg2img = get_tab_index('tabs') == 1;
|
||||
let prompt_image_target = isImg2img ? "img2img_prompt_image" : "txt2img_prompt_image";
|
||||
|
||||
const imgParent = gradioApp().getElementById(prompt_image_target);
|
||||
const files = e.dataTransfer.files;
|
||||
const fileInput = imgParent.querySelector('input[type="file"]');
|
||||
if (eventHasFiles(e) && fileInput) {
|
||||
fileInput.files = files;
|
||||
fileInput.dispatchEvent(new Event('change'));
|
||||
} else if (url) {
|
||||
try {
|
||||
const request = await fetch(url);
|
||||
if (!request.ok) {
|
||||
console.error('Error fetching URL:', url, request.status);
|
||||
return;
|
||||
}
|
||||
const data = new DataTransfer();
|
||||
data.items.add(new File([await request.blob()], 'image.png'));
|
||||
fileInput.files = data.files;
|
||||
fileInput.dispatchEvent(new Event('change'));
|
||||
} catch (error) {
|
||||
console.error('Error fetching URL:', url, error);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var targetImage = target.closest('[data-testid="image"]');
|
||||
if (targetImage) {
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
const files = e.dataTransfer.files;
|
||||
dropReplaceImage(targetImage, files);
|
||||
if (target.placeholder.indexOf("Prompt") == -1) {
|
||||
return;
|
||||
}
|
||||
const imgWrap = target.closest('[data-testid="image"]');
|
||||
if (!imgWrap) {
|
||||
return;
|
||||
}
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
const files = e.dataTransfer.files;
|
||||
dropReplaceImage(imgWrap, files);
|
||||
});
|
||||
|
||||
window.addEventListener('paste', e => {
|
||||
@ -145,7 +90,7 @@ window.addEventListener('paste', e => {
|
||||
}
|
||||
|
||||
const firstFreeImageField = visibleImageFields
|
||||
.filter(el => !el.querySelector('img'))?.[0];
|
||||
.filter(el => el.querySelector('input[type=file]'))?.[0];
|
||||
|
||||
dropReplaceImage(
|
||||
firstFreeImageField ?
|
||||
|
@ -18,43 +18,37 @@ function keyupEditAttention(event) {
|
||||
const before = text.substring(0, selectionStart);
|
||||
let beforeParen = before.lastIndexOf(OPEN);
|
||||
if (beforeParen == -1) return false;
|
||||
|
||||
let beforeClosingParen = before.lastIndexOf(CLOSE);
|
||||
if (beforeClosingParen != -1 && beforeClosingParen > beforeParen) return false;
|
||||
let beforeParenClose = before.lastIndexOf(CLOSE);
|
||||
while (beforeParenClose !== -1 && beforeParenClose > beforeParen) {
|
||||
beforeParen = before.lastIndexOf(OPEN, beforeParen - 1);
|
||||
beforeParenClose = before.lastIndexOf(CLOSE, beforeParenClose - 1);
|
||||
}
|
||||
|
||||
// Find closing parenthesis around current cursor
|
||||
const after = text.substring(selectionStart);
|
||||
let afterParen = after.indexOf(CLOSE);
|
||||
if (afterParen == -1) return false;
|
||||
|
||||
let afterOpeningParen = after.indexOf(OPEN);
|
||||
if (afterOpeningParen != -1 && afterOpeningParen < afterParen) return false;
|
||||
let afterParenOpen = after.indexOf(OPEN);
|
||||
while (afterParenOpen !== -1 && afterParen > afterParenOpen) {
|
||||
afterParen = after.indexOf(CLOSE, afterParen + 1);
|
||||
afterParenOpen = after.indexOf(OPEN, afterParenOpen + 1);
|
||||
}
|
||||
if (beforeParen === -1 || afterParen === -1) return false;
|
||||
|
||||
// Set the selection to the text between the parenthesis
|
||||
const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen);
|
||||
if (/.*:-?[\d.]+/s.test(parenContent)) {
|
||||
const lastColon = parenContent.lastIndexOf(":");
|
||||
selectionStart = beforeParen + 1;
|
||||
selectionEnd = selectionStart + lastColon;
|
||||
} else {
|
||||
selectionStart = beforeParen + 1;
|
||||
selectionEnd = selectionStart + parenContent.length;
|
||||
}
|
||||
|
||||
const lastColon = parenContent.lastIndexOf(":");
|
||||
selectionStart = beforeParen + 1;
|
||||
selectionEnd = selectionStart + lastColon;
|
||||
target.setSelectionRange(selectionStart, selectionEnd);
|
||||
return true;
|
||||
}
|
||||
|
||||
function selectCurrentWord() {
|
||||
if (selectionStart !== selectionEnd) return false;
|
||||
const whitespace_delimiters = {"Tab": "\t", "Carriage Return": "\r", "Line Feed": "\n"};
|
||||
let delimiters = opts.keyedit_delimiters;
|
||||
const delimiters = opts.keyedit_delimiters + " \r\n\t";
|
||||
|
||||
for (let i of opts.keyedit_delimiters_whitespace) {
|
||||
delimiters += whitespace_delimiters[i];
|
||||
}
|
||||
|
||||
// seek backward to find beginning
|
||||
// seek backward until to find beggining
|
||||
while (!delimiters.includes(text[selectionStart - 1]) && selectionStart > 0) {
|
||||
selectionStart--;
|
||||
}
|
||||
@ -64,20 +58,12 @@ function keyupEditAttention(event) {
|
||||
selectionEnd++;
|
||||
}
|
||||
|
||||
// deselect surrounding whitespace
|
||||
while (text[selectionStart] == " " && selectionStart < selectionEnd) {
|
||||
selectionStart++;
|
||||
}
|
||||
while (text[selectionEnd - 1] == " " && selectionEnd > selectionStart) {
|
||||
selectionEnd--;
|
||||
}
|
||||
|
||||
target.setSelectionRange(selectionStart, selectionEnd);
|
||||
return true;
|
||||
}
|
||||
|
||||
// If the user hasn't selected anything, let's select their current parenthesis block or word
|
||||
if (!selectCurrentParenthesisBlock('<', '>') && !selectCurrentParenthesisBlock('(', ')') && !selectCurrentParenthesisBlock('[', ']')) {
|
||||
if (!selectCurrentParenthesisBlock('<', '>') && !selectCurrentParenthesisBlock('(', ')')) {
|
||||
selectCurrentWord();
|
||||
}
|
||||
|
||||
@ -85,62 +71,40 @@ function keyupEditAttention(event) {
|
||||
|
||||
var closeCharacter = ')';
|
||||
var delta = opts.keyedit_precision_attention;
|
||||
var start = selectionStart > 0 ? text[selectionStart - 1] : "";
|
||||
var end = text[selectionEnd];
|
||||
|
||||
if (start == '<') {
|
||||
if (selectionStart > 0 && text[selectionStart - 1] == '<') {
|
||||
closeCharacter = '>';
|
||||
delta = opts.keyedit_precision_extra;
|
||||
} else if (start == '(' && end == ')' || start == '[' && end == ']') { // convert old-style (((emphasis)))
|
||||
let numParen = 0;
|
||||
} else if (selectionStart == 0 || text[selectionStart - 1] != "(") {
|
||||
|
||||
while (text[selectionStart - numParen - 1] == start && text[selectionEnd + numParen] == end) {
|
||||
numParen++;
|
||||
}
|
||||
|
||||
if (start == "[") {
|
||||
weight = (1 / 1.1) ** numParen;
|
||||
} else {
|
||||
weight = 1.1 ** numParen;
|
||||
}
|
||||
|
||||
weight = Math.round(weight / opts.keyedit_precision_attention) * opts.keyedit_precision_attention;
|
||||
|
||||
text = text.slice(0, selectionStart - numParen) + "(" + text.slice(selectionStart, selectionEnd) + ":" + weight + ")" + text.slice(selectionEnd + numParen);
|
||||
selectionStart -= numParen - 1;
|
||||
selectionEnd -= numParen - 1;
|
||||
} else if (start != '(') {
|
||||
// do not include spaces at the end
|
||||
while (selectionEnd > selectionStart && text[selectionEnd - 1] == ' ') {
|
||||
selectionEnd--;
|
||||
selectionEnd -= 1;
|
||||
}
|
||||
|
||||
if (selectionStart == selectionEnd) {
|
||||
return;
|
||||
}
|
||||
|
||||
text = text.slice(0, selectionStart) + "(" + text.slice(selectionStart, selectionEnd) + ":1.0)" + text.slice(selectionEnd);
|
||||
|
||||
selectionStart++;
|
||||
selectionEnd++;
|
||||
selectionStart += 1;
|
||||
selectionEnd += 1;
|
||||
}
|
||||
|
||||
if (text[selectionEnd] != ':') return;
|
||||
var weightLength = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1;
|
||||
var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + weightLength));
|
||||
var end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1;
|
||||
var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + 1 + end));
|
||||
if (isNaN(weight)) return;
|
||||
|
||||
weight += isPlus ? delta : -delta;
|
||||
weight = parseFloat(weight.toPrecision(12));
|
||||
if (Number.isInteger(weight)) weight += ".0";
|
||||
if (String(weight).length == 1) weight += ".0";
|
||||
|
||||
if (closeCharacter == ')' && weight == 1) {
|
||||
var endParenPos = text.substring(selectionEnd).indexOf(')');
|
||||
text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + endParenPos + 1);
|
||||
text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + 5);
|
||||
selectionStart--;
|
||||
selectionEnd--;
|
||||
} else {
|
||||
text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + weightLength);
|
||||
text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + 1 + end - 1);
|
||||
}
|
||||
|
||||
target.focus();
|
||||
|
@ -1,41 +0,0 @@
|
||||
/* alt+left/right moves text in prompt */
|
||||
|
||||
function keyupEditOrder(event) {
|
||||
if (!opts.keyedit_move) return;
|
||||
|
||||
let target = event.originalTarget || event.composedPath()[0];
|
||||
if (!target.matches("*:is([id*='_toprow'] [id*='_prompt'], .prompt) textarea")) return;
|
||||
if (!event.altKey) return;
|
||||
|
||||
let isLeft = event.key == "ArrowLeft";
|
||||
let isRight = event.key == "ArrowRight";
|
||||
if (!isLeft && !isRight) return;
|
||||
event.preventDefault();
|
||||
|
||||
let selectionStart = target.selectionStart;
|
||||
let selectionEnd = target.selectionEnd;
|
||||
let text = target.value;
|
||||
let items = text.split(",");
|
||||
let indexStart = (text.slice(0, selectionStart).match(/,/g) || []).length;
|
||||
let indexEnd = (text.slice(0, selectionEnd).match(/,/g) || []).length;
|
||||
let range = indexEnd - indexStart + 1;
|
||||
|
||||
if (isLeft && indexStart > 0) {
|
||||
items.splice(indexStart - 1, 0, ...items.splice(indexStart, range));
|
||||
target.value = items.join();
|
||||
target.selectionStart = items.slice(0, indexStart - 1).join().length + (indexStart == 1 ? 0 : 1);
|
||||
target.selectionEnd = items.slice(0, indexEnd).join().length;
|
||||
} else if (isRight && indexEnd < items.length - 1) {
|
||||
items.splice(indexStart + 1, 0, ...items.splice(indexStart, range));
|
||||
target.value = items.join();
|
||||
target.selectionStart = items.slice(0, indexStart + 1).join().length + 1;
|
||||
target.selectionEnd = items.slice(0, indexEnd + 2).join().length;
|
||||
}
|
||||
|
||||
event.preventDefault();
|
||||
updateInput(target);
|
||||
}
|
||||
|
||||
addEventListener('keydown', (event) => {
|
||||
keyupEditOrder(event);
|
||||
});
|
@ -2,11 +2,8 @@
|
||||
function extensions_apply(_disabled_list, _update_list, disable_all) {
|
||||
var disable = [];
|
||||
var update = [];
|
||||
const extensions_input = gradioApp().querySelectorAll('#extensions input[type="checkbox"]');
|
||||
if (extensions_input.length == 0) {
|
||||
throw Error("Extensions page not yet loaded.");
|
||||
}
|
||||
extensions_input.forEach(function(x) {
|
||||
|
||||
gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x) {
|
||||
if (x.name.startsWith("enable_") && !x.checked) {
|
||||
disable.push(x.name.substring(7));
|
||||
}
|
||||
@ -36,7 +33,7 @@ function extensions_check() {
|
||||
|
||||
|
||||
var id = randomId();
|
||||
requestProgress(id, gradioApp().getElementById('extensions_installed_html'), null, function() {
|
||||
requestProgress(id, gradioApp().getElementById('extensions_installed_top'), null, function() {
|
||||
|
||||
});
|
||||
|
||||
@ -75,21 +72,3 @@ function config_state_confirm_restore(_, config_state_name, config_restore_type)
|
||||
}
|
||||
return [confirmed, config_state_name, config_restore_type];
|
||||
}
|
||||
|
||||
function toggle_all_extensions(event) {
|
||||
gradioApp().querySelectorAll('#extensions .extension_toggle').forEach(function(checkbox_el) {
|
||||
checkbox_el.checked = event.target.checked;
|
||||
});
|
||||
}
|
||||
|
||||
function toggle_extension() {
|
||||
let all_extensions_toggled = true;
|
||||
for (const checkbox_el of gradioApp().querySelectorAll('#extensions .extension_toggle')) {
|
||||
if (!checkbox_el.checked) {
|
||||
all_extensions_toggled = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
gradioApp().querySelector('#extensions .all_extensions_toggle').checked = all_extensions_toggled;
|
||||
}
|
||||
|
@ -1,21 +1,48 @@
|
||||
function toggleCss(key, css, enable) {
|
||||
var style = document.getElementById(key);
|
||||
if (enable && !style) {
|
||||
style = document.createElement('style');
|
||||
style.id = key;
|
||||
style.type = 'text/css';
|
||||
document.head.appendChild(style);
|
||||
}
|
||||
if (style && !enable) {
|
||||
document.head.removeChild(style);
|
||||
}
|
||||
if (style) {
|
||||
style.innerHTML == '';
|
||||
style.appendChild(document.createTextNode(css));
|
||||
}
|
||||
function setupExtraNetworksForTab(tabname) {
|
||||
gradioApp().querySelector('#' + tabname + '_extra_tabs').classList.add('extra-networks');
|
||||
|
||||
var tabs = gradioApp().querySelector('#' + tabname + '_extra_tabs > div');
|
||||
var search = gradioApp().querySelector('#' + tabname + '_extra_search textarea');
|
||||
var refresh = gradioApp().getElementById(tabname + '_extra_refresh');
|
||||
|
||||
search.classList.add('search');
|
||||
tabs.appendChild(search);
|
||||
tabs.appendChild(refresh);
|
||||
|
||||
var applyFilter = function() {
|
||||
var searchTerm = search.value.toLowerCase();
|
||||
|
||||
gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card').forEach(function(elem) {
|
||||
var searchOnly = elem.querySelector('.search_only');
|
||||
var text = elem.querySelector('.name').textContent.toLowerCase() + " " + elem.querySelector('.search_term').textContent.toLowerCase();
|
||||
|
||||
var visible = text.indexOf(searchTerm) != -1;
|
||||
|
||||
if (searchOnly && searchTerm.length < 4) {
|
||||
visible = false;
|
||||
}
|
||||
|
||||
elem.style.display = visible ? "" : "none";
|
||||
});
|
||||
};
|
||||
|
||||
search.addEventListener("input", applyFilter);
|
||||
applyFilter();
|
||||
|
||||
extraNetworksApplyFilter[tabname] = applyFilter;
|
||||
}
|
||||
|
||||
function setupExtraNetworksForTab(tabname) {
|
||||
function applyExtraNetworkFilter(tabname) {
|
||||
setTimeout(extraNetworksApplyFilter[tabname], 1);
|
||||
}
|
||||
|
||||
var extraNetworksApplyFilter = {};
|
||||
var activePromptTextarea = {};
|
||||
|
||||
function setupExtraNetworks() {
|
||||
setupExtraNetworksForTab('txt2img');
|
||||
setupExtraNetworksForTab('img2img');
|
||||
|
||||
function registerPrompt(tabname, id) {
|
||||
var textarea = gradioApp().querySelector("#" + id + " > label > textarea");
|
||||
|
||||
@ -28,213 +55,39 @@ function setupExtraNetworksForTab(tabname) {
|
||||
});
|
||||
}
|
||||
|
||||
var tabnav = gradioApp().querySelector('#' + tabname + '_extra_tabs > div.tab-nav');
|
||||
var controlsDiv = document.createElement('DIV');
|
||||
controlsDiv.classList.add('extra-networks-controls-div');
|
||||
tabnav.appendChild(controlsDiv);
|
||||
tabnav.insertBefore(controlsDiv, null);
|
||||
|
||||
var this_tab = gradioApp().querySelector('#' + tabname + '_extra_tabs');
|
||||
this_tab.querySelectorAll(":scope > [id^='" + tabname + "_']").forEach(function(elem) {
|
||||
// tabname_full = {tabname}_{extra_networks_tabname}
|
||||
var tabname_full = elem.id;
|
||||
var search = gradioApp().querySelector("#" + tabname_full + "_extra_search");
|
||||
var sort_dir = gradioApp().querySelector("#" + tabname_full + "_extra_sort_dir");
|
||||
var refresh = gradioApp().querySelector("#" + tabname_full + "_extra_refresh");
|
||||
var currentSort = '';
|
||||
|
||||
// If any of the buttons above don't exist, we want to skip this iteration of the loop.
if (!search || !sort_dir || !refresh) {
return; // `return` is the equivalent of `continue`, but for forEach loops.
}
|
||||
|
||||
var applyFilter = function(force) {
|
||||
var searchTerm = search.value.toLowerCase();
|
||||
gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card').forEach(function(elem) {
|
||||
var searchOnly = elem.querySelector('.search_only');
|
||||
var text = Array.prototype.map.call(elem.querySelectorAll('.search_terms, .description'), function(t) {
|
||||
return t.textContent.toLowerCase();
|
||||
}).join(" ");
|
||||
|
||||
var visible = text.indexOf(searchTerm) != -1;
|
||||
if (searchOnly && searchTerm.length < 4) {
|
||||
visible = false;
|
||||
}
|
||||
if (visible) {
|
||||
elem.classList.remove("hidden");
|
||||
} else {
|
||||
elem.classList.add("hidden");
|
||||
}
|
||||
});
|
||||
|
||||
applySort(force);
|
||||
};
|
||||
|
||||
var applySort = function(force) {
|
||||
var cards = gradioApp().querySelectorAll('#' + tabname_full + ' div.card');
|
||||
var parent = gradioApp().querySelector('#' + tabname_full + "_cards");
|
||||
var reverse = sort_dir.dataset.sortdir == "Descending";
|
||||
var activeSearchElem = gradioApp().querySelector('#' + tabname_full + "_controls .extra-network-control--sort.extra-network-control--enabled");
|
||||
var sortKey = activeSearchElem ? activeSearchElem.dataset.sortkey : "default";
|
||||
var sortKeyDataField = "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1);
|
||||
var sortKeyStore = sortKey + "-" + sort_dir.dataset.sortdir + "-" + cards.length;
|
||||
|
||||
if (sortKeyStore == currentSort && !force) {
|
||||
return;
|
||||
}
|
||||
currentSort = sortKeyStore;
|
||||
|
||||
var sortedCards = Array.from(cards);
|
||||
sortedCards.sort(function(cardA, cardB) {
|
||||
var a = cardA.dataset[sortKeyDataField];
|
||||
var b = cardB.dataset[sortKeyDataField];
|
||||
if (!isNaN(a) && !isNaN(b)) {
|
||||
return parseInt(a) - parseInt(b);
|
||||
}
|
||||
|
||||
return (a < b ? -1 : (a > b ? 1 : 0));
|
||||
});
|
||||
|
||||
if (reverse) {
|
||||
sortedCards.reverse();
|
||||
}
|
||||
|
||||
parent.innerHTML = '';
|
||||
|
||||
var frag = document.createDocumentFragment();
|
||||
sortedCards.forEach(function(card) {
|
||||
frag.appendChild(card);
|
||||
});
|
||||
parent.appendChild(frag);
|
||||
};
|
||||
|
||||
search.addEventListener("input", function() {
|
||||
applyFilter();
|
||||
});
|
||||
applySort();
|
||||
applyFilter();
|
||||
extraNetworksApplySort[tabname_full] = applySort;
|
||||
extraNetworksApplyFilter[tabname_full] = applyFilter;
|
||||
|
||||
var controls = gradioApp().querySelector("#" + tabname_full + "_controls");
|
||||
controlsDiv.insertBefore(controls, null);
|
||||
|
||||
if (elem.style.display != "none") {
|
||||
extraNetworksShowControlsForPage(tabname, tabname_full);
|
||||
}
|
||||
});
|
||||
|
||||
registerPrompt(tabname, tabname + "_prompt");
|
||||
registerPrompt(tabname, tabname + "_neg_prompt");
|
||||
registerPrompt('txt2img', 'txt2img_prompt');
|
||||
registerPrompt('txt2img', 'txt2img_neg_prompt');
|
||||
registerPrompt('img2img', 'img2img_prompt');
|
||||
registerPrompt('img2img', 'img2img_neg_prompt');
|
||||
}
|
||||
|
||||
function extraNetworksMovePromptToTab(tabname, id, showPrompt, showNegativePrompt) {
|
||||
if (!gradioApp().querySelector('.toprow-compact-tools')) return; // only applicable for compact prompt layout
|
||||
onUiLoaded(setupExtraNetworks);
|
||||
|
||||
var promptContainer = gradioApp().getElementById(tabname + '_prompt_container');
|
||||
var prompt = gradioApp().getElementById(tabname + '_prompt_row');
|
||||
var negPrompt = gradioApp().getElementById(tabname + '_neg_prompt_row');
|
||||
var elem = id ? gradioApp().getElementById(id) : null;
|
||||
var re_extranet = /<([^:]+:[^:]+):[\d.]+>/;
|
||||
var re_extranet_g = /\s+<([^:]+:[^:]+):[\d.]+>/g;
|
||||
|
||||
if (showNegativePrompt && elem) {
|
||||
elem.insertBefore(negPrompt, elem.firstChild);
|
||||
} else {
|
||||
promptContainer.insertBefore(negPrompt, promptContainer.firstChild);
|
||||
}
|
||||
|
||||
if (showPrompt && elem) {
|
||||
elem.insertBefore(prompt, elem.firstChild);
|
||||
} else {
|
||||
promptContainer.insertBefore(prompt, promptContainer.firstChild);
|
||||
}
|
||||
|
||||
if (elem) {
|
||||
elem.classList.toggle('extra-page-prompts-active', showNegativePrompt || showPrompt);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
function extraNetworksShowControlsForPage(tabname, tabname_full) {
|
||||
gradioApp().querySelectorAll('#' + tabname + '_extra_tabs .extra-networks-controls-div > div').forEach(function(elem) {
|
||||
var targetId = tabname_full + "_controls";
|
||||
elem.style.display = elem.id == targetId ? "" : "none";
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
function extraNetworksUnrelatedTabSelected(tabname) { // called from python when user selects an unrelated tab (generate)
|
||||
extraNetworksMovePromptToTab(tabname, '', false, false);
|
||||
|
||||
extraNetworksShowControlsForPage(tabname, null);
|
||||
}
|
||||
|
||||
function extraNetworksTabSelected(tabname, id, showPrompt, showNegativePrompt, tabname_full) { // called from python when user selects an extra networks tab
|
||||
extraNetworksMovePromptToTab(tabname, id, showPrompt, showNegativePrompt);
|
||||
|
||||
extraNetworksShowControlsForPage(tabname, tabname_full);
|
||||
}
|
||||
|
||||
function applyExtraNetworkFilter(tabname_full) {
|
||||
var doFilter = function() {
|
||||
var applyFunction = extraNetworksApplyFilter[tabname_full];
|
||||
|
||||
if (applyFunction) {
|
||||
applyFunction(true);
|
||||
}
|
||||
};
|
||||
setTimeout(doFilter, 1);
|
||||
}
|
||||
|
||||
function applyExtraNetworkSort(tabname_full) {
|
||||
var doSort = function() {
|
||||
extraNetworksApplySort[tabname_full](true);
|
||||
};
|
||||
setTimeout(doSort, 1);
|
||||
}
|
||||
|
||||
var extraNetworksApplyFilter = {};
|
||||
var extraNetworksApplySort = {};
|
||||
var activePromptTextarea = {};
|
||||
|
||||
function setupExtraNetworks() {
|
||||
setupExtraNetworksForTab('txt2img');
|
||||
setupExtraNetworksForTab('img2img');
|
||||
}
|
||||
|
||||
var re_extranet = /<([^:^>]+:[^:]+):[\d.]+>(.*)/;
|
||||
var re_extranet_g = /<([^:^>]+:[^:]+):[\d.]+>/g;
|
||||
|
||||
var re_extranet_neg = /\(([^:^>]+:[\d.]+)\)/;
|
||||
var re_extranet_g_neg = /\(([^:^>]+:[\d.]+)\)/g;
|
||||
function tryToRemoveExtraNetworkFromPrompt(textarea, text, isNeg) {
|
||||
var m = text.match(isNeg ? re_extranet_neg : re_extranet);
|
||||
function tryToRemoveExtraNetworkFromPrompt(textarea, text) {
|
||||
var m = text.match(re_extranet);
|
||||
var replaced = false;
|
||||
var newTextareaText;
|
||||
var extraTextBeforeNet = opts.extra_networks_add_text_separator;
|
||||
if (m) {
|
||||
var extraTextAfterNet = m[2];
|
||||
var partToSearch = m[1];
|
||||
var foundAtPosition = -1;
|
||||
newTextareaText = textarea.value.replaceAll(isNeg ? re_extranet_g_neg : re_extranet_g, function(found, net, pos) {
|
||||
m = found.match(isNeg ? re_extranet_neg : re_extranet);
|
||||
newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found) {
|
||||
m = found.match(re_extranet);
|
||||
if (m[1] == partToSearch) {
|
||||
replaced = true;
|
||||
foundAtPosition = pos;
|
||||
return "";
|
||||
}
|
||||
return found;
|
||||
});
|
||||
if (foundAtPosition >= 0) {
|
||||
if (extraTextAfterNet && newTextareaText.substr(foundAtPosition, extraTextAfterNet.length) == extraTextAfterNet) {
|
||||
newTextareaText = newTextareaText.substr(0, foundAtPosition) + newTextareaText.substr(foundAtPosition + extraTextAfterNet.length);
|
||||
}
|
||||
if (newTextareaText.substr(foundAtPosition - extraTextBeforeNet.length, extraTextBeforeNet.length) == extraTextBeforeNet) {
|
||||
newTextareaText = newTextareaText.substr(0, foundAtPosition - extraTextBeforeNet.length) + newTextareaText.substr(foundAtPosition);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
newTextareaText = textarea.value.replaceAll(new RegExp(`((?:${extraTextBeforeNet})?${text})`, "g"), "");
|
||||
replaced = (newTextareaText != textarea.value);
|
||||
newTextareaText = textarea.value.replaceAll(new RegExp(text, "g"), function(found) {
|
||||
if (found == text) {
|
||||
replaced = true;
|
||||
return "";
|
||||
}
|
||||
return found;
|
||||
});
|
||||
}
|
||||
|
||||
if (replaced) {
|
||||
@ -245,22 +98,14 @@ function tryToRemoveExtraNetworkFromPrompt(textarea, text, isNeg) {
|
||||
return false;
|
||||
}
|
||||
|
||||
function updatePromptArea(text, textArea, isNeg) {
|
||||
if (!tryToRemoveExtraNetworkFromPrompt(textArea, text, isNeg)) {
|
||||
textArea.value = textArea.value + opts.extra_networks_add_text_separator + text;
|
||||
function cardClicked(tabname, textToAdd, allowNegativePrompt) {
|
||||
var textarea = allowNegativePrompt ? activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea");
|
||||
|
||||
if (!tryToRemoveExtraNetworkFromPrompt(textarea, textToAdd)) {
|
||||
textarea.value = textarea.value + opts.extra_networks_add_text_separator + textToAdd;
|
||||
}
|
||||
|
||||
updateInput(textArea);
|
||||
}
|
||||
|
||||
function cardClicked(tabname, textToAdd, textToAddNegative, allowNegativePrompt) {
|
||||
if (textToAddNegative.length > 0) {
|
||||
updatePromptArea(textToAdd, gradioApp().querySelector("#" + tabname + "_prompt > label > textarea"));
|
||||
updatePromptArea(textToAddNegative, gradioApp().querySelector("#" + tabname + "_neg_prompt > label > textarea"), true);
|
||||
} else {
|
||||
var textarea = allowNegativePrompt ? activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea");
|
||||
updatePromptArea(textToAdd, textarea);
|
||||
}
|
||||
updateInput(textarea);
|
||||
}
|
||||
|
||||
function saveCardPreview(event, tabname, filename) {
|
||||
@ -276,8 +121,8 @@ function saveCardPreview(event, tabname, filename) {
|
||||
event.preventDefault();
|
||||
}
|
||||
|
||||
function extraNetworksSearchButton(tabname, extra_networks_tabname, event) {
|
||||
var searchTextarea = gradioApp().querySelector("#" + tabname + "_" + extra_networks_tabname + "_extra_search");
|
||||
function extraNetworksSearchButton(tabs_id, event) {
|
||||
var searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > div > textarea');
|
||||
var button = event.target;
|
||||
var text = button.classList.contains("search-all") ? "" : button.textContent.trim();
|
||||
|
||||
@ -285,211 +130,32 @@ function extraNetworksSearchButton(tabname, extra_networks_tabname, event) {
|
||||
updateInput(searchTextarea);
|
||||
}
|
||||
|
||||
function extraNetworksTreeProcessFileClick(event, btn, tabname, extra_networks_tabname) {
|
||||
/**
|
||||
* Processes `onclick` events when user clicks on files in tree.
|
||||
*
|
||||
* @param event The generated event.
|
||||
* @param btn The clicked `tree-list-item` button.
|
||||
* @param tabname The name of the active tab in the sd webui. Ex: txt2img, img2img, etc.
|
||||
* @param extra_networks_tabname The id of the active extraNetworks tab. Ex: lora, checkpoints, etc.
|
||||
*/
|
||||
// NOTE: Currently unused.
|
||||
return;
|
||||
}
|
||||
|
||||
function extraNetworksTreeProcessDirectoryClick(event, btn, tabname, extra_networks_tabname) {
|
||||
/**
|
||||
* Processes `onclick` events when user clicks on directories in tree.
|
||||
*
|
||||
* Here is how the tree reacts to clicks for various states:
|
||||
* unselected unopened directory: Directory is selected and expanded.
|
||||
* unselected opened directory: Directory is selected.
|
||||
* selected opened directory: Directory is collapsed and deselected.
|
||||
* chevron is clicked: Directory is expanded or collapsed. Selected state unchanged.
|
||||
*
|
||||
* @param event The generated event.
|
||||
* @param btn The clicked `tree-list-item` button.
|
||||
* @param tabname The name of the active tab in the sd webui. Ex: txt2img, img2img, etc.
|
||||
* @param extra_networks_tabname The id of the active extraNetworks tab. Ex: lora, checkpoints, etc.
|
||||
*/
|
||||
var ul = btn.nextElementSibling;
|
||||
// This is the actual target that the user clicked on within the target button.
|
||||
// We use this to detect if the chevron was clicked.
|
||||
var true_targ = event.target;
|
||||
|
||||
function _expand_or_collapse(_ul, _btn) {
|
||||
// Expands <ul> if it is collapsed, collapses otherwise. Updates button attributes.
|
||||
if (_ul.hasAttribute("hidden")) {
|
||||
_ul.removeAttribute("hidden");
|
||||
_btn.dataset.expanded = "";
|
||||
} else {
|
||||
_ul.setAttribute("hidden", "");
|
||||
delete _btn.dataset.expanded;
|
||||
}
|
||||
}
|
||||
|
||||
function _remove_selected_from_all() {
|
||||
// Removes the `selected` attribute from all buttons.
|
||||
var sels = document.querySelectorAll("div.tree-list-content");
|
||||
[...sels].forEach(el => {
|
||||
delete el.dataset.selected;
|
||||
});
|
||||
}
|
||||
|
||||
function _select_button(_btn) {
|
||||
// Removes `data-selected` attribute from all buttons then adds to passed button.
|
||||
_remove_selected_from_all();
|
||||
_btn.dataset.selected = "";
|
||||
}
|
||||
|
||||
function _update_search(_tabname, _extra_networks_tabname, _search_text) {
|
||||
// Update search input with select button's path.
|
||||
var search_input_elem = gradioApp().querySelector("#" + tabname + "_" + extra_networks_tabname + "_extra_search");
|
||||
search_input_elem.value = _search_text;
|
||||
updateInput(search_input_elem);
|
||||
}
|
||||
|
||||
|
||||
// If user clicks on the chevron, then we do not select the folder.
|
||||
if (true_targ.matches(".tree-list-item-action--leading, .tree-list-item-action-chevron")) {
|
||||
_expand_or_collapse(ul, btn);
|
||||
} else {
|
||||
// User clicked anywhere else on the button.
|
||||
if ("selected" in btn.dataset && !(ul.hasAttribute("hidden"))) {
|
||||
// If folder is select and open, collapse and deselect button.
|
||||
_expand_or_collapse(ul, btn);
|
||||
delete btn.dataset.selected;
|
||||
_update_search(tabname, extra_networks_tabname, "");
|
||||
} else if (!(!("selected" in btn.dataset) && !(ul.hasAttribute("hidden")))) {
|
||||
// If folder is open and not selected, then we don't collapse; just select.
|
||||
// NOTE: Double inversion sucks but it is the clearest way to show the branching here.
|
||||
_expand_or_collapse(ul, btn);
|
||||
_select_button(btn, tabname, extra_networks_tabname);
|
||||
_update_search(tabname, extra_networks_tabname, btn.dataset.path);
|
||||
} else {
|
||||
// All other cases, just select the button.
|
||||
_select_button(btn, tabname, extra_networks_tabname);
|
||||
_update_search(tabname, extra_networks_tabname, btn.dataset.path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function extraNetworksTreeOnClick(event, tabname, extra_networks_tabname) {
|
||||
/**
|
||||
* Handles `onclick` events for buttons within an `extra-network-tree .tree-list--tree`.
|
||||
*
|
||||
* Determines whether the clicked button in the tree is for a file entry or a directory
|
||||
* then calls the appropriate function.
|
||||
*
|
||||
* @param event The generated event.
|
||||
* @param tabname The name of the active tab in the sd webui. Ex: txt2img, img2img, etc.
|
||||
* @param extra_networks_tabname The id of the active extraNetworks tab. Ex: lora, checkpoints, etc.
|
||||
*/
|
||||
var btn = event.currentTarget;
|
||||
var par = btn.parentElement;
|
||||
if (par.dataset.treeEntryType === "file") {
|
||||
extraNetworksTreeProcessFileClick(event, btn, tabname, extra_networks_tabname);
|
||||
} else {
|
||||
extraNetworksTreeProcessDirectoryClick(event, btn, tabname, extra_networks_tabname);
|
||||
}
|
||||
}
|
||||
|
||||
function extraNetworksControlSortOnClick(event, tabname, extra_networks_tabname) {
|
||||
/** Handles `onclick` events for Sort Mode buttons. */
|
||||
|
||||
var self = event.currentTarget;
|
||||
var parent = event.currentTarget.parentElement;
|
||||
|
||||
parent.querySelectorAll('.extra-network-control--sort').forEach(function(x) {
|
||||
x.classList.remove('extra-network-control--enabled');
|
||||
});
|
||||
|
||||
self.classList.add('extra-network-control--enabled');
|
||||
|
||||
applyExtraNetworkSort(tabname + "_" + extra_networks_tabname);
|
||||
}
|
||||
|
||||
function extraNetworksControlSortDirOnClick(event, tabname, extra_networks_tabname) {
|
||||
/**
|
||||
* Handles `onclick` events for the Sort Direction button.
|
||||
*
|
||||
* Modifies the data attributes of the Sort Direction button to cycle between
|
||||
* ascending and descending sort directions.
|
||||
*
|
||||
* @param event The generated event.
|
||||
* @param tabname The name of the active tab in the sd webui. Ex: txt2img, img2img, etc.
|
||||
* @param extra_networks_tabname The id of the active extraNetworks tab. Ex: lora, checkpoints, etc.
|
||||
*/
|
||||
if (event.currentTarget.dataset.sortdir == "Ascending") {
|
||||
event.currentTarget.dataset.sortdir = "Descending";
|
||||
event.currentTarget.setAttribute("title", "Sort descending");
|
||||
} else {
|
||||
event.currentTarget.dataset.sortdir = "Ascending";
|
||||
event.currentTarget.setAttribute("title", "Sort ascending");
|
||||
}
|
||||
applyExtraNetworkSort(tabname + "_" + extra_networks_tabname);
|
||||
}
|
||||
|
||||
function extraNetworksControlTreeViewOnClick(event, tabname, extra_networks_tabname) {
|
||||
/**
|
||||
* Handles `onclick` events for the Tree View button.
|
||||
*
|
||||
* Toggles the tree view in the extra networks pane.
|
||||
*
|
||||
* @param event The generated event.
|
||||
* @param tabname The name of the active tab in the sd webui. Ex: txt2img, img2img, etc.
|
||||
* @param extra_networks_tabname The id of the active extraNetworks tab. Ex: lora, checkpoints, etc.
|
||||
*/
|
||||
var button = event.currentTarget;
|
||||
button.classList.toggle("extra-network-control--enabled");
|
||||
var show = !button.classList.contains("extra-network-control--enabled");
|
||||
|
||||
var pane = gradioApp().getElementById(tabname + "_" + extra_networks_tabname + "_pane");
|
||||
pane.classList.toggle("extra-network-dirs-hidden", show);
|
||||
}
|
||||
|
||||
function extraNetworksControlRefreshOnClick(event, tabname, extra_networks_tabname) {
|
||||
/**
|
||||
* Handles `onclick` events for the Refresh Page button.
|
||||
*
|
||||
* In order to actually call the python functions in `ui_extra_networks.py`
* that refresh the page, an empty gradio button was created in that file with an
* event handler that refreshes the page. This function simply raises a `click`
* event on that button.
|
||||
*
|
||||
* @param event The generated event.
|
||||
* @param tabname The name of the active tab in the sd webui. Ex: txt2img, img2img, etc.
|
||||
* @param extra_networks_tabname The id of the active extraNetworks tab. Ex: lora, checkpoints, etc.
|
||||
*/
|
||||
var btn_refresh_internal = gradioApp().getElementById(tabname + "_" + extra_networks_tabname + "_extra_refresh_internal");
|
||||
btn_refresh_internal.dispatchEvent(new Event("click"));
|
||||
}
|
||||
|
||||
var globalPopup = null;
|
||||
var globalPopupInner = null;
|
||||
|
||||
function closePopup() {
|
||||
if (!globalPopup) return;
|
||||
globalPopup.style.display = "none";
|
||||
}
|
||||
|
||||
function popup(contents) {
|
||||
if (!globalPopup) {
|
||||
globalPopup = document.createElement('div');
|
||||
globalPopup.onclick = function() {
|
||||
globalPopup.style.display = "none";
|
||||
};
|
||||
globalPopup.classList.add('global-popup');
|
||||
|
||||
var close = document.createElement('div');
|
||||
close.classList.add('global-popup-close');
|
||||
close.addEventListener("click", closePopup);
|
||||
close.onclick = function() {
|
||||
globalPopup.style.display = "none";
|
||||
};
|
||||
close.title = "Close";
|
||||
globalPopup.appendChild(close);
|
||||
|
||||
globalPopupInner = document.createElement('div');
|
||||
globalPopupInner.onclick = function(event) {
|
||||
event.stopPropagation(); return false;
|
||||
};
|
||||
globalPopupInner.classList.add('global-popup-inner');
|
||||
globalPopup.appendChild(globalPopupInner);
|
||||
|
||||
gradioApp().querySelector('.main').appendChild(globalPopup);
|
||||
gradioApp().appendChild(globalPopup);
|
||||
}
|
||||
|
||||
globalPopupInner.innerHTML = '';
|
||||
@ -498,85 +164,12 @@ function popup(contents) {
|
||||
globalPopup.style.display = "flex";
|
||||
}
|
||||
|
||||
var storedPopupIds = {};
|
||||
function popupId(id) {
|
||||
if (!storedPopupIds[id]) {
|
||||
storedPopupIds[id] = gradioApp().getElementById(id);
|
||||
}
|
||||
|
||||
popup(storedPopupIds[id]);
|
||||
}
|
||||
|
||||
function extraNetworksFlattenMetadata(obj) {
|
||||
const result = {};
|
||||
|
||||
// Convert any stringified JSON objects to actual objects
|
||||
for (const key of Object.keys(obj)) {
|
||||
if (typeof obj[key] === 'string') {
|
||||
try {
|
||||
const parsed = JSON.parse(obj[key]);
|
||||
if (parsed && typeof parsed === 'object') {
|
||||
obj[key] = parsed;
|
||||
}
|
||||
} catch (error) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Flatten the object
|
||||
for (const key of Object.keys(obj)) {
|
||||
if (typeof obj[key] === 'object' && obj[key] !== null) {
|
||||
const nested = extraNetworksFlattenMetadata(obj[key]);
|
||||
for (const nestedKey of Object.keys(nested)) {
|
||||
result[`${key}/${nestedKey}`] = nested[nestedKey];
|
||||
}
|
||||
} else {
|
||||
result[key] = obj[key];
|
||||
}
|
||||
}
|
||||
|
||||
// Special case for handling modelspec keys
|
||||
for (const key of Object.keys(result)) {
|
||||
if (key.startsWith("modelspec.")) {
|
||||
result[key.replaceAll(".", "/")] = result[key];
|
||||
delete result[key];
|
||||
}
|
||||
}
|
||||
|
||||
// Add empty keys to designate hierarchy
for (const key of Object.keys(result)) {
const parts = key.split("/");
for (let i = 1; i < parts.length; i++) {
const parent = parts.slice(0, i).join("/");
if (!result[parent]) {
result[parent] = "";
}
}
}

return result;
}
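// A minimal sketch of how the flattening above behaves, not part of the diff;
// the metadata keys and values below are made up for the example.
var exampleMeta = {"ss_network_args": "{\"conv_dim\": \"4\"}", "modelspec.title": "demo"};
var flat = extraNetworksFlattenMetadata(exampleMeta);
// flat is now roughly:
// {"ss_network_args": "", "ss_network_args/conv_dim": "4", "modelspec": "", "modelspec/title": "demo"}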
|
||||
|
||||
function extraNetworksShowMetadata(text) {
|
||||
try {
|
||||
let parsed = JSON.parse(text);
|
||||
if (parsed && typeof parsed === 'object') {
|
||||
parsed = extraNetworksFlattenMetadata(parsed);
|
||||
const table = createVisualizationTable(parsed, 0);
|
||||
popup(table);
|
||||
return;
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
}
|
||||
|
||||
var elem = document.createElement('pre');
|
||||
elem.classList.add('popup-metadata');
|
||||
elem.textContent = text;
|
||||
|
||||
popup(elem);
|
||||
return;
|
||||
}
|
||||
|
||||
function requestGet(url, data, handler, errorHandler) {
|
||||
@ -605,18 +198,11 @@ function requestGet(url, data, handler, errorHandler) {
|
||||
xhr.send(js);
|
||||
}
|
||||
|
||||
function extraNetworksCopyCardPath(event) {
|
||||
navigator.clipboard.writeText(event.target.getAttribute("data-clipboard-text"));
|
||||
event.stopPropagation();
|
||||
}
|
||||
|
||||
function extraNetworksRequestMetadata(event, extraPage) {
|
||||
function extraNetworksRequestMetadata(event, extraPage, cardName) {
|
||||
var showError = function() {
|
||||
extraNetworksShowMetadata("there was an error getting metadata");
|
||||
};
|
||||
|
||||
var cardName = event.target.parentElement.parentElement.getAttribute("data-name");
|
||||
|
||||
requestGet("./sd_extra_networks/metadata", {page: extraPage, item: cardName}, function(data) {
|
||||
if (data && data.metadata) {
|
||||
extraNetworksShowMetadata(data.metadata);
|
||||
@ -627,86 +213,3 @@ function extraNetworksRequestMetadata(event, extraPage) {
|
||||
|
||||
event.stopPropagation();
|
||||
}
|
||||
|
||||
var extraPageUserMetadataEditors = {};
|
||||
|
||||
function extraNetworksEditUserMetadata(event, tabname, extraPage) {
|
||||
var id = tabname + '_' + extraPage + '_edit_user_metadata';
|
||||
|
||||
var editor = extraPageUserMetadataEditors[id];
|
||||
if (!editor) {
|
||||
editor = {};
|
||||
editor.page = gradioApp().getElementById(id);
|
||||
editor.nameTextarea = gradioApp().querySelector("#" + id + "_name" + ' textarea');
|
||||
editor.button = gradioApp().querySelector("#" + id + "_button");
|
||||
extraPageUserMetadataEditors[id] = editor;
|
||||
}
|
||||
|
||||
var cardName = event.target.parentElement.parentElement.getAttribute("data-name");
|
||||
editor.nameTextarea.value = cardName;
|
||||
updateInput(editor.nameTextarea);
|
||||
|
||||
editor.button.click();
|
||||
|
||||
popup(editor.page);
|
||||
|
||||
event.stopPropagation();
|
||||
}
|
||||
|
||||
function extraNetworksRefreshSingleCard(page, tabname, name) {
|
||||
requestGet("./sd_extra_networks/get-single-card", {page: page, tabname: tabname, name: name}, function(data) {
|
||||
if (data && data.html) {
|
||||
var card = gradioApp().querySelector(`#${tabname}_${page.replace(" ", "_")}_cards > .card[data-name="${name}"]`);
|
||||
|
||||
var newDiv = document.createElement('DIV');
|
||||
newDiv.innerHTML = data.html;
|
||||
var newCard = newDiv.firstElementChild;
|
||||
|
||||
newCard.style.display = '';
|
||||
card.parentElement.insertBefore(newCard, card);
|
||||
card.parentElement.removeChild(card);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
window.addEventListener("keydown", function(event) {
|
||||
if (event.key == "Escape") {
|
||||
closePopup();
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* Setup custom loading for this script.
|
||||
* We need to wait for all of our HTML to be generated in the extra networks tabs
|
||||
* before we can actually run the `setupExtraNetworks` function.
|
||||
* The `onUiLoaded` function actually runs before all of our extra network tabs are
|
||||
* finished generating. Thus we needed this new method.
|
||||
*
|
||||
*/
|
||||
|
||||
var uiAfterScriptsCallbacks = [];
var uiAfterScriptsTimeout = null;
var executedAfterScripts = false;

function scheduleAfterScriptsCallbacks() {
clearTimeout(uiAfterScriptsTimeout);
uiAfterScriptsTimeout = setTimeout(function() {
executeCallbacks(uiAfterScriptsCallbacks);
}, 200);
}

onUiLoaded(function() {
var mutationObserver = new MutationObserver(function(m) {
let existingSearchfields = gradioApp().querySelectorAll("[id$='_extra_search']").length;
let neededSearchfields = gradioApp().querySelectorAll("[id$='_extra_tabs'] > .tab-nav > button").length - 2;

if (!executedAfterScripts && existingSearchfields >= neededSearchfields) {
mutationObserver.disconnect();
executedAfterScripts = true;
scheduleAfterScriptsCallbacks();
}
});
mutationObserver.observe(gradioApp(), {childList: true, subtree: true});
});

uiAfterScriptsCallbacks.push(setupExtraNetworks);
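// A minimal sketch, not part of the diff: other scripts could hook into the same
// deferred-loading mechanism by pushing their own callback; the function name below is hypothetical.
uiAfterScriptsCallbacks.push(function myExtraNetworksHook() {
    console.log('extra networks tabs have finished building');
});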
|
||||
|
@ -1,7 +1,7 @@
|
||||
// attaches listeners to the txt2img and img2img galleries to update displayed generation param text when the image changes
|
||||
|
||||
let txt2img_gallery, img2img_gallery, modal = undefined;
|
||||
onAfterUiUpdate(function() {
|
||||
onUiUpdate(function() {
|
||||
if (!txt2img_gallery) {
|
||||
txt2img_gallery = attachGalleryListeners("txt2img");
|
||||
}
|
||||
|
@ -15,7 +15,7 @@ var titles = {
|
||||
"CFG Scale": "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results",
|
||||
"Seed": "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result",
|
||||
"\u{1f3b2}\ufe0f": "Set seed to -1, which will cause a new random number to be used every time",
|
||||
"\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomized",
|
||||
"\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomed",
|
||||
"\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty into user interface.",
|
||||
"\u{1f4c2}": "Open images output directory",
|
||||
"\u{1f4be}": "Save style",
|
||||
@ -84,6 +84,8 @@ var titles = {
|
||||
"Checkpoint name": "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.",
|
||||
"Inpainting conditioning mask strength": "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.",
|
||||
|
||||
"vram": "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).",
|
||||
|
||||
"Eta noise seed delta": "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.",
|
||||
|
||||
"Filename word regex": "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.",
|
||||
@ -108,30 +110,23 @@ var titles = {
|
||||
"Upscale by": "Adjusts the size of the image by multiplying the original width and height by the selected value. Ignored if either Resize width to or Resize height to are non-zero.",
|
||||
"Resize width to": "Resizes image to this width. If 0, width is inferred from either of two nearby sliders.",
|
||||
"Resize height to": "Resizes image to this height. If 0, height is inferred from either of two nearby sliders.",
|
||||
"Multiplier for extra networks": "When adding extra network such as Hypernetwork or Lora to prompt, use this multiplier for it.",
|
||||
"Discard weights with matching name": "Regular expression; if weights's name matches it, the weights is not written to the resulting checkpoint. Use ^model_ema to discard EMA weights.",
|
||||
"Extra networks tab order": "Comma-separated list of tab names; tabs listed here will appear in the extra networks UI first and in order listed.",
|
||||
"Extra networks tab order": "Comma-separated list of tab names; tabs listed here will appear in the extra networks UI first and in order lsited.",
|
||||
"Negative Guidance minimum sigma": "Skip negative prompt for steps where image is already mostly denoised; the higher this value, the more skips there will be; provides increased performance in exchange for minor quality reduction."
|
||||
};
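// A minimal sketch, not part of the diff: since tooltips are looked up in the plain
// `titles` object by text, value, or class, a script could register its own entry like
// this; the key and wording below are hypothetical.
titles["My Custom Button"] = "Describes what the custom button does when hovered.";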
|
||||
|
||||
function updateTooltip(element) {
|
||||
if (element.title) return; // already has a title
|
||||
function updateTooltipForSpan(span) {
|
||||
if (span.title) return; // already has a title
|
||||
|
||||
let text = element.textContent;
|
||||
let tooltip = localization[titles[text]] || titles[text];
|
||||
let tooltip = localization[titles[span.textContent]] || titles[span.textContent];
|
||||
|
||||
if (!tooltip) {
|
||||
let value = element.value;
|
||||
if (value) tooltip = localization[titles[value]] || titles[value];
|
||||
tooltip = localization[titles[span.value]] || titles[span.value];
|
||||
}
|
||||
|
||||
if (!tooltip) {
|
||||
// Gradio dropdown options have `data-value`.
|
||||
let dataValue = element.dataset.value;
|
||||
if (dataValue) tooltip = localization[titles[dataValue]] || titles[dataValue];
|
||||
}
|
||||
|
||||
if (!tooltip) {
|
||||
for (const c of element.classList) {
|
||||
for (const c of span.classList) {
|
||||
if (c in titles) {
|
||||
tooltip = localization[titles[c]] || titles[c];
|
||||
break;
|
||||
@ -140,64 +135,34 @@ function updateTooltip(element) {
|
||||
}
|
||||
|
||||
if (tooltip) {
|
||||
element.title = tooltip;
|
||||
span.title = tooltip;
|
||||
}
|
||||
}
|
||||
|
||||
// Nodes to check for adding tooltips.
|
||||
const tooltipCheckNodes = new Set();
|
||||
// Timer for debouncing tooltip check.
|
||||
let tooltipCheckTimer = null;
|
||||
function updateTooltipForSelect(select) {
|
||||
if (select.onchange != null) return;
|
||||
|
||||
function processTooltipCheckNodes() {
|
||||
for (const node of tooltipCheckNodes) {
|
||||
updateTooltip(node);
|
||||
}
|
||||
tooltipCheckNodes.clear();
|
||||
select.onchange = function() {
|
||||
select.title = localization[titles[select.value]] || titles[select.value] || "";
|
||||
};
|
||||
}
|
||||
|
||||
onUiUpdate(function(mutationRecords) {
|
||||
for (const record of mutationRecords) {
|
||||
if (record.type === "childList" && record.target.classList.contains("options")) {
|
||||
// This smells like a Gradio dropdown menu having changed,
|
||||
// so let's enqueue an update for the input element that shows the current value.
|
||||
let wrap = record.target.parentNode;
|
||||
let input = wrap?.querySelector("input");
|
||||
if (input) {
|
||||
input.title = ""; // So we'll even have a chance to update it.
|
||||
tooltipCheckNodes.add(input);
|
||||
}
|
||||
}
|
||||
for (const node of record.addedNodes) {
|
||||
if (node.nodeType === Node.ELEMENT_NODE && !node.classList.contains("hide")) {
|
||||
if (!node.title) {
|
||||
if (
|
||||
node.tagName === "SPAN" ||
|
||||
node.tagName === "BUTTON" ||
|
||||
node.tagName === "P" ||
|
||||
node.tagName === "INPUT" ||
|
||||
(node.tagName === "LI" && node.classList.contains("item")) // Gradio dropdown item
|
||||
) {
|
||||
tooltipCheckNodes.add(node);
|
||||
}
|
||||
}
|
||||
node.querySelectorAll('span, button, p').forEach(n => tooltipCheckNodes.add(n));
|
||||
}
|
||||
}
|
||||
}
|
||||
if (tooltipCheckNodes.size) {
|
||||
clearTimeout(tooltipCheckTimer);
|
||||
tooltipCheckTimer = setTimeout(processTooltipCheckNodes, 1000);
|
||||
}
|
||||
});
|
||||
var observedTooltipElements = {SPAN: 1, BUTTON: 1, SELECT: 1, P: 1};
|
||||
|
||||
onUiLoaded(function() {
|
||||
for (var comp of window.gradio_config.components) {
|
||||
if (comp.props.webui_tooltip && comp.props.elem_id) {
|
||||
var elem = gradioApp().getElementById(comp.props.elem_id);
|
||||
if (elem) {
|
||||
elem.title = comp.props.webui_tooltip;
|
||||
onUiUpdate(function(m) {
|
||||
m.forEach(function(record) {
|
||||
record.addedNodes.forEach(function(node) {
|
||||
if (observedTooltipElements[node.tagName]) {
|
||||
updateTooltipForSpan(node);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (node.tagName == "SELECT") {
|
||||
updateTooltipForSelect(node);
|
||||
}
|
||||
|
||||
if (node.querySelectorAll) {
|
||||
node.querySelectorAll('span, button, select, p').forEach(updateTooltipForSpan);
|
||||
node.querySelectorAll('select').forEach(updateTooltipForSelect);
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
|
@ -39,5 +39,5 @@ function imageMaskResize() {
|
||||
});
|
||||
}
|
||||
|
||||
onAfterUiUpdate(imageMaskResize);
|
||||
onUiUpdate(imageMaskResize);
|
||||
window.addEventListener('resize', imageMaskResize);
|
||||
|
18
javascript/imageParams.js
Normal file
18
javascript/imageParams.js
Normal file
@ -0,0 +1,18 @@
|
||||
window.onload = (function() {
|
||||
window.addEventListener('drop', e => {
|
||||
const target = e.composedPath()[0];
|
||||
if (target.placeholder.indexOf("Prompt") == -1) return;
|
||||
|
||||
let prompt_target = get_tab_index('tabs') == 1 ? "img2img_prompt_image" : "txt2img_prompt_image";
|
||||
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
const imgParent = gradioApp().getElementById(prompt_target);
|
||||
const files = e.dataTransfer.files;
|
||||
const fileInput = imgParent.querySelector('input[type="file"]');
|
||||
if (fileInput) {
|
||||
fileInput.files = files;
|
||||
fileInput.dispatchEvent(new Event('change'));
|
||||
}
|
||||
});
|
||||
});
|
@ -6,8 +6,6 @@ function closeModal() {
|
||||
function showModal(event) {
|
||||
const source = event.target || event.srcElement;
|
||||
const modalImage = gradioApp().getElementById("modalImage");
|
||||
const modalToggleLivePreviewBtn = gradioApp().getElementById("modal_toggle_live_preview");
|
||||
modalToggleLivePreviewBtn.innerHTML = opts.js_live_preview_in_modal_lightbox ? "🗇" : "🗆";
|
||||
const lb = gradioApp().getElementById("lightboxModal");
|
||||
modalImage.src = source.src;
|
||||
if (modalImage.style.display === 'none') {
|
||||
@ -35,11 +33,8 @@ function updateOnBackgroundChange() {
|
||||
const modalImage = gradioApp().getElementById("modalImage");
|
||||
if (modalImage && modalImage.offsetParent) {
|
||||
let currentButton = selected_gallery_button();
|
||||
let preview = gradioApp().querySelectorAll('.livePreview > img');
|
||||
if (opts.js_live_preview_in_modal_lightbox && preview.length > 0) {
|
||||
// show preview image if available
|
||||
modalImage.src = preview[preview.length - 1].src;
|
||||
} else if (currentButton?.children?.length > 0 && modalImage.src != currentButton.children[0].src) {
|
||||
|
||||
if (currentButton?.children?.length > 0 && modalImage.src != currentButton.children[0].src) {
|
||||
modalImage.src = currentButton.children[0].src;
|
||||
if (modalImage.style.display === 'none') {
|
||||
const modal = gradioApp().getElementById("lightboxModal");
|
||||
@ -53,7 +48,14 @@ function modalImageSwitch(offset) {
|
||||
var galleryButtons = all_gallery_buttons();
|
||||
|
||||
if (galleryButtons.length > 1) {
|
||||
var result = selected_gallery_index();
|
||||
var currentButton = selected_gallery_button();
|
||||
|
||||
var result = -1;
|
||||
galleryButtons.forEach(function(v, i) {
|
||||
if (v == currentButton) {
|
||||
result = i;
|
||||
}
|
||||
});
|
||||
|
||||
if (result != -1) {
|
||||
var nextButton = galleryButtons[negmod((result + offset), galleryButtons.length)];
|
||||
@ -126,15 +128,14 @@ function setupImageForLightbox(e) {
|
||||
e.style.cursor = 'pointer';
|
||||
e.style.userSelect = 'none';
|
||||
|
||||
e.addEventListener('mousedown', function(evt) {
|
||||
if (evt.button == 1) {
|
||||
open(evt.target.src);
|
||||
evt.preventDefault();
|
||||
return;
|
||||
}
|
||||
}, true);
|
||||
var isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1;
|
||||
|
||||
e.addEventListener('click', function(evt) {
|
||||
// For Firefox, listening on click first switched to next image then shows the lightbox.
|
||||
// If you know how to fix this without switching to mousedown event, please.
|
||||
// For other browsers the event is click to make it possible to drag the picture.
|
||||
var event = isFirefox ? 'mousedown' : 'click';
|
||||
|
||||
e.addEventListener(event, function(evt) {
|
||||
if (!opts.js_modal_lightbox || evt.button != 0) return;
|
||||
|
||||
modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed);
|
||||
@ -154,13 +155,6 @@ function modalZoomToggle(event) {
|
||||
event.stopPropagation();
|
||||
}
|
||||
|
||||
function modalLivePreviewToggle(event) {
|
||||
const modalToggleLivePreview = gradioApp().getElementById("modal_toggle_live_preview");
|
||||
opts.js_live_preview_in_modal_lightbox = !opts.js_live_preview_in_modal_lightbox;
|
||||
modalToggleLivePreview.innerHTML = opts.js_live_preview_in_modal_lightbox ? "🗇" : "🗆";
|
||||
event.stopPropagation();
|
||||
}
|
||||
|
||||
function modalTileImageToggle(event) {
|
||||
const modalImage = gradioApp().getElementById("modalImage");
|
||||
const modal = gradioApp().getElementById("lightboxModal");
|
||||
@ -176,7 +170,7 @@ function modalTileImageToggle(event) {
|
||||
event.stopPropagation();
|
||||
}
|
||||
|
||||
onAfterUiUpdate(function() {
|
||||
onUiUpdate(function() {
|
||||
var fullImg_preview = gradioApp().querySelectorAll('.gradio-gallery > div > img');
|
||||
if (fullImg_preview != null) {
|
||||
fullImg_preview.forEach(setupImageForLightbox);
|
||||
@ -218,14 +212,6 @@ document.addEventListener("DOMContentLoaded", function() {
|
||||
modalSave.title = "Save Image(s)";
|
||||
modalControls.appendChild(modalSave);
|
||||
|
||||
const modalToggleLivePreview = document.createElement('span');
|
||||
modalToggleLivePreview.className = 'modalToggleLivePreview cursor';
|
||||
modalToggleLivePreview.id = "modal_toggle_live_preview";
|
||||
modalToggleLivePreview.innerHTML = "🗆";
|
||||
modalToggleLivePreview.onclick = modalLivePreviewToggle;
|
||||
modalToggleLivePreview.title = "Toggle live preview";
|
||||
modalControls.appendChild(modalToggleLivePreview);
|
||||
|
||||
const modalClose = document.createElement('span');
|
||||
modalClose.className = 'modalClose cursor';
|
||||
modalClose.innerHTML = '×';
|
||||
|
@ -1,9 +1,7 @@
|
||||
let gamepads = [];
|
||||
|
||||
window.addEventListener('gamepadconnected', (e) => {
|
||||
const index = e.gamepad.index;
|
||||
let isWaiting = false;
|
||||
gamepads[index] = setInterval(async() => {
|
||||
setInterval(async() => {
|
||||
if (!opts.js_modal_lightbox_gamepad || isWaiting) return;
|
||||
const gamepad = navigator.getGamepads()[index];
|
||||
const xValue = gamepad.axes[0];
|
||||
@ -26,10 +24,6 @@ window.addEventListener('gamepadconnected', (e) => {
|
||||
}, 10);
|
||||
});
|
||||
|
||||
window.addEventListener('gamepaddisconnected', (e) => {
|
||||
clearInterval(gamepads[e.gamepad.index]);
|
||||
});
|
||||
|
||||
/*
|
||||
Primarily for vr controller type pointer devices.
|
||||
I use the wheel event because there's currently no way to do it properly with web xr.
|
||||
|
@ -1,68 +0,0 @@
|
||||
function inputAccordionChecked(id, checked) {
|
||||
var accordion = gradioApp().getElementById(id);
|
||||
accordion.visibleCheckbox.checked = checked;
|
||||
accordion.onVisibleCheckboxChange();
|
||||
}
|
||||
|
||||
function setupAccordion(accordion) {
|
||||
var labelWrap = accordion.querySelector('.label-wrap');
|
||||
var gradioCheckbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input");
|
||||
var extra = gradioApp().querySelector('#' + accordion.id + "-extra");
|
||||
var span = labelWrap.querySelector('span');
|
||||
var linked = true;
|
||||
|
||||
var isOpen = function() {
|
||||
return labelWrap.classList.contains('open');
|
||||
};
|
||||
|
||||
var observerAccordionOpen = new MutationObserver(function(mutations) {
|
||||
mutations.forEach(function(mutationRecord) {
|
||||
accordion.classList.toggle('input-accordion-open', isOpen());
|
||||
|
||||
if (linked) {
|
||||
accordion.visibleCheckbox.checked = isOpen();
|
||||
accordion.onVisibleCheckboxChange();
|
||||
}
|
||||
});
|
||||
});
|
||||
observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']});
|
||||
|
||||
if (extra) {
|
||||
labelWrap.insertBefore(extra, labelWrap.lastElementChild);
|
||||
}
|
||||
|
||||
accordion.onChecked = function(checked) {
|
||||
if (isOpen() != checked) {
|
||||
labelWrap.click();
|
||||
}
|
||||
};
|
||||
|
||||
var visibleCheckbox = document.createElement('INPUT');
|
||||
visibleCheckbox.type = 'checkbox';
|
||||
visibleCheckbox.checked = isOpen();
|
||||
visibleCheckbox.id = accordion.id + "-visible-checkbox";
|
||||
visibleCheckbox.className = gradioCheckbox.className + " input-accordion-checkbox";
|
||||
span.insertBefore(visibleCheckbox, span.firstChild);
|
||||
|
||||
accordion.visibleCheckbox = visibleCheckbox;
|
||||
accordion.onVisibleCheckboxChange = function() {
|
||||
if (linked && isOpen() != visibleCheckbox.checked) {
|
||||
labelWrap.click();
|
||||
}
|
||||
|
||||
gradioCheckbox.checked = visibleCheckbox.checked;
|
||||
updateInput(gradioCheckbox);
|
||||
};
|
||||
|
||||
visibleCheckbox.addEventListener('click', function(event) {
|
||||
linked = false;
|
||||
event.stopPropagation();
|
||||
});
|
||||
visibleCheckbox.addEventListener('input', accordion.onVisibleCheckboxChange);
|
||||
}
|
||||
|
||||
onUiLoaded(function() {
for (var accordion of gradioApp().querySelectorAll('.input-accordion')) {
setupAccordion(accordion);
}
});
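// A minimal sketch, not part of the diff: with the setup above, other scripts can drive an
// accordion's checkbox programmatically; the element id below is hypothetical.
inputAccordionChecked('txt2img_hr', true); // checks the visible checkbox and, while linked, opens the accordion to match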
|
@ -1,26 +0,0 @@
|
||||
|
||||
function localSet(k, v) {
try {
localStorage.setItem(k, v);
} catch (e) {
console.warn(`Failed to save ${k} to localStorage: ${e}`);
}
}

function localGet(k, def) {
try {
return localStorage.getItem(k);
} catch (e) {
console.warn(`Failed to load ${k} from localStorage: ${e}`);
}

return def;
}

function localRemove(k) {
try {
return localStorage.removeItem(k);
} catch (e) {
console.warn(`Failed to remove ${k} from localStorage: ${e}`);
}
}
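// A minimal usage sketch of the wrappers above, not part of the diff; the key name is hypothetical.
// Note that localGet returns the stored string (or null for a missing key); the `def`
// fallback is only returned when localStorage itself throws, e.g. in a restricted browser context.
localSet('myExtensionSetting', 'enabled');
var value = localGet('myExtensionSetting', 'disabled');
localRemove('myExtensionSetting');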
|
@ -11,11 +11,11 @@ var ignore_ids_for_localization = {
|
||||
train_hypernetwork: 'OPTION',
|
||||
txt2img_styles: 'OPTION',
|
||||
img2img_styles: 'OPTION',
|
||||
setting_random_artist_categories: 'OPTION',
|
||||
setting_face_restoration_model: 'OPTION',
|
||||
setting_realesrgan_enabled_models: 'OPTION',
|
||||
extras_upscaler_1: 'OPTION',
|
||||
extras_upscaler_2: 'OPTION',
|
||||
setting_random_artist_categories: 'SPAN',
|
||||
setting_face_restoration_model: 'SPAN',
|
||||
setting_realesrgan_enabled_models: 'SPAN',
|
||||
extras_upscaler_1: 'SPAN',
|
||||
extras_upscaler_2: 'SPAN',
|
||||
};
|
||||
|
||||
var re_num = /^[.\d]+$/;
|
||||
@ -107,41 +107,12 @@ function processNode(node) {
|
||||
});
|
||||
}
|
||||
|
||||
function localizeWholePage() {
|
||||
processNode(gradioApp());
|
||||
|
||||
function elem(comp) {
|
||||
var elem_id = comp.props.elem_id ? comp.props.elem_id : "component-" + comp.id;
|
||||
return gradioApp().getElementById(elem_id);
|
||||
}
|
||||
|
||||
for (var comp of window.gradio_config.components) {
|
||||
if (comp.props.webui_tooltip) {
|
||||
let e = elem(comp);
|
||||
|
||||
let tl = e ? getTranslation(e.title) : undefined;
|
||||
if (tl !== undefined) {
|
||||
e.title = tl;
|
||||
}
|
||||
}
|
||||
if (comp.props.placeholder) {
|
||||
let e = elem(comp);
|
||||
let textbox = e ? e.querySelector('[placeholder]') : null;
|
||||
|
||||
let tl = textbox ? getTranslation(textbox.placeholder) : undefined;
|
||||
if (tl !== undefined) {
|
||||
textbox.placeholder = tl;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function dumpTranslations() {
|
||||
if (!hasLocalization()) {
|
||||
// If we don't have any localization,
|
||||
// we will not have traversed the app to find
|
||||
// original_lines, so do that now.
|
||||
localizeWholePage();
|
||||
processNode(gradioApp());
|
||||
}
|
||||
var dumped = {};
|
||||
if (localization.rtl) {
|
||||
@ -183,7 +154,7 @@ document.addEventListener("DOMContentLoaded", function() {
|
||||
});
|
||||
});
|
||||
|
||||
localizeWholePage();
|
||||
processNode(gradioApp());
|
||||
|
||||
if (localization.rtl) { // if the language is from right to left,
|
||||
(new MutationObserver((mutations, observer) => { // wait for the style to load
|
||||
|
@ -4,7 +4,7 @@ let lastHeadImg = null;
|
||||
|
||||
let notificationButton = null;
|
||||
|
||||
onAfterUiUpdate(function() {
|
||||
onUiUpdate(function() {
|
||||
if (notificationButton == null) {
|
||||
notificationButton = gradioApp().getElementById('request_notifications');
|
||||
|
||||
@ -15,7 +15,7 @@ onAfterUiUpdate(function() {
|
||||
}
|
||||
}
|
||||
|
||||
const galleryPreviews = gradioApp().querySelectorAll('div[id^="tab_"] div[id$="_results"] .thumbnail-item > img');
|
||||
const galleryPreviews = gradioApp().querySelectorAll('div[id^="tab_"][style*="display: block"] div[id$="_results"] .thumbnail-item > img');
|
||||
|
||||
if (galleryPreviews == null) return;
|
||||
|
||||
@ -26,11 +26,7 @@ onAfterUiUpdate(function() {
|
||||
lastHeadImg = headImg;
|
||||
|
||||
// play notification sound if available
|
||||
const notificationAudio = gradioApp().querySelector('#audio_notification audio');
|
||||
if (notificationAudio) {
|
||||
notificationAudio.volume = opts.notification_volume / 100.0 || 1.0;
|
||||
notificationAudio.play();
|
||||
}
|
||||
gradioApp().querySelector('#audio_notification audio')?.play();
|
||||
|
||||
if (document.hasFocus()) return;
|
||||
|
||||
|
@ -1,174 +0,0 @@
|
||||
|
||||
function createRow(table, cellName, items) {
|
||||
var tr = document.createElement('tr');
|
||||
var res = [];
|
||||
|
||||
items.forEach(function(x, i) {
|
||||
if (x === undefined) {
|
||||
res.push(null);
|
||||
return;
|
||||
}
|
||||
|
||||
var td = document.createElement(cellName);
|
||||
td.textContent = x;
|
||||
tr.appendChild(td);
|
||||
res.push(td);
|
||||
|
||||
var colspan = 1;
|
||||
for (var n = i + 1; n < items.length; n++) {
|
||||
if (items[n] !== undefined) {
|
||||
break;
|
||||
}
|
||||
|
||||
colspan += 1;
|
||||
}
|
||||
|
||||
if (colspan > 1) {
|
||||
td.colSpan = colspan;
|
||||
}
|
||||
});
|
||||
|
||||
table.appendChild(tr);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
function createVisualizationTable(data, cutoff = 0, sort = "") {
|
||||
var table = document.createElement('table');
|
||||
table.className = 'popup-table';
|
||||
|
||||
var keys = Object.keys(data);
|
||||
if (sort === "number") {
|
||||
keys = keys.sort(function(a, b) {
|
||||
return data[b] - data[a];
|
||||
});
|
||||
} else {
|
||||
keys = keys.sort();
|
||||
}
|
||||
var items = keys.map(function(x) {
|
||||
return {key: x, parts: x.split('/'), value: data[x]};
|
||||
});
|
||||
var maxLength = items.reduce(function(a, b) {
|
||||
return Math.max(a, b.parts.length);
|
||||
}, 0);
|
||||
|
||||
var cols = createRow(
|
||||
table,
|
||||
'th',
|
||||
[
|
||||
cutoff === 0 ? 'key' : 'record',
|
||||
cutoff === 0 ? 'value' : 'seconds'
|
||||
]
|
||||
);
|
||||
cols[0].colSpan = maxLength;
|
||||
|
||||
function arraysEqual(a, b) {
|
||||
return !(a < b || b < a);
|
||||
}
|
||||
|
||||
var addLevel = function(level, parent, hide) {
|
||||
var matching = items.filter(function(x) {
|
||||
return x.parts[level] && !x.parts[level + 1] && arraysEqual(x.parts.slice(0, level), parent);
|
||||
});
|
||||
if (sort === "number") {
|
||||
matching = matching.sort(function(a, b) {
|
||||
return b.value - a.value;
|
||||
});
|
||||
} else {
|
||||
matching = matching.sort();
|
||||
}
|
||||
var othersTime = 0;
|
||||
var othersList = [];
|
||||
var othersRows = [];
|
||||
var childrenRows = [];
|
||||
matching.forEach(function(x) {
|
||||
var visible = (cutoff === 0 && !hide) || (x.value >= cutoff && !hide);
|
||||
|
||||
var cells = [];
|
||||
for (var i = 0; i < maxLength; i++) {
|
||||
cells.push(x.parts[i]);
|
||||
}
|
||||
cells.push(cutoff === 0 ? x.value : x.value.toFixed(3));
|
||||
var cols = createRow(table, 'td', cells);
|
||||
for (i = 0; i < level; i++) {
|
||||
cols[i].className = 'muted';
|
||||
}
|
||||
|
||||
var tr = cols[0].parentNode;
|
||||
if (!visible) {
|
||||
tr.classList.add("hidden");
|
||||
}
|
||||
|
||||
if (cutoff === 0 || x.value >= cutoff) {
|
||||
childrenRows.push(tr);
|
||||
} else {
|
||||
othersTime += x.value;
|
||||
othersList.push(x.parts[level]);
|
||||
othersRows.push(tr);
|
||||
}
|
||||
|
||||
var children = addLevel(level + 1, parent.concat([x.parts[level]]), true);
|
||||
if (children.length > 0) {
|
||||
var cell = cols[level];
|
||||
var onclick = function() {
|
||||
cell.classList.remove("link");
|
||||
cell.removeEventListener("click", onclick);
|
||||
children.forEach(function(x) {
|
||||
x.classList.remove("hidden");
|
||||
});
|
||||
};
|
||||
cell.classList.add("link");
|
||||
cell.addEventListener("click", onclick);
|
||||
}
|
||||
});
|
||||
|
||||
if (othersTime > 0) {
|
||||
var cells = [];
|
||||
for (var i = 0; i < maxLength; i++) {
|
||||
cells.push(parent[i]);
|
||||
}
|
||||
cells.push(othersTime.toFixed(3));
|
||||
cells[level] = 'others';
|
||||
var cols = createRow(table, 'td', cells);
|
||||
for (i = 0; i < level; i++) {
|
||||
cols[i].className = 'muted';
|
||||
}
|
||||
|
||||
var cell = cols[level];
|
||||
var tr = cell.parentNode;
|
||||
var onclick = function() {
|
||||
tr.classList.add("hidden");
|
||||
cell.classList.remove("link");
|
||||
cell.removeEventListener("click", onclick);
|
||||
othersRows.forEach(function(x) {
|
||||
x.classList.remove("hidden");
|
||||
});
|
||||
};
|
||||
|
||||
cell.title = othersList.join(", ");
|
||||
cell.classList.add("link");
|
||||
cell.addEventListener("click", onclick);
|
||||
|
||||
if (hide) {
|
||||
tr.classList.add("hidden");
|
||||
}
|
||||
|
||||
childrenRows.push(tr);
|
||||
}
|
||||
|
||||
return childrenRows;
|
||||
};
|
||||
|
||||
addLevel(0, []);
|
||||
|
||||
return table;
|
||||
}
|
||||
|
||||
function showProfile(path, cutoff = 0.05) {
requestGet(path, {}, function(data) {
data.records['total'] = data.total;
const table = createVisualizationTable(data.records, cutoff, "number");
popup(table);
});
}
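// A minimal sketch, not part of the diff: createVisualizationTable can also be fed an
// in-memory object directly; keys split on '/' become the collapsible hierarchy, and the
// cutoff hides rows below it behind an "others" entry. The timing data below is made up.
var demoRecords = {'startup': 2.5, 'startup/load scripts': 1.2, 'startup/load scripts/extension A': 0.9};
popup(createVisualizationTable(demoRecords, 0.05, 'number'));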
|
||||
|
@ -45,15 +45,8 @@ function formatTime(secs) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
var originalAppTitle = undefined;
|
||||
|
||||
onUiLoaded(function() {
|
||||
originalAppTitle = document.title;
|
||||
});
|
||||
|
||||
function setTitle(progress) {
|
||||
var title = originalAppTitle;
|
||||
var title = 'Stable Diffusion';
|
||||
|
||||
if (opts.show_progress_in_title && progress) {
|
||||
title = '[' + progress.trim() + '] ' + title;
|
||||
@ -76,26 +69,7 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
|
||||
var dateStart = new Date();
|
||||
var wasEverActive = false;
|
||||
var parentProgressbar = progressbarContainer.parentNode;
|
||||
var wakeLock = null;
|
||||
|
||||
var requestWakeLock = async function() {
|
||||
if (!opts.prevent_screen_sleep_during_generation || wakeLock) return;
|
||||
try {
|
||||
wakeLock = await navigator.wakeLock.request('screen');
|
||||
} catch (err) {
|
||||
console.error('Wake Lock is not supported.');
|
||||
}
|
||||
};
|
||||
|
||||
var releaseWakeLock = async function() {
|
||||
if (!opts.prevent_screen_sleep_during_generation || !wakeLock) return;
|
||||
try {
|
||||
await wakeLock.release();
|
||||
wakeLock = null;
|
||||
} catch (err) {
|
||||
console.error('Wake Lock release failed', err);
|
||||
}
|
||||
};
|
||||
var parentGallery = gallery ? gallery.parentNode : null;
|
||||
|
||||
var divProgress = document.createElement('div');
|
||||
divProgress.className = 'progressDiv';
|
||||
@ -106,28 +80,32 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
|
||||
divProgress.appendChild(divInner);
|
||||
parentProgressbar.insertBefore(divProgress, progressbarContainer);
|
||||
|
||||
var livePreview = null;
|
||||
if (parentGallery) {
|
||||
var livePreview = document.createElement('div');
|
||||
livePreview.className = 'livePreview';
|
||||
parentGallery.insertBefore(livePreview, gallery);
|
||||
}
|
||||
|
||||
var removeProgressBar = function() {
|
||||
releaseWakeLock();
|
||||
if (!divProgress) return;
|
||||
|
||||
setTitle("");
|
||||
parentProgressbar.removeChild(divProgress);
|
||||
if (gallery && livePreview) gallery.removeChild(livePreview);
|
||||
if (parentGallery) parentGallery.removeChild(livePreview);
|
||||
atEnd();
|
||||
|
||||
divProgress = null;
|
||||
};
|
||||
|
||||
var funProgress = function(id_task) {
|
||||
requestWakeLock();
|
||||
request("./internal/progress", {id_task: id_task, live_preview: false}, function(res) {
|
||||
var fun = function(id_task, id_live_preview) {
|
||||
request("./internal/progress", {id_task: id_task, id_live_preview: id_live_preview}, function(res) {
|
||||
if (res.completed) {
|
||||
removeProgressBar();
|
||||
return;
|
||||
}
|
||||
|
||||
var rect = progressbarContainer.getBoundingClientRect();
|
||||
|
||||
if (rect.width) {
|
||||
divProgress.style.width = rect.width + "px";
|
||||
}
|
||||
|
||||
let progressText = "";
|
||||
|
||||
divInner.style.width = ((res.progress || 0) * 100.0) + '%';
|
||||
@ -141,6 +119,7 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
|
||||
progressText += " ETA: " + formatTime(res.eta);
|
||||
}
|
||||
|
||||
|
||||
setTitle(progressText);
|
||||
|
||||
if (res.textinfo && res.textinfo.indexOf("\n") == -1) {
|
||||
@ -163,33 +142,16 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
|
||||
return;
|
||||
}
|
||||
|
||||
if (onProgress) {
|
||||
onProgress(res);
|
||||
}
|
||||
|
||||
setTimeout(() => {
|
||||
funProgress(id_task, res.id_live_preview);
|
||||
}, opts.live_preview_refresh_period || 500);
|
||||
}, function() {
|
||||
removeProgressBar();
|
||||
});
|
||||
};
|
||||
|
||||
var funLivePreview = function(id_task, id_live_preview) {
|
||||
request("./internal/progress", {id_task: id_task, id_live_preview: id_live_preview}, function(res) {
|
||||
if (!divProgress) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (res.live_preview && gallery) {
|
||||
rect = gallery.getBoundingClientRect();
|
||||
if (rect.width) {
|
||||
livePreview.style.width = rect.width + "px";
|
||||
livePreview.style.height = rect.height + "px";
|
||||
}
|
||||
|
||||
var img = new Image();
|
||||
img.onload = function() {
|
||||
if (!livePreview) {
|
||||
livePreview = document.createElement('div');
|
||||
livePreview.className = 'livePreview';
|
||||
gallery.insertBefore(livePreview, gallery.firstElementChild);
|
||||
}
|
||||
|
||||
livePreview.appendChild(img);
|
||||
if (livePreview.childElementCount > 2) {
|
||||
livePreview.removeChild(livePreview.firstElementChild);
|
||||
@ -198,18 +160,18 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
|
||||
img.src = res.live_preview;
|
||||
}
|
||||
|
||||
|
||||
if (onProgress) {
|
||||
onProgress(res);
|
||||
}
|
||||
|
||||
setTimeout(() => {
|
||||
funLivePreview(id_task, res.id_live_preview);
|
||||
fun(id_task, res.id_live_preview);
|
||||
}, opts.live_preview_refresh_period || 500);
|
||||
}, function() {
|
||||
removeProgressBar();
|
||||
});
|
||||
};
|
||||
|
||||
funProgress(id_task, 0);
|
||||
|
||||
if (gallery) {
|
||||
funLivePreview(id_task, 0);
|
||||
}
|
||||
|
||||
fun(id_task, 0);
|
||||
}
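For reference, the caller side of requestProgress appears later in this diff (submit() in javascript/ui.js); a condensed sketch of that call, with the txt2img element ids taken from the surrounding code:

// Sketch: how requestProgress is typically invoked after queuing a task.
var id = randomId();                                          // task id also sent to the backend
requestProgress(
    id,
    gradioApp().getElementById('txt2img_gallery_container'),  // progress bar is inserted before this element
    gradioApp().getElementById('txt2img_gallery'),            // live previews are drawn over this gallery
    function() {                                               // atEnd: runs once res.completed is true
        console.log('task ' + id + ' finished');
    }
);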
|
||||
|
@ -1,205 +0,0 @@
|
||||
(function() {
|
||||
const GRADIO_MIN_WIDTH = 320;
|
||||
const PAD = 16;
|
||||
const DEBOUNCE_TIME = 100;
|
||||
const DOUBLE_TAP_DELAY = 200; //ms
|
||||
|
||||
const R = {
|
||||
tracking: false,
|
||||
parent: null,
|
||||
parentWidth: null,
|
||||
leftCol: null,
|
||||
leftColStartWidth: null,
|
||||
screenX: null,
|
||||
lastTapTime: null,
|
||||
};
|
||||
|
||||
let resizeTimer;
|
||||
let parents = [];
|
||||
|
||||
function setLeftColGridTemplate(el, width) {
|
||||
el.style.gridTemplateColumns = `${width}px 16px 1fr`;
|
||||
}
|
||||
|
||||
function displayResizeHandle(parent) {
|
||||
if (!parent.needHideOnMoblie) {
|
||||
return true;
|
||||
}
|
||||
if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) {
|
||||
parent.style.display = 'flex';
|
||||
parent.resizeHandle.style.display = "none";
|
||||
return false;
|
||||
} else {
|
||||
parent.style.display = 'grid';
|
||||
parent.resizeHandle.style.display = "block";
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
function afterResize(parent) {
|
||||
if (displayResizeHandle(parent) && parent.style.gridTemplateColumns != parent.style.originalGridTemplateColumns) {
|
||||
const oldParentWidth = R.parentWidth;
|
||||
const newParentWidth = parent.offsetWidth;
|
||||
const widthL = parseInt(parent.style.gridTemplateColumns.split(' ')[0]);
|
||||
|
||||
const ratio = newParentWidth / oldParentWidth;
|
||||
|
||||
const newWidthL = Math.max(Math.floor(ratio * widthL), parent.minLeftColWidth);
|
||||
setLeftColGridTemplate(parent, newWidthL);
|
||||
|
||||
R.parentWidth = newParentWidth;
|
||||
}
|
||||
}
|
||||
|
||||
function setup(parent) {
|
||||
|
||||
function onDoubleClick(evt) {
|
||||
evt.preventDefault();
|
||||
evt.stopPropagation();
|
||||
|
||||
parent.style.gridTemplateColumns = parent.style.originalGridTemplateColumns;
|
||||
}
|
||||
|
||||
const leftCol = parent.firstElementChild;
|
||||
const rightCol = parent.lastElementChild;
|
||||
|
||||
parents.push(parent);
|
||||
|
||||
parent.style.display = 'grid';
|
||||
parent.style.gap = '0';
|
||||
let leftColTemplate = "";
|
||||
if (parent.children[0].style.flexGrow) {
|
||||
leftColTemplate = `${parent.children[0].style.flexGrow}fr`;
|
||||
parent.minLeftColWidth = GRADIO_MIN_WIDTH;
|
||||
parent.minRightColWidth = GRADIO_MIN_WIDTH;
|
||||
parent.needHideOnMoblie = true;
|
||||
} else {
|
||||
leftColTemplate = parent.children[0].style.flexBasis;
|
||||
parent.minLeftColWidth = parent.children[0].style.flexBasis.slice(0, -2) / 2;
|
||||
parent.minRightColWidth = 0;
|
||||
parent.needHideOnMoblie = false;
|
||||
}
|
||||
|
||||
if (!leftColTemplate) {
|
||||
leftColTemplate = '1fr';
|
||||
}
|
||||
|
||||
const gridTemplateColumns = `${leftColTemplate} ${PAD}px ${parent.children[1].style.flexGrow}fr`;
|
||||
parent.style.gridTemplateColumns = gridTemplateColumns;
|
||||
parent.style.originalGridTemplateColumns = gridTemplateColumns;
|
||||
|
||||
const resizeHandle = document.createElement('div');
|
||||
resizeHandle.classList.add('resize-handle');
|
||||
parent.insertBefore(resizeHandle, rightCol);
|
||||
parent.resizeHandle = resizeHandle;
|
||||
|
||||
['mousedown', 'touchstart'].forEach((eventType) => {
|
||||
resizeHandle.addEventListener(eventType, (evt) => {
|
||||
if (eventType.startsWith('mouse')) {
|
||||
if (evt.button !== 0) return;
|
||||
} else {
|
||||
if (evt.changedTouches.length !== 1) return;
|
||||
|
||||
const currentTime = new Date().getTime();
|
||||
if (R.lastTapTime && currentTime - R.lastTapTime <= DOUBLE_TAP_DELAY) {
|
||||
onDoubleClick(evt);
|
||||
return;
|
||||
}
|
||||
|
||||
R.lastTapTime = currentTime;
|
||||
}
|
||||
|
||||
evt.preventDefault();
|
||||
evt.stopPropagation();
|
||||
|
||||
document.body.classList.add('resizing');
|
||||
|
||||
R.tracking = true;
|
||||
R.parent = parent;
|
||||
R.parentWidth = parent.offsetWidth;
|
||||
R.leftCol = leftCol;
|
||||
R.leftColStartWidth = leftCol.offsetWidth;
|
||||
if (eventType.startsWith('mouse')) {
|
||||
R.screenX = evt.screenX;
|
||||
} else {
|
||||
R.screenX = evt.changedTouches[0].screenX;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
resizeHandle.addEventListener('dblclick', onDoubleClick);
|
||||
|
||||
afterResize(parent);
|
||||
}
|
||||
|
||||
['mousemove', 'touchmove'].forEach((eventType) => {
|
||||
window.addEventListener(eventType, (evt) => {
|
||||
if (eventType.startsWith('mouse')) {
|
||||
if (evt.button !== 0) return;
|
||||
} else {
|
||||
if (evt.changedTouches.length !== 1) return;
|
||||
}
|
||||
|
||||
if (R.tracking) {
|
||||
if (eventType.startsWith('mouse')) {
|
||||
evt.preventDefault();
|
||||
}
|
||||
evt.stopPropagation();
|
||||
|
||||
let delta = 0;
|
||||
if (eventType.startsWith('mouse')) {
|
||||
delta = R.screenX - evt.screenX;
|
||||
} else {
|
||||
delta = R.screenX - evt.changedTouches[0].screenX;
|
||||
}
|
||||
const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - R.parent.minRightColWidth - PAD), R.parent.minLeftColWidth);
|
||||
setLeftColGridTemplate(R.parent, leftColWidth);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
['mouseup', 'touchend'].forEach((eventType) => {
|
||||
window.addEventListener(eventType, (evt) => {
|
||||
if (eventType.startsWith('mouse')) {
|
||||
if (evt.button !== 0) return;
|
||||
} else {
|
||||
if (evt.changedTouches.length !== 1) return;
|
||||
}
|
||||
|
||||
if (R.tracking) {
|
||||
evt.preventDefault();
|
||||
evt.stopPropagation();
|
||||
|
||||
R.tracking = false;
|
||||
|
||||
document.body.classList.remove('resizing');
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
window.addEventListener('resize', () => {
|
||||
clearTimeout(resizeTimer);
|
||||
|
||||
resizeTimer = setTimeout(function() {
|
||||
for (const parent of parents) {
|
||||
afterResize(parent);
|
||||
}
|
||||
}, DEBOUNCE_TIME);
|
||||
});
|
||||
|
||||
setupResizeHandle = setup;
|
||||
})();
|
||||
|
||||
|
||||
function setupAllResizeHandles() {
|
||||
for (var elem of gradioApp().querySelectorAll('.resize-handle-row')) {
|
||||
if (!elem.querySelector('.resize-handle') && !elem.children[0].classList.contains("hidden")) {
|
||||
setupResizeHandle(elem);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
onUiLoaded(setupAllResizeHandles);
|
||||
|
@@ -1,71 +0,0 @@
let settingsExcludeTabsFromShowAll = {
    settings_tab_defaults: 1,
    settings_tab_sysinfo: 1,
    settings_tab_actions: 1,
    settings_tab_licenses: 1,
};

function settingsShowAllTabs() {
    gradioApp().querySelectorAll('#settings > div').forEach(function(elem) {
        if (settingsExcludeTabsFromShowAll[elem.id]) return;

        elem.style.display = "block";
    });
}

function settingsShowOneTab() {
    gradioApp().querySelector('#settings_show_one_page').click();
}

onUiLoaded(function() {
    var edit = gradioApp().querySelector('#settings_search');
    var editTextarea = gradioApp().querySelector('#settings_search > label > input');
    var buttonShowAllPages = gradioApp().getElementById('settings_show_all_pages');
    var settings_tabs = gradioApp().querySelector('#settings div');

    onEdit('settingsSearch', editTextarea, 250, function() {
        var searchText = (editTextarea.value || "").trim().toLowerCase();

        gradioApp().querySelectorAll('#settings > div[id^=settings_] div[id^=column_settings_] > *').forEach(function(elem) {
            var visible = elem.textContent.trim().toLowerCase().indexOf(searchText) != -1;
            elem.style.display = visible ? "" : "none";
        });

        if (searchText != "") {
            settingsShowAllTabs();
        } else {
            settingsShowOneTab();
        }
    });

    settings_tabs.insertBefore(edit, settings_tabs.firstChild);
    settings_tabs.appendChild(buttonShowAllPages);

    buttonShowAllPages.addEventListener("click", settingsShowAllTabs);
});

onOptionsChanged(function() {
    if (gradioApp().querySelector('#settings .settings-category')) return;

    var sectionMap = {};
    gradioApp().querySelectorAll('#settings > div > button').forEach(function(x) {
        sectionMap[x.textContent.trim()] = x;
    });

    opts._categories.forEach(function(x) {
        var section = localization[x[0]] ?? x[0];
        var category = localization[x[1]] ?? x[1];

        var span = document.createElement('SPAN');
        span.textContent = category;
        span.className = 'settings-category';

        var sectionElem = sectionMap[section];
        if (!sectionElem) return;

        sectionElem.parentElement.insertBefore(span, sectionElem);
    });
});
|
||||
|
@ -1,87 +0,0 @@
|
||||
let promptTokenCountUpdateFunctions = {};
|
||||
|
||||
function update_txt2img_tokens(...args) {
|
||||
// Called from Gradio
|
||||
update_token_counter("txt2img_token_button");
|
||||
update_token_counter("txt2img_negative_token_button");
|
||||
if (args.length == 2) {
|
||||
return args[0];
|
||||
}
|
||||
return args;
|
||||
}
|
||||
|
||||
function update_img2img_tokens(...args) {
|
||||
// Called from Gradio
|
||||
update_token_counter("img2img_token_button");
|
||||
update_token_counter("img2img_negative_token_button");
|
||||
if (args.length == 2) {
|
||||
return args[0];
|
||||
}
|
||||
return args;
|
||||
}
|
||||
|
||||
function update_token_counter(button_id) {
|
||||
promptTokenCountUpdateFunctions[button_id]?.();
|
||||
}
|
||||
|
||||
|
||||
function recalculatePromptTokens(name) {
|
||||
promptTokenCountUpdateFunctions[name]?.();
|
||||
}
|
||||
|
||||
function recalculate_prompts_txt2img() {
|
||||
// Called from Gradio
|
||||
recalculatePromptTokens('txt2img_prompt');
|
||||
recalculatePromptTokens('txt2img_neg_prompt');
|
||||
return Array.from(arguments);
|
||||
}
|
||||
|
||||
function recalculate_prompts_img2img() {
|
||||
// Called from Gradio
|
||||
recalculatePromptTokens('img2img_prompt');
|
||||
recalculatePromptTokens('img2img_neg_prompt');
|
||||
return Array.from(arguments);
|
||||
}
|
||||
|
||||
function setupTokenCounting(id, id_counter, id_button) {
|
||||
var prompt = gradioApp().getElementById(id);
|
||||
var counter = gradioApp().getElementById(id_counter);
|
||||
var textarea = gradioApp().querySelector(`#${id} > label > textarea`);
|
||||
|
||||
if (counter.parentElement == prompt.parentElement) {
|
||||
return;
|
||||
}
|
||||
|
||||
prompt.parentElement.insertBefore(counter, prompt);
|
||||
prompt.parentElement.style.position = "relative";
|
||||
|
||||
var func = onEdit(id, textarea, 800, function() {
|
||||
if (counter.classList.contains("token-counter-visible")) {
|
||||
gradioApp().getElementById(id_button)?.click();
|
||||
}
|
||||
});
|
||||
promptTokenCountUpdateFunctions[id] = func;
|
||||
promptTokenCountUpdateFunctions[id_button] = func;
|
||||
}
|
||||
|
||||
function toggleTokenCountingVisibility(id, id_counter, id_button) {
|
||||
var counter = gradioApp().getElementById(id_counter);
|
||||
|
||||
counter.style.display = opts.disable_token_counters ? "none" : "block";
|
||||
counter.classList.toggle("token-counter-visible", !opts.disable_token_counters);
|
||||
}
|
||||
|
||||
function runCodeForTokenCounters(fun) {
|
||||
fun('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button');
|
||||
fun('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button');
|
||||
fun('img2img_prompt', 'img2img_token_counter', 'img2img_token_button');
|
||||
fun('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button');
|
||||
}
|
||||
|
||||
onUiLoaded(function() {
|
||||
runCodeForTokenCounters(setupTokenCounting);
|
||||
});
|
||||
|
||||
onOptionsChanged(function() {
|
||||
runCodeForTokenCounters(toggleTokenCountingVisibility);
|
||||
});
|
218 javascript/ui.js
@ -19,19 +19,28 @@ function all_gallery_buttons() {
|
||||
}
|
||||
|
||||
function selected_gallery_button() {
|
||||
return all_gallery_buttons().find(elem => elem.classList.contains('selected')) ?? null;
|
||||
var allCurrentButtons = gradioApp().querySelectorAll('[style="display: block;"].tabitem div[id$=_gallery].gradio-gallery .thumbnail-item.thumbnail-small.selected');
|
||||
var visibleCurrentButton = null;
|
||||
allCurrentButtons.forEach(function(elem) {
|
||||
if (elem.parentElement.offsetParent) {
|
||||
visibleCurrentButton = elem;
|
||||
}
|
||||
});
|
||||
return visibleCurrentButton;
|
||||
}
|
||||
|
||||
function selected_gallery_index() {
|
||||
return all_gallery_buttons().findIndex(elem => elem.classList.contains('selected'));
|
||||
}
|
||||
var buttons = all_gallery_buttons();
|
||||
var button = selected_gallery_button();
|
||||
|
||||
function gallery_container_buttons(gallery_container) {
|
||||
return gradioApp().querySelectorAll(`#${gallery_container} .thumbnail-item.thumbnail-small`);
|
||||
}
|
||||
var result = -1;
|
||||
buttons.forEach(function(v, i) {
|
||||
if (v == button) {
|
||||
result = i;
|
||||
}
|
||||
});
|
||||
|
||||
function selected_gallery_index_id(gallery_container) {
|
||||
return Array.from(gallery_container_buttons(gallery_container)).findIndex(elem => elem.classList.contains('selected'));
|
||||
return result;
|
||||
}
|
||||
|
||||
function extract_image_from_gallery(gallery) {
|
||||
@ -127,35 +136,27 @@ function create_submit_args(args) {
|
||||
return res;
|
||||
}
|
||||
|
||||
function setSubmitButtonsVisibility(tabname, showInterrupt, showSkip, showInterrupting) {
|
||||
gradioApp().getElementById(tabname + '_interrupt').style.display = showInterrupt ? "block" : "none";
|
||||
gradioApp().getElementById(tabname + '_skip').style.display = showSkip ? "block" : "none";
|
||||
gradioApp().getElementById(tabname + '_interrupting').style.display = showInterrupting ? "block" : "none";
|
||||
}
|
||||
|
||||
function showSubmitButtons(tabname, show) {
|
||||
setSubmitButtonsVisibility(tabname, !show, !show, false);
|
||||
}
|
||||
|
||||
function showSubmitInterruptingPlaceholder(tabname) {
|
||||
setSubmitButtonsVisibility(tabname, false, true, true);
|
||||
gradioApp().getElementById(tabname + '_interrupt').style.display = show ? "none" : "block";
|
||||
gradioApp().getElementById(tabname + '_skip').style.display = show ? "none" : "block";
|
||||
}
|
||||
|
||||
function showRestoreProgressButton(tabname, show) {
|
||||
var button = gradioApp().getElementById(tabname + "_restore_progress");
|
||||
if (!button) return;
|
||||
button.style.setProperty('display', show ? 'flex' : 'none', 'important');
|
||||
|
||||
button.style.display = show ? "flex" : "none";
|
||||
}
|
||||
|
||||
function submit() {
|
||||
showSubmitButtons('txt2img', false);
|
||||
|
||||
var id = randomId();
|
||||
localSet("txt2img_task_id", id);
|
||||
localStorage.setItem("txt2img_task_id", id);
|
||||
|
||||
requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() {
|
||||
showSubmitButtons('txt2img', true);
|
||||
localRemove("txt2img_task_id");
|
||||
localStorage.removeItem("txt2img_task_id");
|
||||
showRestoreProgressButton('txt2img', false);
|
||||
});
|
||||
|
||||
@ -166,23 +167,15 @@ function submit() {
|
||||
return res;
|
||||
}
|
||||
|
||||
function submit_txt2img_upscale() {
|
||||
var res = submit(...arguments);
|
||||
|
||||
res[2] = selected_gallery_index();
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
function submit_img2img() {
|
||||
showSubmitButtons('img2img', false);
|
||||
|
||||
var id = randomId();
|
||||
localSet("img2img_task_id", id);
|
||||
localStorage.setItem("img2img_task_id", id);
|
||||
|
||||
requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() {
|
||||
showSubmitButtons('img2img', true);
|
||||
localRemove("img2img_task_id");
|
||||
localStorage.removeItem("img2img_task_id");
|
||||
showRestoreProgressButton('img2img', false);
|
||||
});
|
||||
|
||||
@ -194,29 +187,13 @@ function submit_img2img() {
|
||||
return res;
|
||||
}
|
||||
|
||||
function submit_extras() {
|
||||
showSubmitButtons('extras', false);
|
||||
|
||||
var id = randomId();
|
||||
|
||||
requestProgress(id, gradioApp().getElementById('extras_gallery_container'), gradioApp().getElementById('extras_gallery'), function() {
|
||||
showSubmitButtons('extras', true);
|
||||
});
|
||||
|
||||
var res = create_submit_args(arguments);
|
||||
|
||||
res[0] = id;
|
||||
|
||||
console.log(res);
|
||||
return res;
|
||||
}
|
||||
|
||||
function restoreProgressTxt2img() {
|
||||
showRestoreProgressButton("txt2img", false);
|
||||
var id = localGet("txt2img_task_id");
|
||||
var id = localStorage.getItem("txt2img_task_id");
|
||||
|
||||
id = localStorage.getItem("txt2img_task_id");
|
||||
|
||||
if (id) {
|
||||
showSubmitInterruptingPlaceholder('txt2img');
|
||||
requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() {
|
||||
showSubmitButtons('txt2img', true);
|
||||
}, null, 0);
|
||||
@ -228,10 +205,9 @@ function restoreProgressTxt2img() {
|
||||
function restoreProgressImg2img() {
|
||||
showRestoreProgressButton("img2img", false);
|
||||
|
||||
var id = localGet("img2img_task_id");
|
||||
var id = localStorage.getItem("img2img_task_id");
|
||||
|
||||
if (id) {
|
||||
showSubmitInterruptingPlaceholder('img2img');
|
||||
requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() {
|
||||
showSubmitButtons('img2img', true);
|
||||
}, null, 0);
|
||||
@ -241,33 +217,9 @@ function restoreProgressImg2img() {
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Configure the width and height elements on `tabname` to accept
|
||||
* pasting of resolutions in the form of "width x height".
|
||||
*/
|
||||
function setupResolutionPasting(tabname) {
|
||||
var width = gradioApp().querySelector(`#${tabname}_width input[type=number]`);
|
||||
var height = gradioApp().querySelector(`#${tabname}_height input[type=number]`);
|
||||
for (const el of [width, height]) {
|
||||
el.addEventListener('paste', function(event) {
|
||||
var pasteData = event.clipboardData.getData('text/plain');
|
||||
var parsed = pasteData.match(/^\s*(\d+)\D+(\d+)\s*$/);
|
||||
if (parsed) {
|
||||
width.value = parsed[1];
|
||||
height.value = parsed[2];
|
||||
updateInput(width);
|
||||
updateInput(height);
|
||||
event.preventDefault();
|
||||
}
|
||||
});
|
||||
}
|
||||
}
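To make the parsing above concrete (a sketch, not part of the diff): the regex accepts any run of non-digit characters between the two numbers, so several common ways of writing a resolution all paste cleanly into the width and height fields.

// Same pattern as in setupResolutionPasting above.
const RESOLUTION_RE = /^\s*(\d+)\D+(\d+)\s*$/;

["512x768", "512 x 768", "512, 768"].forEach(function(text) {
    const parsed = text.match(RESOLUTION_RE);
    if (parsed) {
        console.log("width=" + parsed[1] + " height=" + parsed[2]); // width=512 height=768 for all three inputs
    }
});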
|
||||
|
||||
onUiLoaded(function() {
|
||||
showRestoreProgressButton('txt2img', localGet("txt2img_task_id"));
|
||||
showRestoreProgressButton('img2img', localGet("img2img_task_id"));
|
||||
setupResolutionPasting('txt2img');
|
||||
setupResolutionPasting('img2img');
|
||||
showRestoreProgressButton('txt2img', localStorage.getItem("txt2img_task_id"));
|
||||
showRestoreProgressButton('img2img', localStorage.getItem("img2img_task_id"));
|
||||
});
|
||||
|
||||
|
||||
@ -296,8 +248,29 @@ function confirm_clear_prompt(prompt, negative_prompt) {
|
||||
}
|
||||
|
||||
|
||||
var promptTokecountUpdateFuncs = {};
|
||||
|
||||
function recalculatePromptTokens(name) {
|
||||
if (promptTokecountUpdateFuncs[name]) {
|
||||
promptTokecountUpdateFuncs[name]();
|
||||
}
|
||||
}
|
||||
|
||||
function recalculate_prompts_txt2img() {
|
||||
recalculatePromptTokens('txt2img_prompt');
|
||||
recalculatePromptTokens('txt2img_neg_prompt');
|
||||
return Array.from(arguments);
|
||||
}
|
||||
|
||||
function recalculate_prompts_img2img() {
|
||||
recalculatePromptTokens('img2img_prompt');
|
||||
recalculatePromptTokens('img2img_neg_prompt');
|
||||
return Array.from(arguments);
|
||||
}
|
||||
|
||||
|
||||
var opts = {};
|
||||
onAfterUiUpdate(function() {
|
||||
onUiUpdate(function() {
|
||||
if (Object.keys(opts).length != 0) return;
|
||||
|
||||
var json_elem = gradioApp().getElementById('settings_json');
|
||||
@ -307,7 +280,6 @@ onAfterUiUpdate(function() {
|
||||
var jsdata = textarea.value;
|
||||
opts = JSON.parse(jsdata);
|
||||
|
||||
executeCallbacks(optionsAvailableCallbacks); /*global optionsAvailableCallbacks*/
|
||||
executeCallbacks(optionsChangedCallbacks); /*global optionsChangedCallbacks*/
|
||||
|
||||
Object.defineProperty(textarea, 'value', {
|
||||
@ -329,6 +301,44 @@ onAfterUiUpdate(function() {
|
||||
});
|
||||
|
||||
json_elem.parentElement.style.display = "none";
|
||||
|
||||
function registerTextarea(id, id_counter, id_button) {
|
||||
var prompt = gradioApp().getElementById(id);
|
||||
var counter = gradioApp().getElementById(id_counter);
|
||||
var textarea = gradioApp().querySelector("#" + id + " > label > textarea");
|
||||
|
||||
if (counter.parentElement == prompt.parentElement) {
|
||||
return;
|
||||
}
|
||||
|
||||
prompt.parentElement.insertBefore(counter, prompt);
|
||||
prompt.parentElement.style.position = "relative";
|
||||
|
||||
promptTokecountUpdateFuncs[id] = function() {
|
||||
update_token_counter(id_button);
|
||||
};
|
||||
textarea.addEventListener("input", promptTokecountUpdateFuncs[id]);
|
||||
}
|
||||
|
||||
registerTextarea('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button');
|
||||
registerTextarea('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button');
|
||||
registerTextarea('img2img_prompt', 'img2img_token_counter', 'img2img_token_button');
|
||||
registerTextarea('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button');
|
||||
|
||||
var show_all_pages = gradioApp().getElementById('settings_show_all_pages');
|
||||
var settings_tabs = gradioApp().querySelector('#settings div');
|
||||
if (show_all_pages && settings_tabs) {
|
||||
settings_tabs.appendChild(show_all_pages);
|
||||
show_all_pages.onclick = function() {
|
||||
gradioApp().querySelectorAll('#settings > div').forEach(function(elem) {
|
||||
if (elem.id == "settings_tab_licenses") {
|
||||
return;
|
||||
}
|
||||
|
||||
elem.style.display = "block";
|
||||
});
|
||||
};
|
||||
}
|
||||
});
|
||||
|
||||
onOptionsChanged(function() {
|
||||
@ -344,10 +354,37 @@ onOptionsChanged(function() {
|
||||
});
|
||||
|
||||
let txt2img_textarea, img2img_textarea = undefined;
|
||||
let wait_time = 800;
|
||||
let token_timeouts = {};
|
||||
|
||||
function update_txt2img_tokens(...args) {
|
||||
update_token_counter("txt2img_token_button");
|
||||
if (args.length == 2) {
|
||||
return args[0];
|
||||
}
|
||||
return args;
|
||||
}
|
||||
|
||||
function update_img2img_tokens(...args) {
|
||||
update_token_counter(
|
||||
"img2img_token_button"
|
||||
);
|
||||
if (args.length == 2) {
|
||||
return args[0];
|
||||
}
|
||||
return args;
|
||||
}
|
||||
|
||||
function update_token_counter(button_id) {
|
||||
if (token_timeouts[button_id]) {
|
||||
clearTimeout(token_timeouts[button_id]);
|
||||
}
|
||||
token_timeouts[button_id] = setTimeout(() => gradioApp().getElementById(button_id)?.click(), wait_time);
|
||||
}
|
||||
|
||||
function restart_reload() {
|
||||
document.body.style.backgroundColor = "var(--background-fill-primary)";
|
||||
document.body.innerHTML = '<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading...</h1>';
|
||||
|
||||
var requestPing = function() {
|
||||
requestGet("./internal/ping", {}, function(data) {
|
||||
location.reload();
|
||||
@ -417,20 +454,3 @@ function switchWidthHeight(tabname) {
|
||||
updateInput(height);
|
||||
return [];
|
||||
}
|
||||
|
||||
|
||||
var onEditTimers = {};
|
||||
|
||||
// calls func after afterMs milliseconds has passed since the input elem has been edited by user
|
||||
function onEdit(editId, elem, afterMs, func) {
|
||||
var edited = function() {
|
||||
var existingTimer = onEditTimers[editId];
|
||||
if (existingTimer) clearTimeout(existingTimer);
|
||||
|
||||
onEditTimers[editId] = setTimeout(func, afterMs);
|
||||
};
|
||||
|
||||
elem.addEventListener("input", edited);
|
||||
|
||||
return edited;
|
||||
}
|
||||
|
@@ -42,7 +42,7 @@ onOptionsChanged(function() {
function settingsHintsShowQuicksettings() {
    requestGet("./internal/quicksettings-hint", {}, function(data) {
        var table = document.createElement('table');
        table.className = 'popup-table';
        table.className = 'settings-value-table';

        data.forEach(function(obj) {
            var tr = document.createElement('tr');
16 launch.py
@@ -1,5 +1,6 @@
from modules import launch_utils


args = launch_utils.args
python = launch_utils.python
git = launch_utils.git
@@ -17,7 +18,6 @@ run_pip = launch_utils.run_pip
check_run_python = launch_utils.check_run_python
git_clone = launch_utils.git_clone
git_pull_recursive = launch_utils.git_pull_recursive
list_extensions = launch_utils.list_extensions
run_extension_installer = launch_utils.run_extension_installer
prepare_environment = launch_utils.prepare_environment
configure_for_tests = launch_utils.configure_for_tests
@@ -25,18 +25,8 @@ start = launch_utils.start


def main():
    if args.dump_sysinfo:
        filename = launch_utils.dump_sysinfo()

        print(f"Sysinfo saved as {filename}. Exiting...")

        exit(0)

    launch_utils.startup_timer.record("initial startup")

    with launch_utils.startup_timer.subcategory("prepare environment"):
        if not args.skip_prepare_environment:
            prepare_environment()
    if not args.skip_prepare_environment:
        prepare_environment()

    if args.test_server:
        configure_for_tests()
@ -1,11 +1,8 @@
|
||||
import base64
|
||||
import io
|
||||
import os
|
||||
import time
|
||||
import datetime
|
||||
import uvicorn
|
||||
import ipaddress
|
||||
import requests
|
||||
import gradio as gr
|
||||
from threading import Lock
|
||||
from io import BytesIO
|
||||
@ -17,21 +14,29 @@ from fastapi.encoders import jsonable_encoder
|
||||
from secrets import compare_digest
|
||||
|
||||
import modules.shared as shared
|
||||
from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart, shared_items, script_callbacks, infotext_utils, sd_models, sd_schedulers
|
||||
from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing
|
||||
from modules.api import models
|
||||
from modules.shared import opts
|
||||
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
|
||||
from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
|
||||
from modules.textual_inversion.preprocess import preprocess
|
||||
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
|
||||
from PIL import PngImagePlugin
|
||||
from PIL import PngImagePlugin,Image
|
||||
from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights
|
||||
from modules.sd_models_config import find_checkpoint_config_near_filename
|
||||
from modules.realesrgan_model import get_realesrgan_models
|
||||
from modules import devices
|
||||
from typing import Any
|
||||
from typing import Dict, List, Any
|
||||
import piexif
|
||||
import piexif.helper
|
||||
from contextlib import closing
|
||||
from modules.progress import create_task_id, add_task_to_queue, start_task, finish_task, current_task
|
||||
|
||||
|
||||
def upscaler_to_index(name: str):
|
||||
try:
|
||||
return [x.name.lower() for x in shared.sd_upscalers].index(name.lower())
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in shared.sd_upscalers])}") from e
|
||||
|
||||
|
||||
def script_name_to_index(name, scripts):
|
||||
try:
|
||||
@ -43,7 +48,7 @@ def script_name_to_index(name, scripts):
|
||||
def validate_sampler_name(name):
|
||||
config = sd_samplers.all_samplers_map.get(name, None)
|
||||
if config is None:
|
||||
raise HTTPException(status_code=400, detail="Sampler not found")
|
||||
raise HTTPException(status_code=404, detail="Sampler not found")
|
||||
|
||||
return name
|
||||
|
||||
@ -55,45 +60,11 @@ def setUpscalers(req: dict):
|
||||
return reqDict
|
||||
|
||||
|
||||
def verify_url(url):
|
||||
"""Returns True if the url refers to a global resource."""
|
||||
|
||||
import socket
|
||||
from urllib.parse import urlparse
|
||||
try:
|
||||
parsed_url = urlparse(url)
|
||||
domain_name = parsed_url.netloc
|
||||
host = socket.gethostbyname_ex(domain_name)
|
||||
for ip in host[2]:
|
||||
ip_addr = ipaddress.ip_address(ip)
|
||||
if not ip_addr.is_global:
|
||||
return False
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def decode_base64_to_image(encoding):
|
||||
if encoding.startswith("http://") or encoding.startswith("https://"):
|
||||
if not opts.api_enable_requests:
|
||||
raise HTTPException(status_code=500, detail="Requests not allowed")
|
||||
|
||||
if opts.api_forbid_local_requests and not verify_url(encoding):
|
||||
raise HTTPException(status_code=500, detail="Request to local resource not allowed")
|
||||
|
||||
headers = {'user-agent': opts.api_useragent} if opts.api_useragent else {}
|
||||
response = requests.get(encoding, timeout=30, headers=headers)
|
||||
try:
|
||||
image = images.read(BytesIO(response.content))
|
||||
return image
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail="Invalid image url") from e
|
||||
|
||||
if encoding.startswith("data:image/"):
|
||||
encoding = encoding.split(";")[1].split(",")[1]
|
||||
try:
|
||||
image = images.read(BytesIO(base64.b64decode(encoding)))
|
||||
image = Image.open(BytesIO(base64.b64decode(encoding)))
|
||||
return image
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail="Invalid encoded image") from e
|
||||
@ -101,8 +72,7 @@ def decode_base64_to_image(encoding):
|
||||
|
||||
def encode_pil_to_base64(image):
|
||||
with io.BytesIO() as output_bytes:
|
||||
if isinstance(image, str):
|
||||
return image
|
||||
|
||||
if opts.samples_format.lower() == 'png':
|
||||
use_metadata = False
|
||||
metadata = PngImagePlugin.PngInfo()
|
||||
@ -113,8 +83,6 @@ def encode_pil_to_base64(image):
|
||||
image.save(output_bytes, format="PNG", pnginfo=(metadata if use_metadata else None), quality=opts.jpeg_quality)
|
||||
|
||||
elif opts.samples_format.lower() in ("jpg", "jpeg", "webp"):
|
||||
if image.mode in ("RGBA", "P"):
|
||||
image = image.convert("RGB")
|
||||
parameters = image.info.get('parameters', None)
|
||||
exif_bytes = piexif.dump({
|
||||
"Exif": { piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(parameters or "", encoding="unicode") }
|
||||
@ -133,16 +101,15 @@ def encode_pil_to_base64(image):
|
||||
|
||||
|
||||
def api_middleware(app: FastAPI):
|
||||
rich_available = False
|
||||
rich_available = True
|
||||
try:
|
||||
if os.environ.get('WEBUI_RICH_EXCEPTIONS', None) is not None:
|
||||
import anyio # importing just so it can be placed on silent list
|
||||
import starlette # importing just so it can be placed on silent list
|
||||
from rich.console import Console
|
||||
console = Console()
|
||||
rich_available = True
|
||||
import anyio # importing just so it can be placed on silent list
|
||||
import starlette # importing just so it can be placed on silent list
|
||||
from rich.console import Console
|
||||
console = Console()
|
||||
except Exception:
|
||||
pass
|
||||
import traceback
|
||||
rich_available = False
|
||||
|
||||
@app.middleware("http")
|
||||
async def log_and_time(req: Request, call_next):
|
||||
@ -153,14 +120,14 @@ def api_middleware(app: FastAPI):
|
||||
endpoint = req.scope.get('path', 'err')
|
||||
if shared.cmd_opts.api_log and endpoint.startswith('/sdapi'):
|
||||
print('API {t} {code} {prot}/{ver} {method} {endpoint} {cli} {duration}'.format(
|
||||
t=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
|
||||
code=res.status_code,
|
||||
ver=req.scope.get('http_version', '0.0'),
|
||||
cli=req.scope.get('client', ('0:0.0.0', 0))[0],
|
||||
prot=req.scope.get('scheme', 'err'),
|
||||
method=req.scope.get('method', 'err'),
|
||||
endpoint=endpoint,
|
||||
duration=duration,
|
||||
t = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
|
||||
code = res.status_code,
|
||||
ver = req.scope.get('http_version', '0.0'),
|
||||
cli = req.scope.get('client', ('0:0.0.0', 0))[0],
|
||||
prot = req.scope.get('scheme', 'err'),
|
||||
method = req.scope.get('method', 'err'),
|
||||
endpoint = endpoint,
|
||||
duration = duration,
|
||||
))
|
||||
return res
|
||||
|
||||
@ -171,13 +138,12 @@ def api_middleware(app: FastAPI):
|
||||
"body": vars(e).get('body', ''),
|
||||
"errors": str(e),
|
||||
}
|
||||
if not isinstance(e, HTTPException): # do not print backtrace on known httpexceptions
|
||||
message = f"API error: {request.method}: {request.url} {err}"
|
||||
if not isinstance(e, HTTPException): # do not print backtrace on known httpexceptions
|
||||
print(f"API error: {request.method}: {request.url} {err}")
|
||||
if rich_available:
|
||||
print(message)
|
||||
console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200]))
|
||||
else:
|
||||
errors.report(message, exc_info=True)
|
||||
traceback.print_exc()
|
||||
return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err))
|
||||
|
||||
@app.middleware("http")
|
||||
@ -220,57 +186,29 @@ class Api:
|
||||
self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=models.OptionsModel)
|
||||
self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
|
||||
self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel)
|
||||
self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=list[models.SamplerItem])
|
||||
self.add_api_route("/sdapi/v1/schedulers", self.get_schedulers, methods=["GET"], response_model=list[models.SchedulerItem])
|
||||
self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=list[models.UpscalerItem])
|
||||
self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, methods=["GET"], response_model=list[models.LatentUpscalerModeItem])
|
||||
self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=list[models.SDModelItem])
|
||||
self.add_api_route("/sdapi/v1/sd-vae", self.get_sd_vaes, methods=["GET"], response_model=list[models.SDVaeItem])
|
||||
self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=list[models.HypernetworkItem])
|
||||
self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=list[models.FaceRestorerItem])
|
||||
self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=list[models.RealesrganItem])
|
||||
self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=list[models.PromptStyleItem])
|
||||
self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem])
|
||||
self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem])
|
||||
self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem])
|
||||
self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem])
|
||||
self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem])
|
||||
self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem])
|
||||
self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem])
|
||||
self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse)
|
||||
self.add_api_route("/sdapi/v1/refresh-embeddings", self.refresh_embeddings, methods=["POST"])
|
||||
self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"])
|
||||
self.add_api_route("/sdapi/v1/refresh-vae", self.refresh_vae, methods=["POST"])
|
||||
self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse)
|
||||
self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse)
|
||||
self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse)
|
||||
self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse)
|
||||
self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse)
|
||||
self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse)
|
||||
self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"])
|
||||
self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"])
|
||||
self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList)
|
||||
self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=list[models.ScriptInfo])
|
||||
self.add_api_route("/sdapi/v1/extensions", self.get_extensions_list, methods=["GET"], response_model=list[models.ExtensionItem])
|
||||
|
||||
if shared.cmd_opts.api_server_stop:
|
||||
self.add_api_route("/sdapi/v1/server-kill", self.kill_webui, methods=["POST"])
|
||||
self.add_api_route("/sdapi/v1/server-restart", self.restart_webui, methods=["POST"])
|
||||
self.add_api_route("/sdapi/v1/server-stop", self.stop_webui, methods=["POST"])
|
||||
self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=List[models.ScriptInfo])
|
||||
|
||||
self.default_script_arg_txt2img = []
|
||||
self.default_script_arg_img2img = []
|
||||
|
||||
txt2img_script_runner = scripts.scripts_txt2img
|
||||
img2img_script_runner = scripts.scripts_img2img
|
||||
|
||||
if not txt2img_script_runner.scripts or not img2img_script_runner.scripts:
|
||||
ui.create_ui()
|
||||
|
||||
if not txt2img_script_runner.scripts:
|
||||
txt2img_script_runner.initialize_scripts(False)
|
||||
if not self.default_script_arg_txt2img:
|
||||
self.default_script_arg_txt2img = self.init_default_script_args(txt2img_script_runner)
|
||||
|
||||
if not img2img_script_runner.scripts:
|
||||
img2img_script_runner.initialize_scripts(True)
|
||||
if not self.default_script_arg_img2img:
|
||||
self.default_script_arg_img2img = self.init_default_script_args(img2img_script_runner)
|
||||
|
||||
|
||||
|
||||
def add_api_route(self, path: str, endpoint, **kwargs):
|
||||
if shared.cmd_opts.api_auth:
|
||||
return self.app.add_api_route(path, endpoint, dependencies=[Depends(self.auth)], **kwargs)
|
||||
@ -332,20 +270,15 @@ class Api:
|
||||
script_args[script.args_from:script.args_to] = ui_default_values
|
||||
return script_args
|
||||
|
||||
def init_script_args(self, request, default_script_args, selectable_scripts, selectable_idx, script_runner, *, input_script_args=None):
|
||||
def init_script_args(self, request, default_script_args, selectable_scripts, selectable_idx, script_runner):
|
||||
script_args = default_script_args.copy()
|
||||
|
||||
if input_script_args is not None:
|
||||
for index, value in input_script_args.items():
|
||||
script_args[index] = value
|
||||
|
||||
# position 0 in script_arg is the idx+1 of the selectable script that is going to be run when using scripts.scripts_*2img.run()
|
||||
if selectable_scripts:
|
||||
script_args[selectable_scripts.args_from:selectable_scripts.args_to] = request.script_args
|
||||
script_args[0] = selectable_idx + 1
|
||||
|
||||
# Now check for always on scripts
|
||||
if request.alwayson_scripts:
|
||||
if request.alwayson_scripts and (len(request.alwayson_scripts) > 0):
|
||||
for alwayson_script_name in request.alwayson_scripts.keys():
|
||||
alwayson_script = self.get_script(alwayson_script_name, script_runner)
|
||||
if alwayson_script is None:
|
||||
@ -360,138 +293,53 @@ class Api:
|
||||
script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx]
|
||||
return script_args
|
||||
|
||||
def apply_infotext(self, request, tabname, *, script_runner=None, mentioned_script_args=None):
|
||||
"""Processes `infotext` field from the `request`, and sets other fields of the `request` according to what's in infotext.
|
||||
|
||||
If request already has a field set, and that field is encountered in infotext too, the value from infotext is ignored.
|
||||
|
||||
Additionally, fills `mentioned_script_args` dict with index: value pairs for script arguments read from infotext.
|
||||
"""
|
||||
|
||||
if not request.infotext:
|
||||
return {}
|
||||
|
||||
possible_fields = infotext_utils.paste_fields[tabname]["fields"]
|
||||
set_fields = request.model_dump(exclude_unset=True) if hasattr(request, "request") else request.dict(exclude_unset=True) # pydantic v1/v2 have different names for this
|
||||
params = infotext_utils.parse_generation_parameters(request.infotext)
|
||||
|
||||
def get_field_value(field, params):
|
||||
value = field.function(params) if field.function else params.get(field.label)
|
||||
if value is None:
|
||||
return None
|
||||
|
||||
if field.api in request.__fields__:
|
||||
target_type = request.__fields__[field.api].type_
|
||||
else:
|
||||
target_type = type(field.component.value)
|
||||
|
||||
if target_type == type(None):
|
||||
return None
|
||||
|
||||
if isinstance(value, dict) and value.get('__type__') == 'generic_update': # this is a gradio.update rather than a value
|
||||
value = value.get('value')
|
||||
|
||||
if value is not None and not isinstance(value, target_type):
|
||||
value = target_type(value)
|
||||
|
||||
return value
|
||||
|
||||
for field in possible_fields:
|
||||
if not field.api:
|
||||
continue
|
||||
|
||||
if field.api in set_fields:
|
||||
continue
|
||||
|
||||
value = get_field_value(field, params)
|
||||
if value is not None:
|
||||
setattr(request, field.api, value)
|
||||
|
||||
if request.override_settings is None:
|
||||
request.override_settings = {}
|
||||
|
||||
overridden_settings = infotext_utils.get_override_settings(params)
|
||||
for _, setting_name, value in overridden_settings:
|
||||
if setting_name not in request.override_settings:
|
||||
request.override_settings[setting_name] = value
|
||||
|
||||
if script_runner is not None and mentioned_script_args is not None:
|
||||
indexes = {v: i for i, v in enumerate(script_runner.inputs)}
|
||||
script_fields = ((field, indexes[field.component]) for field in possible_fields if field.component in indexes)
|
||||
|
||||
for field, index in script_fields:
|
||||
value = get_field_value(field, params)
|
||||
|
||||
if value is None:
|
||||
continue
|
||||
|
||||
mentioned_script_args[index] = value
|
||||
|
||||
return params
|
||||
|
||||
def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcessingAPI):
|
||||
task_id = txt2imgreq.force_task_id or create_task_id("txt2img")
|
||||
|
||||
script_runner = scripts.scripts_txt2img
|
||||
|
||||
infotext_script_args = {}
|
||||
self.apply_infotext(txt2imgreq, "txt2img", script_runner=script_runner, mentioned_script_args=infotext_script_args)
|
||||
|
||||
if not script_runner.scripts:
|
||||
script_runner.initialize_scripts(False)
|
||||
ui.create_ui()
|
||||
if not self.default_script_arg_txt2img:
|
||||
self.default_script_arg_txt2img = self.init_default_script_args(script_runner)
|
||||
selectable_scripts, selectable_script_idx = self.get_selectable_script(txt2imgreq.script_name, script_runner)
|
||||
sampler, scheduler = sd_samplers.get_sampler_and_scheduler(txt2imgreq.sampler_name or txt2imgreq.sampler_index, txt2imgreq.scheduler)
|
||||
|
||||
populate = txt2imgreq.copy(update={ # Override __init__ params
|
||||
"sampler_name": validate_sampler_name(sampler),
|
||||
"sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index),
|
||||
"do_not_save_samples": not txt2imgreq.save_images,
|
||||
"do_not_save_grid": not txt2imgreq.save_images,
|
||||
})
|
||||
if populate.sampler_name:
|
||||
populate.sampler_index = None # prevent a warning later on
|
||||
|
||||
if not populate.scheduler and scheduler != "Automatic":
|
||||
populate.scheduler = scheduler
|
||||
|
||||
args = vars(populate)
|
||||
args.pop('script_name', None)
|
||||
args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them
|
||||
args.pop('alwayson_scripts', None)
|
||||
args.pop('infotext', None)
|
||||
|
||||
script_args = self.init_script_args(txt2imgreq, self.default_script_arg_txt2img, selectable_scripts, selectable_script_idx, script_runner, input_script_args=infotext_script_args)
|
||||
script_args = self.init_script_args(txt2imgreq, self.default_script_arg_txt2img, selectable_scripts, selectable_script_idx, script_runner)
|
||||
|
||||
send_images = args.pop('send_images', True)
|
||||
args.pop('save_images', None)
|
||||
|
||||
add_task_to_queue(task_id)
|
||||
|
||||
with self.queue_lock:
|
||||
with closing(StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **args)) as p:
|
||||
p.is_api = True
|
||||
p.scripts = script_runner
|
||||
p.outpath_grids = opts.outdir_txt2img_grids
|
||||
p.outpath_samples = opts.outdir_txt2img_samples
|
||||
p = StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **args)
|
||||
p.scripts = script_runner
|
||||
p.outpath_grids = opts.outdir_txt2img_grids
|
||||
p.outpath_samples = opts.outdir_txt2img_samples
|
||||
|
||||
try:
|
||||
shared.state.begin(job="scripts_txt2img")
|
||||
start_task(task_id)
|
||||
if selectable_scripts is not None:
|
||||
p.script_args = script_args
|
||||
processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
|
||||
else:
|
||||
p.script_args = tuple(script_args) # Need to pass args as tuple here
|
||||
processed = process_images(p)
|
||||
finish_task(task_id)
|
||||
finally:
|
||||
shared.state.end()
|
||||
shared.total_tqdm.clear()
|
||||
shared.state.begin()
|
||||
if selectable_scripts is not None:
|
||||
p.script_args = script_args
|
||||
processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
|
||||
else:
|
||||
p.script_args = tuple(script_args) # Need to pass args as tuple here
|
||||
processed = process_images(p)
|
||||
shared.state.end()
|
||||
|
||||
b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
|
||||
|
||||
return models.TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())
|
||||
|
||||
def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessingAPI):
|
||||
task_id = img2imgreq.force_task_id or create_task_id("img2img")
|
||||
|
||||
init_images = img2imgreq.init_images
|
||||
if init_images is None:
|
||||
raise HTTPException(status_code=404, detail="Init image not found")
|
||||
@ -501,15 +349,15 @@ class Api:
|
||||
mask = decode_base64_to_image(mask)
|
||||
|
||||
script_runner = scripts.scripts_img2img
|
||||
|
||||
infotext_script_args = {}
|
||||
self.apply_infotext(img2imgreq, "img2img", script_runner=script_runner, mentioned_script_args=infotext_script_args)
|
||||
|
||||
if not script_runner.scripts:
|
||||
script_runner.initialize_scripts(True)
|
||||
ui.create_ui()
|
||||
if not self.default_script_arg_img2img:
|
||||
self.default_script_arg_img2img = self.init_default_script_args(script_runner)
|
||||
selectable_scripts, selectable_script_idx = self.get_selectable_script(img2imgreq.script_name, script_runner)
|
||||
sampler, scheduler = sd_samplers.get_sampler_and_scheduler(img2imgreq.sampler_name or img2imgreq.sampler_index, img2imgreq.scheduler)
|
||||
|
||||
populate = img2imgreq.copy(update={ # Override __init__ params
|
||||
"sampler_name": validate_sampler_name(sampler),
|
||||
"sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index),
|
||||
"do_not_save_samples": not img2imgreq.save_images,
|
||||
"do_not_save_grid": not img2imgreq.save_images,
|
||||
"mask": mask,
|
||||
@ -517,44 +365,32 @@ class Api:
|
||||
if populate.sampler_name:
|
||||
populate.sampler_index = None # prevent a warning later on
|
||||
|
||||
if not populate.scheduler and scheduler != "Automatic":
|
||||
populate.scheduler = scheduler
|
||||
|
||||
args = vars(populate)
|
||||
args.pop('include_init_images', None) # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine.
|
||||
args.pop('script_name', None)
|
||||
args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them
|
||||
args.pop('alwayson_scripts', None)
|
||||
args.pop('infotext', None)
|
||||
|
||||
script_args = self.init_script_args(img2imgreq, self.default_script_arg_img2img, selectable_scripts, selectable_script_idx, script_runner, input_script_args=infotext_script_args)
|
||||
script_args = self.init_script_args(img2imgreq, self.default_script_arg_img2img, selectable_scripts, selectable_script_idx, script_runner)
|
||||
|
||||
send_images = args.pop('send_images', True)
|
||||
args.pop('save_images', None)
|
||||
|
||||
add_task_to_queue(task_id)
|
||||
|
||||
with self.queue_lock:
|
||||
with closing(StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args)) as p:
|
||||
p.init_images = [decode_base64_to_image(x) for x in init_images]
|
||||
p.is_api = True
|
||||
p.scripts = script_runner
|
||||
p.outpath_grids = opts.outdir_img2img_grids
|
||||
p.outpath_samples = opts.outdir_img2img_samples
|
||||
p = StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args)
|
||||
p.init_images = [decode_base64_to_image(x) for x in init_images]
|
||||
p.scripts = script_runner
|
||||
p.outpath_grids = opts.outdir_img2img_grids
|
||||
p.outpath_samples = opts.outdir_img2img_samples
|
||||
|
||||
try:
|
||||
shared.state.begin(job="scripts_img2img")
|
||||
start_task(task_id)
|
||||
if selectable_scripts is not None:
|
||||
p.script_args = script_args
|
||||
processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
|
||||
else:
|
||||
p.script_args = tuple(script_args) # Need to pass args as tuple here
processed = process_images(p)
finish_task(task_id)
finally:
shared.state.end()
shared.total_tqdm.clear()
shared.state.begin()
if selectable_scripts is not None:
p.script_args = script_args
processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
else:
p.script_args = tuple(script_args) # Need to pass args as tuple here
processed = process_images(p)
shared.state.end()

b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []

@@ -586,6 +422,9 @@ class Api:
return models.ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1])

def pnginfoapi(self, req: models.PNGInfoRequest):
if(not req.image.strip()):
return models.PNGInfoResponse(info="")

image = decode_base64_to_image(req.image.strip())
if image is None:
return models.PNGInfoResponse(info="")
@@ -594,10 +433,9 @@ class Api:
if geninfo is None:
geninfo = ""

params = infotext_utils.parse_generation_parameters(geninfo)
script_callbacks.infotext_pasted_callback(geninfo, params)
items = {**{'parameters': geninfo}, **items}

return models.PNGInfoResponse(info=geninfo, items=items, parameters=params)
return models.PNGInfoResponse(info=geninfo, items=items)

def progressapi(self, req: models.ProgressRequest = Depends()):
# copy from check_progress_call of ui.py
@@ -625,7 +463,7 @@ class Api:
if shared.state.current_image and not req.skip_current_image:
current_image = encode_pil_to_base64(shared.state.current_image)

return models.ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo, current_task=current_task)
return models.ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo)

def interrogateapi(self, interrogatereq: models.InterrogateRequest):
image_b64 = interrogatereq.image
@@ -652,12 +490,12 @@ class Api:
return {}

def unloadapi(self):
sd_models.unload_model_weights()
unload_model_weights()

return {}

def reloadapi(self):
sd_models.send_model_to_device(shared.sd_model)
reload_model_weights()

return {}

@@ -675,13 +513,9 @@ class Api:

return options

def set_config(self, req: dict[str, Any]):
checkpoint_name = req.get("sd_model_checkpoint", None)
if checkpoint_name is not None and checkpoint_name not in sd_models.checkpoint_aliases:
raise RuntimeError(f"model {checkpoint_name!r} not found")

def set_config(self, req: Dict[str, Any]):
for k, v in req.items():
shared.opts.set(k, v, is_api=True)
shared.opts.set(k, v)

shared.opts.save(shared.config_filename)
return
@@ -692,17 +526,6 @@ class Api:
def get_samplers(self):
return [{"name": sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in sd_samplers.all_samplers]

def get_schedulers(self):
return [
{
"name": scheduler.name,
"label": scheduler.label,
"aliases": scheduler.aliases,
"default_rho": scheduler.default_rho,
"need_inner_model": scheduler.need_inner_model,
}
for scheduler in sd_schedulers.schedulers]

def get_upscalers(self):
return [
{
@@ -715,21 +538,8 @@ class Api:
for upscaler in shared.sd_upscalers
]

def get_latent_upscale_modes(self):
return [
{
"name": upscale_mode,
}
for upscale_mode in [*(shared.latent_upscale_modes or {})]
]

def get_sd_models(self):
import modules.sd_models as sd_models
return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in sd_models.checkpoints_list.values()]

def get_sd_vaes(self):
import modules.sd_vae as sd_vae
return [{"model_name": x, "filename": sd_vae.vae_dict[x]} for x in sd_vae.vae_dict.keys()]
return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in checkpoints_list.values()]

def get_hypernetworks(self):
return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
@@ -768,43 +578,49 @@ class Api:
"skipped": convert_embeddings(db.skipped_embeddings),
}

def refresh_embeddings(self):
with self.queue_lock:
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True)

def refresh_checkpoints(self):
with self.queue_lock:
shared.refresh_checkpoints()

def refresh_vae(self):
with self.queue_lock:
shared_items.refresh_vae_list()
shared.refresh_checkpoints()

def create_embedding(self, args: dict):
try:
shared.state.begin(job="create_embedding")
shared.state.begin()
filename = create_embedding(**args) # create empty embedding
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used
shared.state.end()
return models.CreateResponse(info=f"create embedding filename: {filename}")
except AssertionError as e:
return models.TrainResponse(info=f"create embedding error: {e}")
finally:
shared.state.end()

return models.TrainResponse(info=f"create embedding error: {e}")

def create_hypernetwork(self, args: dict):
try:
shared.state.begin(job="create_hypernetwork")
shared.state.begin()
filename = create_hypernetwork(**args) # create empty embedding
shared.state.end()
return models.CreateResponse(info=f"create hypernetwork filename: {filename}")
except AssertionError as e:
return models.TrainResponse(info=f"create hypernetwork error: {e}")
finally:
shared.state.end()
return models.TrainResponse(info=f"create hypernetwork error: {e}")

def preprocess(self, args: dict):
try:
shared.state.begin()
preprocess(**args) # quick operation unless blip/booru interrogation is enabled
shared.state.end()
return models.PreprocessResponse(info = 'preprocess complete')
except KeyError as e:
shared.state.end()
return models.PreprocessResponse(info=f"preprocess error: invalid token: {e}")
except AssertionError as e:
shared.state.end()
return models.PreprocessResponse(info=f"preprocess error: {e}")
except FileNotFoundError as e:
shared.state.end()
return models.PreprocessResponse(info=f'preprocess error: {e}')

def train_embedding(self, args: dict):
try:
shared.state.begin(job="train_embedding")
shared.state.begin()
apply_optimizations = shared.opts.training_xattention_optimizations
error = None
filename = ''
@@ -817,15 +633,15 @@ class Api:
finally:
if not apply_optimizations:
sd_hijack.apply_optimizations()
shared.state.end()
return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
except Exception as msg:
return models.TrainResponse(info=f"train embedding error: {msg}")
finally:
except AssertionError as msg:
shared.state.end()
return models.TrainResponse(info=f"train embedding error: {msg}")

def train_hypernetwork(self, args: dict):
try:
shared.state.begin(job="train_hypernetwork")
shared.state.begin()
shared.loaded_hypernetworks = []
apply_optimizations = shared.opts.training_xattention_optimizations
error = None
@@ -843,10 +659,9 @@ class Api:
sd_hijack.apply_optimizations()
shared.state.end()
return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
except Exception as exc:
return models.TrainResponse(info=f"train embedding error: {exc}")
finally:
except AssertionError:
shared.state.end()
return models.TrainResponse(info=f"train embedding error: {error}")

def get_memory(self):
try:
@@ -883,46 +698,6 @@ class Api:
cuda = {'error': f'{err}'}
return models.MemoryResponse(ram=ram, cuda=cuda)

def get_extensions_list(self):
from modules import extensions
extensions.list_extensions()
ext_list = []
for ext in extensions.extensions:
ext: extensions.Extension
ext.read_info_from_repo()
if ext.remote is not None:
ext_list.append({
"name": ext.name,
"remote": ext.remote,
"branch": ext.branch,
"commit_hash":ext.commit_hash,
"commit_date":ext.commit_date,
"version":ext.version,
"enabled":ext.enabled
})
return ext_list

def launch(self, server_name, port, root_path):
def launch(self, server_name, port):
self.app.include_router(self.router)
uvicorn.run(
self.app,
host=server_name,
port=port,
timeout_keep_alive=shared.cmd_opts.timeout_keep_alive,
root_path=root_path,
ssl_keyfile=shared.cmd_opts.tls_keyfile,
ssl_certfile=shared.cmd_opts.tls_certfile
)

def kill_webui(self):
restart.stop_program()

def restart_webui(self):
if restart.is_restartable():
restart.restart_program()
return Response(status_code=501)

def stop_webui(request):
shared.state.server_command = "stop"
return Response("Stopping.")

uvicorn.run(self.app, host=server_name, port=port)
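As a side note on the pnginfoapi change above (master adds a parsed `parameters` field to PNGInfoResponse), here is a hedged sketch of how a client might read that response. The `/sdapi/v1/png-info` route and the `127.0.0.1:7860` address are assumptions about a default local install, not something shown in this diff.

```python
# Illustrative client call against a locally running webui with --api enabled.
import base64

import requests

with open("output.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

r = requests.post("http://127.0.0.1:7860/sdapi/v1/png-info", json={"image": image_b64})
data = r.json()

print(data["info"])              # raw infotext string embedded in the PNG
print(data["items"])             # other fields stored in the image
print(data.get("parameters"))    # parsed infotext; present only on the master side of this diff
```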
@@ -1,10 +1,11 @@
import inspect

from pydantic import BaseModel, Field, create_model
from typing import Any, Optional, Literal
from typing import Any, Optional
from typing_extensions import Literal
from inflection import underscore
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
from modules.shared import sd_upscalers, opts, parser
from typing import Dict, List

API_NOT_ALLOWED = [
"self",
@@ -48,12 +49,10 @@ class PydanticModelGenerator:
additional_fields = None,
):
def field_type_generator(k, v):
# field_type = str if not overrides.get(k) else overrides[k]["type"]
# print(k, v.annotation, v.default)
field_type = v.annotation

if field_type == 'Image':
# images are sent as base64 strings via API
field_type = 'str'

return Optional[field_type]

def merge_class_params(class_):
@@ -63,6 +62,7 @@ class PydanticModelGenerator:
parameters = {**parameters, **inspect.signature(classes.__init__).parameters}
return parameters


self._model_name = model_name
self._class_data = merge_class_params(class_instance)

@@ -71,7 +71,7 @@ class PydanticModelGenerator:
field=underscore(k),
field_alias=k,
field_type=field_type_generator(k, v),
field_value=None if isinstance(v.default, property) else v.default
field_value=v.default
)
for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED
]
@@ -107,8 +107,6 @@ StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator(
{"key": "send_images", "type": bool, "default": True},
{"key": "save_images", "type": bool, "default": False},
{"key": "alwayson_scripts", "type": dict, "default": {}},
{"key": "force_task_id", "type": str, "default": None},
{"key": "infotext", "type": str, "default": None},
]
).generate_model()

@@ -126,18 +124,16 @@ StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
{"key": "send_images", "type": bool, "default": True},
{"key": "save_images", "type": bool, "default": False},
{"key": "alwayson_scripts", "type": dict, "default": {}},
{"key": "force_task_id", "type": str, "default": None},
{"key": "infotext", "type": str, "default": None},
]
).generate_model()

class TextToImageResponse(BaseModel):
images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
parameters: dict
info: str

class ImageToImageResponse(BaseModel):
images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
parameters: dict
info: str

@@ -147,7 +143,7 @@ class ExtrasBaseRequest(BaseModel):
gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.")
codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.")
codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.")
upscaling_resize: float = Field(default=2, title="Upscaling Factor", gt=0, description="By how much to upscale the image, only used when resize_mode=0.")
upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=8, description="By how much to upscale the image, only used when resize_mode=0.")
upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.")
upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.")
upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?")
@@ -170,18 +166,17 @@ class FileData(BaseModel):
name: str = Field(title="File name")

class ExtrasBatchImagesRequest(ExtrasBaseRequest):
imageList: list[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings")
imageList: List[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings")

class ExtrasBatchImagesResponse(ExtraBaseResponse):
images: list[str] = Field(title="Images", description="The generated images in base64 format.")
images: List[str] = Field(title="Images", description="The generated images in base64 format.")

class PNGInfoRequest(BaseModel):
image: str = Field(title="Image", description="The base64 encoded PNG image")

class PNGInfoResponse(BaseModel):
info: str = Field(title="Image info", description="A string with the parameters used to generate the image")
items: dict = Field(title="Items", description="A dictionary containing all the other fields the image had")
parameters: dict = Field(title="Parameters", description="A dictionary with parsed generation info fields")
items: dict = Field(title="Items", description="An object containing all the info the image had")

class ProgressRequest(BaseModel):
skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization")
@@ -206,13 +201,17 @@ class TrainResponse(BaseModel):
class CreateResponse(BaseModel):
info: str = Field(title="Create info", description="Response string from create embedding or hypernetwork task.")

class PreprocessResponse(BaseModel):
info: str = Field(title="Preprocess info", description="Response string from preprocessing task.")

fields = {}
for key, metadata in opts.data_labels.items():
value = opts.data.get(key)
optType = opts.typemap.get(type(metadata.default), type(metadata.default)) if metadata.default else Any
optType = opts.typemap.get(type(metadata.default), type(value))

if metadata is not None:
fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
if (metadata is not None):
fields.update({key: (Optional[optType], Field(
default=metadata.default ,description=metadata.label))})
else:
fields.update({key: (Optional[optType], Field())})

@@ -232,15 +231,8 @@ FlagsModel = create_model("Flags", **flags)

class SamplerItem(BaseModel):
name: str = Field(title="Name")
aliases: list[str] = Field(title="Aliases")
options: dict[str, str] = Field(title="Options")

class SchedulerItem(BaseModel):
name: str = Field(title="Name")
label: str = Field(title="Label")
aliases: Optional[list[str]] = Field(title="Aliases")
default_rho: Optional[float] = Field(title="Default Rho")
need_inner_model: Optional[bool] = Field(title="Needs Inner Model")
aliases: List[str] = Field(title="Aliases")
options: Dict[str, str] = Field(title="Options")

class UpscalerItem(BaseModel):
name: str = Field(title="Name")
@@ -249,9 +241,6 @@ class UpscalerItem(BaseModel):
model_url: Optional[str] = Field(title="URL")
scale: Optional[float] = Field(title="Scale")

class LatentUpscalerModeItem(BaseModel):
name: str = Field(title="Name")

class SDModelItem(BaseModel):
title: str = Field(title="Title")
model_name: str = Field(title="Model Name")
@@ -260,10 +249,6 @@ class SDModelItem(BaseModel):
filename: str = Field(title="Filename")
config: Optional[str] = Field(title="Config file")

class SDVaeItem(BaseModel):
model_name: str = Field(title="Model Name")
filename: str = Field(title="Filename")

class HypernetworkItem(BaseModel):
name: str = Field(title="Name")
path: Optional[str] = Field(title="Path")
@@ -282,6 +267,10 @@ class PromptStyleItem(BaseModel):
prompt: Optional[str] = Field(title="Prompt")
negative_prompt: Optional[str] = Field(title="Negative Prompt")

class ArtistItem(BaseModel):
name: str = Field(title="Name")
score: float = Field(title="Score")
category: str = Field(title="Category")

class EmbeddingItem(BaseModel):
step: Optional[int] = Field(title="Step", description="The number of steps that were used to train this embedding, if available")
@@ -291,8 +280,8 @@ class EmbeddingItem(BaseModel):
vectors: int = Field(title="Vectors", description="The number of vectors in the embedding")

class EmbeddingsResponse(BaseModel):
loaded: dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model")
skipped: dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)")
loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model")
skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)")

class MemoryResponse(BaseModel):
ram: dict = Field(title="RAM", description="System memory stats")
@@ -310,20 +299,11 @@ class ScriptArg(BaseModel):
minimum: Optional[Any] = Field(default=None, title="Minimum", description="Minimum allowed value for the argumentin UI")
maximum: Optional[Any] = Field(default=None, title="Minimum", description="Maximum allowed value for the argumentin UI")
step: Optional[Any] = Field(default=None, title="Minimum", description="Step for changing value of the argumentin UI")
choices: Optional[list[str]] = Field(default=None, title="Choices", description="Possible values for the argument")
choices: Optional[List[str]] = Field(default=None, title="Choices", description="Possible values for the argument")


class ScriptInfo(BaseModel):
name: str = Field(default=None, title="Name", description="Script name")
is_alwayson: bool = Field(default=None, title="IsAlwayson", description="Flag specifying whether this script is an alwayson script")
is_img2img: bool = Field(default=None, title="IsImg2img", description="Flag specifying whether this script is an img2img script")
args: list[ScriptArg] = Field(title="Arguments", description="List of script's arguments")

class ExtensionItem(BaseModel):
name: str = Field(title="Name", description="Extension name")
remote: str = Field(title="Remote", description="Extension Repository URL")
branch: str = Field(title="Branch", description="Extension Repository Branch")
commit_hash: str = Field(title="Commit Hash", description="Extension Repository Commit Hash")
version: str = Field(title="Version", description="Extension Version")
commit_date: str = Field(title="Commit Date", description="Extension Repository Commit Date")
enabled: bool = Field(title="Enabled", description="Flag specifying whether this extension is enabled")
args: List[ScriptArg] = Field(title="Arguments", description="List of script's arguments")
123
modules/cache.py
@@ -1,123 +0,0 @@
import json
import os
import os.path
import threading

import diskcache
import tqdm

from modules.paths import data_path, script_path

cache_filename = os.environ.get('SD_WEBUI_CACHE_FILE', os.path.join(data_path, "cache.json"))
cache_dir = os.environ.get('SD_WEBUI_CACHE_DIR', os.path.join(data_path, "cache"))
caches = {}
cache_lock = threading.Lock()


def dump_cache():
"""old function for dumping cache to disk; does nothing since diskcache."""

pass


def make_cache(subsection: str) -> diskcache.Cache:
return diskcache.Cache(
os.path.join(cache_dir, subsection),
size_limit=2**32, # 4 GB, culling oldest first
disk_min_file_size=2**18, # keep up to 256KB in Sqlite
)


def convert_old_cached_data():
try:
with open(cache_filename, "r", encoding="utf8") as file:
data = json.load(file)
except FileNotFoundError:
return
except Exception:
os.replace(cache_filename, os.path.join(script_path, "tmp", "cache.json"))
print('[ERROR] issue occurred while trying to read cache.json; old cache has been moved to tmp/cache.json')
return

total_count = sum(len(keyvalues) for keyvalues in data.values())

with tqdm.tqdm(total=total_count, desc="converting cache") as progress:
for subsection, keyvalues in data.items():
cache_obj = caches.get(subsection)
if cache_obj is None:
cache_obj = make_cache(subsection)
caches[subsection] = cache_obj

for key, value in keyvalues.items():
cache_obj[key] = value
progress.update(1)


def cache(subsection):
"""
Retrieves or initializes a cache for a specific subsection.

Parameters:
subsection (str): The subsection identifier for the cache.

Returns:
diskcache.Cache: The cache data for the specified subsection.
"""

cache_obj = caches.get(subsection)
if not cache_obj:
with cache_lock:
if not os.path.exists(cache_dir) and os.path.isfile(cache_filename):
convert_old_cached_data()

cache_obj = caches.get(subsection)
if not cache_obj:
cache_obj = make_cache(subsection)
caches[subsection] = cache_obj

return cache_obj


def cached_data_for_file(subsection, title, filename, func):
"""
Retrieves or generates data for a specific file, using a caching mechanism.

Parameters:
subsection (str): The subsection of the cache to use.
title (str): The title of the data entry in the subsection of the cache.
filename (str): The path to the file to be checked for modifications.
func (callable): A function that generates the data if it is not available in the cache.

Returns:
dict or None: The cached or generated data, or None if data generation fails.

The `cached_data_for_file` function implements a caching mechanism for data stored in files.
It checks if the data associated with the given `title` is present in the cache and compares the
modification time of the file with the cached modification time. If the file has been modified,
the cache is considered invalid and the data is regenerated using the provided `func`.
Otherwise, the cached data is returned.

If the data generation fails, None is returned to indicate the failure. Otherwise, the generated
or cached data is returned as a dictionary.
"""

existing_cache = cache(subsection)
ondisk_mtime = os.path.getmtime(filename)

entry = existing_cache.get(title)
if entry:
cached_mtime = entry.get("mtime", 0)
if ondisk_mtime > cached_mtime:
entry = None

if not entry or 'value' not in entry:
value = func()
if value is None:
return None

entry = {'mtime': ondisk_mtime, 'value': value}
existing_cache[title] = entry

dump_cache()

return entry['value']
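The docstring of cached_data_for_file above describes an mtime-keyed cache: if the file is newer than the cached entry, the value is regenerated with the supplied callable, otherwise the cached value is returned. A minimal usage sketch follows, assuming the master-branch modules.cache API shown above; the sha256 helper and the file paths are illustrative only and not part of the repository.

```python
# Illustrative only: how cached_data_for_file() from the diff above might be used.
import hashlib

from modules.cache import cached_data_for_file


def compute_sha256(path):
    # stand-in for an expensive computation worth caching
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return {"sha256": h.hexdigest()}


# Recomputed only when the file's mtime is newer than the cached entry.
info = cached_data_for_file("hashes", "checkpoint/example.safetensors",
                            "models/example.safetensors",
                            lambda: compute_sha256("models/example.safetensors"))
print(info["sha256"])
```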
@@ -1,11 +1,12 @@
import os.path
from functools import wraps
import html
import sys
import threading
import traceback
import time

from modules import shared, progress, errors, devices, fifo_lock, profiling
from modules import shared, progress

queue_lock = fifo_lock.FIFOLock()
queue_lock = threading.Lock()


def wrap_queued_call(func):
@@ -19,18 +20,17 @@ def wrap_queued_call(func):


def wrap_gradio_gpu_call(func, extra_outputs=None):
@wraps(func)
def f(*args, **kwargs):

# if the first argument is a string that says "task(...)", it is treated as a job id
if args and type(args[0]) == str and args[0].startswith("task(") and args[0].endswith(")"):
if len(args) > 0 and type(args[0]) == str and args[0][0:5] == "task(" and args[0][-1] == ")":
id_task = args[0]
progress.add_task_to_queue(id_task)
else:
id_task = None

with queue_lock:
shared.state.begin(job=id_task)
shared.state.begin()
progress.start_task(id_task)

try:
@@ -47,23 +47,6 @@ def wrap_gradio_gpu_call(func, extra_outputs=None):


def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
@wraps(func)
def f(*args, **kwargs):
try:
res = func(*args, **kwargs)
finally:
shared.state.skipped = False
shared.state.interrupted = False
shared.state.stopping_generation = False
shared.state.job_count = 0
shared.state.job = ""
return res

return wrap_gradio_call_no_job(f, extra_outputs, add_stats)


def wrap_gradio_call_no_job(func, extra_outputs=None, add_stats=False):
@wraps(func)
def f(*args, extra_outputs_array=extra_outputs, **kwargs):
run_memmon = shared.opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats
if run_memmon:
@@ -73,14 +56,19 @@ def wrap_gradio_call_no_job(func, extra_outputs=None, add_stats=False):
try:
res = list(func(*args, **kwargs))
except Exception as e:
# When printing out our debug argument list,
# do not print out more than a 100 KB of text
max_debug_str_len = 131072
message = "Error completing request"
arg_str = f"Arguments: {args} {kwargs}"[:max_debug_str_len]
if len(arg_str) > max_debug_str_len:
arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)"
errors.report(f"{message}\n{arg_str}", exc_info=True)
# When printing out our debug argument list, do not print out more than a MB of text
max_debug_str_len = 131072 # (1024*1024)/8

print("Error completing request", file=sys.stderr)
argStr = f"Arguments: {args} {kwargs}"
print(argStr[:max_debug_str_len], file=sys.stderr)
if len(argStr) > max_debug_str_len:
print(f"(Argument list truncated at {max_debug_str_len}/{len(argStr)} characters)", file=sys.stderr)

print(traceback.format_exc(), file=sys.stderr)

shared.state.job = ""
shared.state.job_count = 0

if extra_outputs_array is None:
extra_outputs_array = [None, '']
@@ -88,7 +76,9 @@ def wrap_gradio_call_no_job(func, extra_outputs=None, add_stats=False):
error_message = f'{type(e).__name__}: {e}'
res = extra_outputs_array + [f"<div class='error'>{html.escape(error_message)}</div>"]

devices.torch_gc()
shared.state.skipped = False
shared.state.interrupted = False
shared.state.job_count = 0

if not add_stats:
return tuple(res)
@@ -96,9 +86,9 @@ def wrap_gradio_call_no_job(func, extra_outputs=None, add_stats=False):
elapsed = time.perf_counter() - t
elapsed_m = int(elapsed // 60)
elapsed_s = elapsed % 60
elapsed_text = f"{elapsed_s:.1f} sec."
elapsed_text = f"{elapsed_s:.2f}s"
if elapsed_m > 0:
elapsed_text = f"{elapsed_m} min. "+elapsed_text
elapsed_text = f"{elapsed_m}m "+elapsed_text

if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
@@ -106,27 +96,14 @@ def wrap_gradio_call_no_job(func, extra_outputs=None, add_stats=False):
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
sys_pct = sys_peak/max(sys_total, 1) * 100
sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)

toltip_a = "Active: peak amount of video memory used during generation (excluding cached data)"
toltip_r = "Reserved: total amount of video memory allocated by the Torch library "
toltip_sys = "System: peak amount of video memory allocated by all running programs, out of total capacity"

text_a = f"<abbr title='{toltip_a}'>A</abbr>: <span class='measurement'>{active_peak/1024:.2f} GB</span>"
text_r = f"<abbr title='{toltip_r}'>R</abbr>: <span class='measurement'>{reserved_peak/1024:.2f} GB</span>"
text_sys = f"<abbr title='{toltip_sys}'>Sys</abbr>: <span class='measurement'>{sys_peak/1024:.1f}/{sys_total/1024:g} GB</span> ({sys_pct:.1f}%)"

vram_html = f"<p class='vram'>{text_a}, <wbr>{text_r}, <wbr>{text_sys}</p>"
vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
else:
vram_html = ''

if shared.opts.profiling_enable and os.path.exists(shared.opts.profiling_filename):
profiling_html = f"<p class='profile'> [ <a href='{profiling.webpath()}' download>Profile</a> ] </p>"
else:
profiling_html = ''

# last item is always HTML
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr><span class='measurement'>{elapsed_text}</span></p>{vram_html}{profiling_html}</div>"
res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed_text}</p>{vram_html}</div>"

return tuple(res)
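The wrap_gradio_gpu_call changes above all revolve around one pattern: a leading argument shaped like "task(...)" is treated as a job id, registered with the progress queue, and the wrapped work runs under a global queue lock between begin and end calls on the shared state. The sketch below is a stripped-down illustration of that pattern, not the repository code; it uses a plain threading.Lock in place of the FIFO lock, and the begin/end/add_task/start_task callables are stand-ins.

```python
# Simplified illustration of the queued-GPU-call pattern shown in the diff above.
import threading
from functools import wraps

queue_lock = threading.Lock()


def wrap_gpu_call(func, begin, end, add_task, start_task):
    @wraps(func)
    def wrapper(*args, **kwargs):
        # A first argument shaped like "task(...)" is treated as a job id.
        if args and isinstance(args[0], str) and args[0].startswith("task(") and args[0].endswith(")"):
            id_task = args[0]
            add_task(id_task)
        else:
            id_task = None

        with queue_lock:  # one GPU job at a time
            begin(job=id_task)
            start_task(id_task)
            try:
                return func(*args, **kwargs)
            finally:
                end()

    return wrapper
```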
@@ -1,7 +1,7 @@
import argparse
import json
import os
from modules.paths_internal import normalized_filepath, models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file # noqa: F401
from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file # noqa: F401

parser = argparse.ArgumentParser()

@@ -11,51 +11,44 @@ parser.add_argument("--skip-python-version-check", action='store_true', help="la
parser.add_argument("--skip-torch-cuda-test", action='store_true', help="launch.py argument: do not check if CUDA is able to work properly")
parser.add_argument("--reinstall-xformers", action='store_true', help="launch.py argument: install the appropriate version of xformers even if you have some version already installed")
parser.add_argument("--reinstall-torch", action='store_true', help="launch.py argument: install the appropriate version of torch even if you have some version already installed")
parser.add_argument("--update-check", action='store_true', help="launch.py argument: check for updates at startup")
parser.add_argument("--update-check", action='store_true', help="launch.py argument: chck for updates at startup")
parser.add_argument("--test-server", action='store_true', help="launch.py argument: configure server for testing")
parser.add_argument("--log-startup", action='store_true', help="launch.py argument: print a detailed log of what's happening at startup")
parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation")
parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
parser.add_argument("--dump-sysinfo", action='store_true', help="launch.py argument: dump limited sysinfo file (without information about extensions, options) to disk and quit")
parser.add_argument("--loglevel", type=str, help="log level; one of: CRITICAL, ERROR, WARNING, INFO, DEBUG", default=None)
parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint")
parser.add_argument("--data-dir", type=normalized_filepath, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
parser.add_argument("--models-dir", type=normalized_filepath, default=None, help="base path where models are stored; overrides --data-dir")
parser.add_argument("--config", type=normalized_filepath, default=sd_default_config, help="path to config which constructs model",)
parser.add_argument("--ckpt", type=normalized_filepath, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
parser.add_argument("--ckpt-dir", type=normalized_filepath, default=None, help="Path to directory with stable diffusion checkpoints")
parser.add_argument("--vae-dir", type=normalized_filepath, default=None, help="Path to directory with VAE files")
parser.add_argument("--gfpgan-dir", type=normalized_filepath, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=normalized_filepath, help="GFPGAN model file name", default=None)
parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
parser.add_argument("--vae-dir", type=str, default=None, help="Path to directory with VAE files")
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="does not do anything")
parser.add_argument("--embeddings-dir", type=normalized_filepath, default=os.path.join(data_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--textual-inversion-templates-dir", type=normalized_filepath, default=os.path.join(script_path, 'textual_inversion_templates'), help="directory with textual inversion templates")
parser.add_argument("--hypernetwork-dir", type=normalized_filepath, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
parser.add_argument("--localizations-dir", type=normalized_filepath, default=os.path.join(script_path, 'localizations'), help="localizations directory")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(data_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--textual-inversion-templates-dir", type=str, default=os.path.join(script_path, 'textual_inversion_templates'), help="directory with textual inversion templates")
parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--medvram-sdxl", action='store_true', help="enable --medvram optimization just for SDXL models")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="does not do anything")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "half", "autocast"], default="autocast")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="does not do anything.", default="")
parser.add_argument("--ngrok-options", type=json.loads, help='The options to pass to ngrok in JSON format, e.g.: \'{"authtoken_from_env":true, "basic_auth":"user:password", "oauth_provider":"google", "oauth_allow_emails":"user@asdf.com"}\'', default=dict())
parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options")
parser.add_argument("--codeformer-models-path", type=normalized_filepath, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
parser.add_argument("--gfpgan-models-path", type=normalized_filepath, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
parser.add_argument("--esrgan-models-path", type=normalized_filepath, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
parser.add_argument("--bsrgan-models-path", type=normalized_filepath, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
parser.add_argument("--realesrgan-models-path", type=normalized_filepath, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
parser.add_argument("--dat-models-path", type=normalized_filepath, help="Path to directory with DAT model file(s).", default=os.path.join(models_path, 'DAT'))
parser.add_argument("--clip-models-path", type=normalized_filepath, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--xformers-flash-attention", action='store_true', help="enable xformers with Flash Attention to improve reproducibility (supported for SD2.x or variant only)")
@@ -72,31 +65,27 @@ parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="pre
parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization")
parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
parser.add_argument("--use-ipex", action="store_true", help="use Intel XPU as torch device")
parser.add_argument("--disable-model-loading-ram-optimization", action='store_true', help="disable an optimization that reduces RAM use when loading a model")
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(data_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--freeze-settings", action='store_true', help="disable editing of all settings globally", default=False)
parser.add_argument("--freeze-settings-in-sections", type=str, help='disable editing settings in specific sections of the settings page by specifying a comma-delimited list such like "saving-images,upscaling". The list of setting names can be found in the modules/shared_options.py file', default=None)
parser.add_argument("--freeze-specific-settings", type=str, help='disable editing of individual settings by specifying a comma-delimited list like "samples_save,samples_format". The list of setting names can be found in the config.json file', default=None)
parser.add_argument("--freeze-settings", action='store_true', help="disable editing settings", default=False)
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(data_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--gradio-auth-path", type=normalized_filepath, help='set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth', default=None)
parser.add_argument("--gradio-auth-path", type=str, help='set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth', default=None)
parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything')
parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything")
parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it", default=[data_path])
parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it")
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, action='append', help="path or wildcard path of styles files, allow multiple entries.", default=[])
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(data_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
parser.add_argument("--enable-console-prompts", action='store_true', help="does not do anything", default=False) # Legacy compatibility, use as default value shared.opts.enable_console_prompts
parser.add_argument('--vae-path', type=normalized_filepath, help='Checkpoint to use as VAE; setting this argument disables all settings related to VAE', default=None)
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
parser.add_argument('--vae-path', type=str, help='Checkpoint to use as VAE; setting this argument disables all settings related to VAE', default=None)
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
parser.add_argument("--api", action='store_true', help="use api=True to launch the API together with the webui (use --nowebui instead for only the API)")
parser.add_argument("--api-auth", type=str, help='Set authentication for API like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
@@ -112,17 +101,9 @@ parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, req
parser.add_argument("--disable-tls-verify", action="store_false", help="When passed, enables the use of self-signed certificates.", default=None)
parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
parser.add_argument("--gradio-queue", action='store_true', help="does not do anything", default=True)
parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gradio queue; causes the webpage to use http requests instead of websockets; was the default in earlier versions")
parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gradio queue; causes the webpage to use http requests instead of websockets; was the defaul in earlier versions")
parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers")
parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False)
parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False)
parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy')
parser.add_argument('--add-stop-route', action='store_true', help='does not do anything')
parser.add_argument('--api-server-stop', action='store_true', help='enable server stop/restart/kill via api')
parser.add_argument('--timeout-keep-alive', type=int, default=30, help='set timeout_keep_alive for uvicorn')
parser.add_argument("--disable-all-extensions", action='store_true', help="prevent all extensions from running regardless of any other settings", default=False)
parser.add_argument("--disable-extra-extensions", action='store_true', help="prevent all extensions except built-in from running regardless of any other settings", default=False)
parser.add_argument("--skip-load-model-at-start", action='store_true', help="if load a model at web start, only take effect when --nowebui")
parser.add_argument("--unix-filenames-sanitization", action='store_true', help="allow any symbols except '/' in filenames. May conflict with your browser and file system")
parser.add_argument("--filenames-max-length", type=int, default=128, help='maximal length of filenames of saved images. If you override it, it can conflict with your file system')
parser.add_argument("--no-prompt-history", action='store_true', help="disable read prompt from last generation feature; settings this argument will not create '--data_path/params.txt' file")
parser.add_argument('--add-stop-route', action='store_true', help='add /_stop route to stop server')
Some files were not shown because too many files have changed in this diff.