Added support for Docker. The container can easily be started with docker compose.

Lukas 2024-11-27 16:24:43 +01:00
parent 82a973c043
commit df33b6749a
7 changed files with 54 additions and 2 deletions

2
.dockerignore Normal file

@@ -0,0 +1,2 @@
models
models/

19
Dockerfile Normal file

@@ -0,0 +1,19 @@
FROM python:3.10-bookworm
WORKDIR /webui
RUN apt-get update && \
    apt-get install ffmpeg libsm6 libxext6 dos2unix google-perftools -y
COPY . .
RUN dos2unix ./webui.sh ./webui-user.sh
RUN groupadd --system --gid 1000 webui && \
    useradd webui --uid 1000 --gid 1000 --create-home --shell /bin/bash && \
    chown -R webui:webui .
USER 1000:1000
RUN ./webui.sh --prepare-environment-only --skip-torch-cuda-test
CMD [ "./webui.sh", "--skip-prepare-environment" ]

README.md

@@ -104,6 +104,16 @@ Alternatively, use online services (like Google Colab):
- [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services)
### Running with Docker
DISCLAIMER: This currently only works with NVIDIA GPUs.
You need to have [Docker](https://www.docker.com/) installed on your system. Clone this repository and run `docker compose up` in its root directory. The first run takes a long time because all dependencies are installed; subsequent runs should start the webui almost instantly. To stop the webui, press Ctrl+C and wait a few seconds.
Models are provided to the Docker container through a bind mount, so a model added to the models directory becomes available in the webui after a checkpoint refresh, without rebuilding or restarting the container.
The server will be accessible at [localhost:7860](http://localhost:7860).
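A minimal sketch of that workflow (the clone URL is a placeholder for this repository; the checkpoint path assumes the standard models layout):

```bash
git clone <this-repository-url> stable-diffusion-webui   # placeholder URL
cd stable-diffusion-webui
docker compose up            # first run builds the image and installs dependencies
# While it is running, dropping a checkpoint into models/Stable-diffusion/
# and hitting the checkpoint refresh button makes it show up without a restart.
```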
### Installation on Windows 10/11 with NVidia-GPUs using release package
1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract its contents.
2. Run `update.bat`.

16
compose.yml Normal file

@@ -0,0 +1,16 @@
services:
  webui:
    build: .
    volumes:
      - type: bind
        source: ./models
        target: /webui/models
    ports:
      - 7860:7860
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
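The `deploy.resources.reservations.devices` block only works if the host has the NVIDIA Container Toolkit installed. A quick way to confirm GPU passthrough before starting the webui (the CUDA image tag is only an example):

```bash
# Should print the same nvidia-smi table as on the host when the toolkit is configured.
docker run --rm --gpus all nvidia/cuda:12.4.1-base-ubuntu22.04 nvidia-smi
```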

launch.py

@@ -34,6 +34,9 @@ def main():
    launch_utils.startup_timer.record("initial startup")
    if args.prepare_environment_only:
        print("Setting up requirements without starting the server, as the --prepare-environment-only flag was passed")
    with launch_utils.startup_timer.subcategory("prepare environment"):
        if not args.skip_prepare_environment:
            prepare_environment()
@@ -41,7 +44,8 @@ def main():
    if args.test_server:
        configure_for_tests()
    start()
    if not args.prepare_environment_only:
        start()
if __name__ == "__main__":

modules/cmd_args.py

@@ -126,3 +126,4 @@ parser.add_argument("--skip-load-model-at-start", action='store_true', help="if
parser.add_argument("--unix-filenames-sanitization", action='store_true', help="allow any symbols except '/' in filenames. May conflict with your browser and file system")
parser.add_argument("--filenames-max-length", type=int, default=128, help='maximal length of filenames of saved images. If you override it, it can conflict with your file system')
parser.add_argument("--no-prompt-history", action='store_true', help="disable read prompt from last generation feature; settings this argument will not create '--data_path/params.txt' file")
parser.add_argument("--prepare-environment-only", action='store_true', help="launch.py argument: only prepare environment without launching webui run with --skip-torch-cuda-test")

webui-user.sh

@@ -10,7 +10,7 @@
#clone_dir="stable-diffusion-webui"
# Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention"
#export COMMANDLINE_ARGS=""
export COMMANDLINE_ARGS="--listen"
# python3 executable
#python_cmd="python3"
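Setting `--listen` here is what makes the Docker setup reachable: it tells the webui to bind to 0.0.0.0 instead of 127.0.0.1, so the 7860:7860 port mapping in compose.yml works from the host. Since webui.sh sources this file, the container effectively runs something like the sketch below:

```bash
# Rough sketch of the effective launch inside the container (CMD from the Dockerfile,
# with COMMANDLINE_ARGS exported by webui-user.sh).
COMMANDLINE_ARGS="--listen" ./webui.sh --skip-prepare-environment
```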