mirror of
https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
synced 2025-04-24 22:09:00 +08:00
Merge pull request #3722 from evshiron/feat/progress-api
prototype progress api
This commit is contained in:
commit
060ee5d3a7
@ -1,12 +1,40 @@
|
|||||||
|
import time
|
||||||
import uvicorn
|
import uvicorn
|
||||||
from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image
|
from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image
|
||||||
from fastapi import APIRouter, HTTPException
|
from fastapi import APIRouter, Depends, HTTPException
|
||||||
import modules.shared as shared
|
import modules.shared as shared
|
||||||
|
from modules import devices
|
||||||
from modules.api.models import *
|
from modules.api.models import *
|
||||||
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
|
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
|
||||||
from modules.sd_samplers import all_samplers
|
from modules.sd_samplers import all_samplers
|
||||||
from modules.extras import run_extras, run_pnginfo
|
from modules.extras import run_extras, run_pnginfo
|
||||||
|
|
||||||
|
# copy from wrap_gradio_gpu_call of webui.py
|
||||||
|
# because queue lock will be acquired in api handlers
|
||||||
|
# and time start needs to be set
|
||||||
|
# the function has been modified into two parts
|
||||||
|
|
||||||
|
def before_gpu_call():
    """Reset the shared generation state before a GPU job starts.

    Copied from wrap_gradio_gpu_call of webui.py, because the queue lock
    is acquired inside the API handlers and time_start needs to be set.
    """
    devices.torch_gc()

    state = shared.state
    state.sampling_step = 0
    state.job_count = -1
    state.job_no = 0
    state.job_timestamp = state.get_job_timestamp()
    state.current_latent = None
    state.current_image = None
    state.current_image_sampling_step = 0
    state.skipped = False
    state.interrupted = False
    state.textinfo = None
    # time_start anchors the ETA computation in progressapi
    state.time_start = time.time()
|
||||||
|
|
||||||
|
def after_gpu_call():
    """Clear the job markers once a GPU call has finished."""
    state = shared.state
    state.job = ""
    state.job_count = 0
    devices.torch_gc()
|
||||||
|
|
||||||
def upscaler_to_index(name: str):
|
def upscaler_to_index(name: str):
|
||||||
try:
|
try:
|
||||||
return [x.name.lower() for x in shared.sd_upscalers].index(name.lower())
|
return [x.name.lower() for x in shared.sd_upscalers].index(name.lower())
|
||||||
@ -33,6 +61,7 @@ class Api:
|
|||||||
self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse)
|
self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse)
|
||||||
self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse)
|
self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse)
|
||||||
self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse)
|
self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse)
|
||||||
|
self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse)
|
||||||
|
|
||||||
def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
|
def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
|
||||||
sampler_index = sampler_to_index(txt2imgreq.sampler_index)
|
sampler_index = sampler_to_index(txt2imgreq.sampler_index)
|
||||||
@ -49,8 +78,10 @@ class Api:
|
|||||||
)
|
)
|
||||||
p = StableDiffusionProcessingTxt2Img(**vars(populate))
|
p = StableDiffusionProcessingTxt2Img(**vars(populate))
|
||||||
# Override object param
|
# Override object param
|
||||||
|
before_gpu_call()
|
||||||
with self.queue_lock:
|
with self.queue_lock:
|
||||||
processed = process_images(p)
|
processed = process_images(p)
|
||||||
|
after_gpu_call()
|
||||||
|
|
||||||
b64images = list(map(encode_pil_to_base64, processed.images))
|
b64images = list(map(encode_pil_to_base64, processed.images))
|
||||||
|
|
||||||
@ -89,8 +120,10 @@ class Api:
|
|||||||
|
|
||||||
p.init_images = imgs
|
p.init_images = imgs
|
||||||
# Override object param
|
# Override object param
|
||||||
|
before_gpu_call()
|
||||||
with self.queue_lock:
|
with self.queue_lock:
|
||||||
processed = process_images(p)
|
processed = process_images(p)
|
||||||
|
after_gpu_call()
|
||||||
|
|
||||||
b64images = list(map(encode_pil_to_base64, processed.images))
|
b64images = list(map(encode_pil_to_base64, processed.images))
|
||||||
|
|
||||||
@ -134,6 +167,32 @@ class Api:
|
|||||||
|
|
||||||
return PNGInfoResponse(info=result[1])
|
return PNGInfoResponse(info=result[1])
|
||||||
|
|
||||||
|
def progressapi(self, req: ProgressRequest = Depends()):
    """Report generation progress.

    Returns a ProgressResponse with progress in [0, 1], a relative ETA in
    seconds, a snapshot of the shared state and, unless the client asked to
    skip it, the current preview image as base64.

    Copied from check_progress_call of ui.py.
    """
    # job_count == 0 means the server is idle: nothing to report.
    if shared.state.job_count == 0:
        return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict())

    # Small epsilon so the ETA division below can never divide by zero.
    progress = 0.01

    if shared.state.job_count > 0:
        progress += shared.state.job_no / shared.state.job_count
        # BUG FIX: the per-step term is now guarded by job_count > 0 as well.
        # before_gpu_call sets job_count = -1 while a job is being prepared,
        # and the unguarded division by job_count produced a negative
        # progress contribution and a nonsense ETA during that window.
        if shared.state.sampling_steps > 0:
            progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps

    time_since_start = time.time() - shared.state.time_start
    eta = time_since_start / progress
    eta_relative = eta - time_since_start

    progress = min(progress, 1)

    current_image = None
    if shared.state.current_image and not req.skip_current_image:
        current_image = encode_pil_to_base64(shared.state.current_image)

    return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image)
|
||||||
|
|
||||||
def launch(self, server_name, port):
    """Attach the API router to the app and serve it with uvicorn (blocks)."""
    app = self.app
    app.include_router(self.router)
    uvicorn.run(app, host=server_name, port=port)
|
||||||
|
@ -156,3 +156,12 @@ class PNGInfoRequest(BaseModel):
|
|||||||
|
|
||||||
class PNGInfoResponse(BaseModel):
    """Response body for /sdapi/v1/png-info."""
    info: str = Field(title="Image info", description="A string with all the info the image had")
|
||||||
|
|
||||||
|
class ProgressRequest(BaseModel):
    """Query parameters for /sdapi/v1/progress."""
    skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization")
|
||||||
|
|
||||||
|
class ProgressResponse(BaseModel):
    """Response body for /sdapi/v1/progress."""
    progress: float = Field(title="Progress", description="The progress with a range of 0 to 1")
    eta_relative: float = Field(title="ETA in secs")
    state: dict = Field(title="State", description="The current state snapshot")
    current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. opts.show_progress_every_n_steps is required for this to work.")
|
||||||
|
@ -147,6 +147,19 @@ class State:
|
|||||||
def get_job_timestamp(self):
    """Return the current local time as a compact YYYYMMDDHHMMSS string."""
    # NOTE(review): original comment asked — shouldn't this return
    # self.job_timestamp instead of re-reading the clock? Verify with callers.
    now = datetime.datetime.now()
    return now.strftime("%Y%m%d%H%M%S")
|
||||||
|
|
||||||
|
def dict(self):
    """Return a plain-dict, JSON-serializable snapshot of the job state."""
    obj = {
        "skipped": self.skipped,
        # BUG FIX: this was `self.skipped`, so the "interrupted" field
        # silently mirrored the skipped flag instead of reporting whether
        # the job was interrupted.
        "interrupted": self.interrupted,
        "job": self.job,
        "job_count": self.job_count,
        "job_no": self.job_no,
        "sampling_step": self.sampling_step,
        "sampling_steps": self.sampling_steps,
    }

    return obj
|
||||||
|
|
||||||
|
|
||||||
state = State()
|
state = State()
|
||||||
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user