Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
lint, add init_cuda_malloc()
commit 0cc81464bb (parent e78be27e75)
cuda_malloc.py

@@ -39,9 +39,9 @@ def get_gpu_names():
     else:
         gpu_names = set()
         out = subprocess.check_output(['nvidia-smi', '-L'])
-        for l in out.split(b'\n'):
-            if len(l) > 0:
-                gpu_names.add(l.decode('utf-8').split(' (UUID')[0])
+        for line in out.split(b'\n'):
+            if len(line) > 0:
+                gpu_names.add(line.decode('utf-8').split(' (UUID')[0])
         return gpu_names
 
 blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M",
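For context, a minimal sketch of the non-Windows path of get_gpu_names() as it reads after this hunk; the Windows branch is not shown in the diff and is stubbed out here as an assumption.

    import os
    import subprocess

    def get_gpu_names():
        if os.name == 'nt':
            # Assumption: the real file has a Windows-specific branch here,
            # which this hunk does not show.
            return set()
        gpu_names = set()
        # `nvidia-smi -L` prints one line per GPU, e.g.
        # "GPU 0: NVIDIA GeForce RTX 3090 (UUID: GPU-...)"
        out = subprocess.check_output(['nvidia-smi', '-L'])
        for line in out.split(b'\n'):
            if len(line) > 0:
                # Keep only the device name, dropping the " (UUID: ...)" suffix.
                gpu_names.add(line.decode('utf-8').split(' (UUID')[0])
        return gpu_names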
@@ -55,7 +55,7 @@ blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeFor
 def cuda_malloc_supported():
     try:
         names = get_gpu_names()
-    except:
+    except Exception:
         names = set()
     for x in names:
         if "NVIDIA" in x:
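The hunk cuts off inside the loop; a plausible completion, given the blacklist set above, is that any NVIDIA GPU whose name contains a blacklisted model disables the async allocator. A sketch under that assumption, reusing get_gpu_names() and blacklist from this file:

    def cuda_malloc_supported():
        try:
            names = get_gpu_names()
        except Exception:
            names = set()
        for x in names:
            if "NVIDIA" in x:
                # Assumption: older blacklisted GPUs do not support
                # cudaMallocAsync, so finding one keeps the feature off.
                for b in blacklist:
                    if b in x:
                        return False
        return True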
@@ -82,11 +82,11 @@ if not args.cuda_malloc:
         version = module.__version__
         if int(version[0]) >= 2: #enable by default for torch version 2.0 and up
             args.cuda_malloc = cuda_malloc_supported()
-    except:
+    except Exception:
         pass
 
-
-if args.cuda_malloc and not args.disable_cuda_malloc:
-    env_var = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', None)
-    if env_var is None:
-        env_var = "backend:cudaMallocAsync"
+def init_cuda_malloc():
+    if args.cuda_malloc and not args.disable_cuda_malloc:
+        env_var = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', None)
+        if env_var is None:
+            env_var = "backend:cudaMallocAsync"
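This hunk also ends mid-function. A sketch of how the new init_cuda_malloc() presumably finishes, appending the backend to any pre-existing PYTORCH_CUDA_ALLOC_CONF value and exporting it; the else branch, the final os.environ assignment, and the SimpleNamespace stand-in for the module's parsed args are assumptions, not shown in the diff:

    import os
    from types import SimpleNamespace

    # Stand-in for the module's command-line flags (assumption for this sketch).
    args = SimpleNamespace(cuda_malloc=True, disable_cuda_malloc=False)

    def init_cuda_malloc():
        if args.cuda_malloc and not args.disable_cuda_malloc:
            env_var = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', None)
            if env_var is None:
                env_var = "backend:cudaMallocAsync"
            else:
                # Assumption: preserve user-supplied allocator settings and
                # append the async backend to them.
                env_var += ",backend:cudaMallocAsync"
            # PyTorch reads PYTORCH_CUDA_ALLOC_CONF when torch is first
            # imported, so this must run before the torch import.
            os.environ['PYTORCH_CUDA_ALLOC_CONF'] = env_var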
webui.py (3 changed lines)
@@ -10,7 +10,8 @@ from modules import initialize
 startup_timer = timer.startup_timer
 startup_timer.record("launcher")
 
-import cuda_malloc
+from cuda_malloc import init_cuda_malloc
+init_cuda_malloc()
 startup_timer.record("cuda_malloc")
 
 initialize.imports()
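With this change webui.py configures the allocator explicitly via init_cuda_malloc() before initialize.imports() pulls in torch, rather than relying on import-time side effects of the cuda_malloc module. One way to confirm the async backend took effect, assuming a CUDA-enabled PyTorch 2.x:

    import torch

    # "cudaMallocAsync" means PYTORCH_CUDA_ALLOC_CONF was honored;
    # "native" means the default caching allocator is still in use.
    print(torch.cuda.get_allocator_backend())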