Merge pull request #12924 from catboxanon/fix/cudnn

More accurate check for enabling cuDNN benchmark on 16XX cards
commit 558baffa2c
AUTOMATIC1111 committed via GitHub on 2023-09-09 09:33:37 +03:00

@@ -60,7 +60,8 @@ def enable_tf32():
         # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't
         # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
-        if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())):
+        device_id = (int(shared.cmd_opts.device_id) if shared.cmd_opts.device_id.isdigit() else 0) or torch.cuda.current_device()
+        if torch.cuda.get_device_capability(device_id) == (7, 5) and torch.cuda.get_device_name(device_id).startswith("NVIDIA GeForce GTX 16"):
             torch.backends.cudnn.benchmark = True
 
         torch.backends.cuda.matmul.allow_tf32 = True
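
The change narrows the condition for turning on cuDNN benchmark mode: instead of enabling it whenever any visible GPU reports compute capability (7, 5), it resolves the active device (honouring --device-id) and additionally requires the device name to identify a GTX 16xx card, so RTX 20xx cards, which share capability (7, 5), no longer trigger it. A minimal standalone sketch of the new check, using a hypothetical helper name is_gtx_16xx (not part of the webui code) and ignoring the --device-id handling from shared.cmd_opts:

import torch

def is_gtx_16xx(device_id: int) -> bool:
    # GTX 16xx cards are Turing parts that report compute capability (7, 5);
    # the name check distinguishes them from RTX 20xx cards with the same capability.
    return (
        torch.cuda.get_device_capability(device_id) == (7, 5)
        and torch.cuda.get_device_name(device_id).startswith("NVIDIA GeForce GTX 16")
    )

if torch.cuda.is_available():
    device_id = torch.cuda.current_device()  # the webui can also take this from --device-id
    if is_gtx_16xx(device_id):
        # benchmark mode lets these cards use fp16 paths they otherwise can't (see PR #4407)
        torch.backends.cudnn.benchmark = True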