support 16xx GPU and 4G GPU inference

RVC-Boss authored 2023-04-27 01:40:04 +08:00, committed by GitHub
parent 2ac8d553ab
commit a6cb4d3625
2 changed files with 57 additions and 30 deletions

Changed file 1 of 2 (filename not captured):

@@ -64,12 +64,25 @@ if not torch.cuda.is_available():
     device = "cpu"
     is_half = False
+gpu_mem=None
 if device not in ["cpu", "mps"]:
-    gpu_name = torch.cuda.get_device_name(int(device.split(":")[-1]))
-    if "16" in gpu_name or "MX" in gpu_name:
-        print("16系显卡/MX系显卡强制单精度")
+    i_device=int(device.split(":")[-1])
+    gpu_name = torch.cuda.get_device_name(i_device)
+    if "16" in gpu_name or "P40"in gpu_name.upper() or "1070"in gpu_name or "1080"in gpu_name:
+        print("16系显卡强制单精度")
         is_half = False
+        with open("configs/32k.json","r")as f:strr=f.read().replace("true","false")
+        with open("configs/32k.json","w")as f:f.write(strr)
+        with open("configs/40k.json","r")as f:strr=f.read().replace("true","false")
+        with open("configs/40k.json","w")as f:f.write(strr)
+        with open("configs/48k.json","r")as f:strr=f.read().replace("true","false")
+        with open("configs/48k.json","w")as f:f.write(strr)
+        with open("trainset_preprocess_pipeline_print.py","r")as f:strr=f.read().replace("3.7","3.0")
+        with open("trainset_preprocess_pipeline_print.py","w")as f:f.write(strr)
+    gpu_mem=int(torch.cuda.get_device_properties(i_device).total_memory/1024/1024/1024+0.4)
+    if(gpu_mem<=4):
+        with open("trainset_preprocess_pipeline_print.py","r")as f:strr=f.read().replace("3.7","3.0")
+        with open("trainset_preprocess_pipeline_print.py","w")as f:f.write(strr)
 
 from multiprocessing import cpu_count
 
 if n_cpu == 0:
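Taken together, this hunk forces single precision (is_half = False) on cards with weak fp16 support (16-series, and now also P40/1070/1080), rewrites the three training configs so "true" becomes "false" (evidently switching off an fp16 training flag), patches a 3.7 constant in trainset_preprocess_pipeline_print.py down to 3.0 (apparently a slice-length setting in the preprocessing script), and records VRAM in GiB with a +0.4 offset so a card reporting, say, 3.6 GiB counts as 4. A minimal standalone sketch of the two checks; the helper names needs_fp32 and vram_gb are illustrative, not from the repo:

    import torch

    def needs_fp32(device="cuda:0"):
        # True when the card is known to run half precision poorly,
        # using the same name tests as the diff above.
        idx = int(device.split(":")[-1])
        name = torch.cuda.get_device_name(idx)
        return ("16" in name or "P40" in name.upper()
                or "1070" in name or "1080" in name)

    def vram_gb(device="cuda:0"):
        # Total memory in GiB; the +0.4 rounds a "3.6 GiB" card up to 4.
        idx = int(device.split(":")[-1])
        props = torch.cuda.get_device_properties(idx)
        return int(props.total_memory / 1024 / 1024 / 1024 + 0.4)

    if torch.cuda.is_available():
        print(needs_fp32(), vram_gb())

Note that the commit patches configs and a source file by textual find-and-replace, which flips every "true" and "3.7" in those files and re-runs on every start; that fragility is worth keeping in mind when reading the diff.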
@@ -86,3 +99,8 @@ else:
     x_query = 6
     x_center = 38
     x_max = 41
+if(gpu_mem!=None and gpu_mem<=4):
+    x_pad = 1
+    x_query = 5
+    x_center = 30
+    x_max = 32
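x_pad, x_query, x_center and x_max size the chunked inference windows, so shrinking them trades some context per chunk for a smaller footprint on cards with 4 GiB or less. A sketch of the override as a function; the name apply_low_vram_overrides is illustrative, and the defaults below are only those visible in this hunk's context lines (the default x_pad is not shown, so it is omitted):

    def apply_low_vram_overrides(params, gpu_mem):
        # Values copied from the hunk above; the function itself is illustrative.
        if gpu_mem is not None and gpu_mem <= 4:
            params.update({"x_pad": 1, "x_query": 5, "x_center": 30, "x_max": 32})
        return params

    # Defaults taken from the context lines of the hunk:
    print(apply_low_vram_overrides({"x_query": 6, "x_center": 38, "x_max": 41}, gpu_mem=4))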

Changed file 2 of 2 (filename not captured):

@@ -5,7 +5,7 @@ from subprocess import Popen
 from time import sleep
 import torch, os, traceback, sys, warnings, shutil, numpy as np
 import faiss
-
+from random import shuffle
 now_dir = os.getcwd()
 sys.path.append(now_dir)
 tmp = os.path.join(now_dir, "TEMP")
@@ -23,6 +23,7 @@ i18n = I18nAuto()
 ncpu = cpu_count()
 ngpu = torch.cuda.device_count()
 gpu_infos = []
+mem=[]
 if (not torch.cuda.is_available()) or ngpu == 0:
     if_gpu_ok = False
 else:
@@ -48,11 +49,13 @@ else:
         ):  # A10#A100#V100#A40#P40#M40#K80#A4500
             if_gpu_ok = True  # 至少有一张能用的N卡
             gpu_infos.append("%s\t%s" % (i, gpu_name))
-gpu_info = (
-    "\n".join(gpu_infos)
-    if if_gpu_ok == True and len(gpu_infos) > 0
-    else "很遗憾您这没有能用的显卡来支持您训练"
-)
+            mem.append(int(torch.cuda.get_device_properties(i).total_memory/1024/1024/1024+0.4))
+if if_gpu_ok == True and len(gpu_infos) > 0:
+    gpu_info ="\n".join(gpu_infos)
+    default_batch_size=min(mem)//2
+else:
+    gpu_info = "很遗憾您这没有能用的显卡来支持您训练"
+    default_batch_size=1
 gpus = "-".join([i[0] for i in gpu_infos])
 from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
 from scipy.io import wavfile
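Alongside the names of usable cards, the web UI now records each card's VRAM (same GiB rounding as in the first file) and seeds the training batch size with min(mem) // 2, half the smallest card's memory in GiB, falling back to 1 when no usable NVIDIA GPU exists. A self-contained restatement of the heuristic; unlike the diff, which appends to mem only for cards that pass the name check, this sketch scans every visible device:

    import torch

    def pick_default_batch_size():
        # 1 without CUDA; otherwise half the smallest card's GiB, floored.
        if not torch.cuda.is_available() or torch.cuda.device_count() == 0:
            return 1
        mem = [
            int(torch.cuda.get_device_properties(i).total_memory / 1024 / 1024 / 1024 + 0.4)
            for i in range(torch.cuda.device_count())
        ]
        # Matches the diff exactly; note a 2-3 GiB card still yields 1.
        return min(mem) // 2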
@@ -564,15 +567,18 @@ def click_train(
                 )
             )
     if if_f0_3 == "是":
-        opt.append(
-            "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
-            % (now_dir, sr2, now_dir, now_dir, now_dir, spk_id5)
-        )
+        for _ in range(2):
+            opt.append(
+                "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
+                % (now_dir, sr2, now_dir, now_dir, now_dir, spk_id5)
+            )
     else:
-        opt.append(
-            "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s"
-            % (now_dir, sr2, now_dir, spk_id5)
-        )
+        for _ in range(2):
+            opt.append(
+                "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s"
+                % (now_dir, sr2, now_dir, spk_id5)
+            )
+    shuffle(opt)
     with open("%s/filelist.txt" % exp_dir, "w") as f:
         f.write("\n".join(opt))
     print("write filelist done")
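Both click_train and train1key (next hunk) now append the mute padding sample to the filelist twice instead of once and shuffle the finished list before writing it, so the mute entries end up interleaved with real utterances rather than grouped at the end of filelist.txt; this is what the new from random import shuffle at the top of the file is for. A toy illustration with hypothetical paths:

    from random import shuffle

    opt = ["spk/a.wav|feat/a.npy|0", "spk/b.wav|feat/b.npy|0"]
    for _ in range(2):  # the mute entry is now added twice
        opt.append("logs/mute/0_gt_wavs/mute.wav|logs/mute/3_feature256/mute.npy|0")
    shuffle(opt)        # interleave before writing filelist.txt
    print("\n".join(opt))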
@@ -789,15 +795,18 @@ def train1key(
                 )
             )
     if if_f0_3 == "是":
-        opt.append(
-            "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
-            % (now_dir, sr2, now_dir, now_dir, now_dir, spk_id5)
-        )
+        for _ in range(2):
+            opt.append(
+                "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
+                % (now_dir, sr2, now_dir, now_dir, now_dir, spk_id5)
+            )
     else:
-        opt.append(
-            "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s"
-            % (now_dir, sr2, now_dir, spk_id5)
-        )
+        for _ in range(2):
+            opt.append(
+                "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature256/mute.npy|%s"
+                % (now_dir, sr2, now_dir, spk_id5)
+            )
+    shuffle(opt)
     with open("%s/filelist.txt" % exp_dir, "w") as f:
         f.write("\n".join(opt))
     yield get_info_str("write filelist done")
@@ -1039,7 +1048,7 @@ with gr.Blocks() as app:
                     minimum=0,
                     maximum=1,
                     label="检索特征占比",
-                    value=0.65,
+                    value=0.76,
                     interactive=True,
                 )
                 f0_file = gr.File(label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调"))
@@ -1253,10 +1262,10 @@ with gr.Blocks() as app:
                 )
                 batch_size12 = gr.Slider(
                     minimum=0,
-                    maximum=32,
+                    maximum=40,
                     step=1,
                     label="每张显卡的batch_size",
-                    value=4,
+                    value=default_batch_size,
                     interactive=True,
                 )
                 if_save_latest13 = gr.Radio(
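On the UI side, the batch-size slider's ceiling rises from 32 to 40 and its initial value becomes the computed default_batch_size instead of a hard-coded 4. A sketch of the resulting component, assuming default_batch_size was derived as above (the value 6 below stands in for a single 12 GiB card):

    import gradio as gr

    default_batch_size = 6  # e.g. one 12 GiB card: 12 // 2

    batch_size12 = gr.Slider(
        minimum=0,
        maximum=40,                   # raised from 32
        step=1,
        label="每张显卡的batch_size",  # batch size per GPU
        value=default_batch_size,     # was the hard-coded 4
        interactive=True,
    )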
@@ -1270,7 +1279,7 @@ with gr.Blocks() as app:
                     "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速"
                 ),
                 choices=["是", "否"],
-                value="是",
+                value="否",
                 interactive=True,
             )
         with gr.Row():