fix: train step2a & add arg --port --pycmd --noparallel

源文雨 2023-04-01 16:42:19 +08:00
parent 9d1eb265c3
commit a3089e6ead
7 changed files with 64 additions and 59 deletions

.gitignore vendored

@@ -3,3 +3,4 @@ __pycache__
/TEMP
*.pyd
hubert_base.pt
/logs


@@ -1,3 +1,10 @@
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int, default=7865, help="Listen port")
parser.add_argument("--pycmd", type=str, default="python", help="Python command")
parser.add_argument("--colab", action='store_true', help="Launch in colab")
parser.add_argument("--noparallel", action='store_true', help="Disable parallel processing")
cmd_opts = parser.parse_args()
############ Offline VC parameters
inp_root=r"白鹭霜华长条"# convert every audio file under this input directory; do not put non-audio files here
opt_root=r"opt"# output directory
@@ -7,10 +14,15 @@ person=r"weights\洛天依v3.pt"#目前只有洛天依v3
device = "cuda:0"# fill in cuda:x or cpu; x is the card index; only NVIDIA cards are accelerated
is_half=True# just use True on 9/10/20/30/40-series GPUs; quality is unaffected, and >=20-series cards get a speedup
n_cpu=0# default 0 uses all threads; set a number to limit CPU usage
############ python command path
python_cmd=cmd_opts.pycmd
listen_port=cmd_opts.port
iscolab=cmd_opts.colab
noparallel=cmd_opts.noparallel
############ do not edit below this line
import torch
if(torch.cuda.is_available()==False):
print("没有发现支持的N卡使用CPU进行推理")
print("没有发现支持的N卡, 使用CPU进行推理")
device="cpu"
is_half=False
if(device!="cpu"):
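For reference, the new config.py flags condense to the minimal sketch below (names and defaults taken from the diff above; the example invocation in the last comment is illustrative, since the web UI entry script is not shown here).

```python
import argparse

# Minimal sketch of the new config.py argument handling.
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int, default=7865, help="Listen port")
parser.add_argument("--pycmd", type=str, default="python", help="Python command")
parser.add_argument("--colab", action="store_true", help="Launch in colab")
parser.add_argument("--noparallel", action="store_true", help="Disable parallel processing")
cmd_opts = parser.parse_args()

python_cmd = cmd_opts.pycmd       # interpreter used to spawn training/preprocessing scripts
listen_port = cmd_opts.port       # port for the Gradio server
iscolab = cmd_opts.colab          # launch with share=True for Colab
noparallel = cmd_opts.noparallel  # run dataset preprocessing serially

# Example (illustrative): python <webui entry script> --port 7866 --pycmd python3 --noparallel
```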


@@ -1,9 +1,10 @@
from multiprocessing import cpu_count
import threading
from time import sleep
from subprocess import Popen,PIPE,run as runn
from subprocess import Popen
from time import sleep
import torch, pdb, os,traceback,sys,warnings,shutil,numpy as np,faiss
import torch, os,traceback,sys,warnings,shutil,numpy as np
import faiss
# check whether there is an NVIDIA GPU usable for training and accelerated inference
ncpu=cpu_count()
ngpu=torch.cuda.device_count()
@@ -33,11 +34,9 @@ from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFs
from scipy.io import wavfile
from fairseq import checkpoint_utils
import gradio as gr
import librosa
import logging
from vc_infer_pipeline import VC
import soundfile as sf
from config import is_half,device,is_half
from config import is_half,device,is_half,python_cmd,listen_port,iscolab,noparallel
from infer_uvr5 import _audio_pre_
from my_utils import load_audio
from train.process_ckpt import show_info,change_info,merge,extract_small_model
@@ -222,7 +221,7 @@ def preprocess_dataset(trainset_dir,exp_dir,sr,n_p=ncpu):
os.makedirs("%s/logs/%s"%(now_dir,exp_dir),exist_ok=True)
f = open("%s/logs/%s/preprocess.log"%(now_dir,exp_dir), "w")
f.close()
cmd="python trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s"%(trainset_dir,sr,n_p,now_dir,exp_dir)
cmd=python_cmd + " trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s "%(trainset_dir,sr,n_p,now_dir,exp_dir)+str(noparallel)
print(cmd)
p = Popen(cmd, shell=True)#, stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir
### under gr, popen's read only returns all output at once after the run finishes, while without gr it reads line by line; so a separate text stream has to be polled on a timer instead
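In effect, preprocess_dataset now builds its command from the configurable interpreter and appends the noparallel flag as a trailing positional argument. A minimal sketch with illustrative values:

```python
import os
from subprocess import Popen

# Illustrative values; python_cmd and noparallel normally come from config.
python_cmd = "python"
noparallel = False
trainset_dir, sr, n_p = "/data/trainset", 40000, 8
now_dir, exp_dir = os.getcwd(), "mi-test"

# The boolean is appended with str(), so the child script receives the literal
# text "True" or "False" as its fifth argument.
cmd = python_cmd + " trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s " % (
    trainset_dir, sr, n_p, now_dir, exp_dir
) + str(noparallel)
print(cmd)
p = Popen(cmd, shell=True)
p.wait()
```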
@@ -242,7 +241,7 @@ def extract_f0_feature(gpus,n_p,f0method,if_f0,exp_dir):
f = open("%s/logs/%s/extract_f0_feature.log"%(now_dir,exp_dir), "w")
f.close()
if(if_f0==""):
cmd="python extract_f0_print.py %s/logs/%s %s %s"%(now_dir,exp_dir,n_p,f0method)
cmd=python_cmd + " extract_f0_print.py %s/logs/%s %s %s"%(now_dir,exp_dir,n_p,f0method)
print(cmd)
p = Popen(cmd, shell=True,cwd=now_dir)#, stdin=PIPE, stdout=PIPE,stderr=PIPE
### under gr, popen's read only returns all output at once after the run finishes, while without gr it reads line by line; so a separate text stream has to be polled on a timer instead
@@ -266,7 +265,7 @@ def extract_f0_feature(gpus,n_p,f0method,if_f0,exp_dir):
leng=len(gpus)
ps=[]
for idx,n_g in enumerate(gpus):
cmd="python extract_feature_print.py %s %s %s %s/logs/%s"%(leng,idx,n_g,now_dir,exp_dir)
cmd=python_cmd + " extract_feature_print.py %s %s %s %s/logs/%s"%(leng,idx,n_g,now_dir,exp_dir)
print(cmd)
p = Popen(cmd, shell=True, cwd=now_dir)#, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
ps.append(p)
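The feature-extraction step keeps its one-worker-per-GPU layout, only swapping the hard-coded "python" for python_cmd. Sketched below with illustrative values:

```python
import os
from subprocess import Popen

python_cmd = "python"            # from config
now_dir, exp_dir = os.getcwd(), "mi-test"
gpus = ["0", "1"]                # GPU ids chosen in the UI (illustrative)
leng = len(gpus)

ps = []
for idx, n_g in enumerate(gpus):
    # Each worker is told the total worker count, its own index, and its GPU id.
    cmd = python_cmd + " extract_feature_print.py %s %s %s %s/logs/%s" % (
        leng, idx, n_g, now_dir, exp_dir
    )
    print(cmd)
    ps.append(Popen(cmd, shell=True, cwd=now_dir))
for p in ps:
    p.wait()
```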
@@ -305,8 +304,8 @@ def click_train(exp_dir1,sr2,if_f0_3,spk_id5,save_epoch10,total_epoch11,batch_si
with open("%s/filelist.txt"%exp_dir,"w")as f:f.write("\n".join(opt))
print("write filelist done")
# generate config  # no config generation is needed anymore
# cmd = "python train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0"
cmd = "python train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s" % (exp_dir1,sr2,1 if if_f0_3==""else 0,batch_size12,gpus16,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13==""else 0,1 if if_cache_gpu17==""else 0)
# cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0"
cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s" % (exp_dir1,sr2,1 if if_f0_3==""else 0,batch_size12,gpus16,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13==""else 0,1 if if_cache_gpu17==""else 0)
print(cmd)
p = Popen(cmd, shell=True, cwd=now_dir)
p.wait()
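click_train assembles the same training command, now prefixed with python_cmd, and reduces the UI choices to 1/0 flags. The commented-out example above expands to roughly the following (values mirror that example; the booleans are illustrative stand-ins for the UI inputs):

```python
import os
from subprocess import Popen

python_cmd = "python"     # from config
now_dir = os.getcwd()
use_f0, save_latest, cache_gpu = True, True, False  # stand-ins for if_f0_3 / if_save_latest13 / if_cache_gpu17

cmd = python_cmd + (
    " train_nsf_sim_cache_sid_load_pretrain.py"
    " -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s"
) % (
    "mi-test", "40k",
    1 if use_f0 else 0,          # -f0: pitch guidance on/off
    4, "0", 10, 5,               # batch size, GPU ids, total epochs, save-every
    "pretrained/f0G40k.pth", "pretrained/f0D40k.pth",
    1 if save_latest else 0,     # -l: from if_save_latest13
    1 if cache_gpu else 0,       # -c: from if_cache_gpu17
)
print(cmd)
p = Popen(cmd, shell=True, cwd=now_dir)
p.wait()
```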
@@ -351,7 +350,7 @@ def train1key(exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0meth
os.makedirs("%s/logs/%s"%(now_dir,exp_dir1),exist_ok=True)
######### step 1: process the data
open("%s/logs/%s/preprocess.log"%(now_dir,exp_dir1), "w").close()
cmd="python trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s"%(trainset_dir4,sr_dict[sr2],ncpu,now_dir,exp_dir1)
cmd=python_cmd + " trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s "%(trainset_dir4,sr_dict[sr2],ncpu,now_dir,exp_dir1)+str(noparallel)
yield get_info_str("step1:正在处理数据")
yield get_info_str(cmd)
p = Popen(cmd, shell=True)
@@ -361,7 +360,7 @@ def train1key(exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0meth
open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir1), "w")
if(if_f0_3==""):
yield get_info_str("step2a:正在提取音高")
cmd="python extract_f0_print.py %s/logs/%s %s %s"%(now_dir,exp_dir1,np7,f0method8)
cmd=python_cmd + " extract_f0_print.py %s/logs/%s %s %s"%(now_dir,exp_dir1,np7,f0method8)
yield get_info_str(cmd)
p = Popen(cmd, shell=True,cwd=now_dir)
p.wait()
@@ -373,7 +372,7 @@ def train1key(exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0meth
leng=len(gpus)
ps=[]
for idx,n_g in enumerate(gpus):
cmd="python extract_feature_print.py %s %s %s %s/logs/%s"%(leng,idx,n_g,now_dir,exp_dir1)
cmd=python_cmd + " extract_feature_print.py %s %s %s %s/logs/%s"%(leng,idx,n_g,now_dir,exp_dir1)
yield get_info_str(cmd)
p = Popen(cmd, shell=True, cwd=now_dir)#, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
ps.append(p)
@@ -399,7 +398,7 @@ def train1key(exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0meth
opt.append("%s/%s.wav|%s/%s.npy|%s"%(gt_wavs_dir.replace("\\","\\\\"),name,co256_dir.replace("\\","\\\\"),name,spk_id5))
with open("%s/filelist.txt"%exp_dir,"w")as f:f.write("\n".join(opt))
yield get_info_str("write filelist done")
cmd = "python train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s" % (exp_dir1,sr2,1 if if_f0_3==""else 0,batch_size12,gpus16,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13==""else 0,1 if if_cache_gpu17==""else 0)
cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s -pg %s -pd %s -l %s -c %s" % (exp_dir1,sr2,1 if if_f0_3==""else 0,batch_size12,gpus16,total_epoch11,save_epoch10,pretrained_G14,pretrained_D15,1 if if_save_latest13==""else 0,1 if if_cache_gpu17==""else 0)
yield get_info_str(cmd)
p = Popen(cmd, shell=True, cwd=now_dir)
p.wait()
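train1key keeps the same structure for every stage: yield a status string, yield the exact command, then run it as a blocking subprocess. Reduced to a standalone sketch (the helper name and step list are illustrative, not from the repository):

```python
import os
from subprocess import Popen

def one_click_steps(steps, cwd=None):
    """Generator sketch of the train1key flow: stream progress text to the
    Gradio UI while each stage runs as a blocking shell command.
    `steps` is a list of (label, command) pairs."""
    cwd = cwd or os.getcwd()
    for label, cmd in steps:
        yield label   # e.g. "step1: preprocessing data"
        yield cmd     # echo the command, like the original get_info_str calls
        p = Popen(cmd, shell=True, cwd=cwd)
        p.wait()
    yield "all stages finished"

# Usage sketch:
for msg in one_click_steps([("step0: noop", "echo preprocessing would run here")]):
    print(msg)
```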
@@ -630,11 +629,7 @@ with gr.Blocks() as app:
with gr.TabItem("点击查看交流、问题反馈群号"):
gr.Markdown(value="""xxxxx""")
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--colab", action='store_true', help="Launch in colab")
cmd_opts = parser.parse_args()
if cmd_opts.colab:
if iscolab:
app.queue(concurrency_count=511, max_size=1022).launch(share=True)
else:
app.queue(concurrency_count=511, max_size=1022).launch(server_name="0.0.0.0",inbrowser=True,server_port=7865,quiet=True)
app.queue(concurrency_count=511, max_size=1022).launch(server_name="0.0.0.0",inbrowser=True,server_port=listen_port,quiet=True)
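With the hard-coded port and the local argparse block removed, the launch call now reads its values from config. Annotated sketch (the tiny Blocks object and the literal values stand in for the real UI and for --port/--colab):

```python
import gradio as gr

listen_port, iscolab = 7865, False   # normally taken from config (--port / --colab)

with gr.Blocks() as app:
    gr.Markdown("launch sketch")

if iscolab:
    # Colab cannot expose a local port directly, so request a public share link.
    app.queue(concurrency_count=511, max_size=1022).launch(share=True)
else:
    app.queue(concurrency_count=511, max_size=1022).launch(
        server_name="0.0.0.0",    # listen on all interfaces
        inbrowser=True,
        server_port=listen_port,  # previously hard-coded to 7865, now --port
        quiet=True,
    )
```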


@@ -10,10 +10,7 @@ def load_audio(file,sr):
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
except Exception as e:
raise RuntimeError(f"Failed to load audio: {e}")
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
if __name__=='__main__' :
print(load_audio(r"C:\CloudMusic\宮野幸子,森下唯 - 月夜に謳う君 -LUNA-.mp3",16000).shape)
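For context, the surviving load_audio path decodes any input through ffmpeg into mono 16-bit PCM at the requested rate and rescales it to float32. A self-contained sketch (the ffmpeg.input(...) line is assumed, since the diff only shows the .output/.run part):

```python
import numpy as np
import ffmpeg  # the ffmpeg-python package


def load_audio_sketch(file, sr):
    """Sketch of my_utils.load_audio: decode to mono s16le PCM at `sr`
    via the ffmpeg CLI, then scale int16 samples to float32 in [-1, 1]."""
    try:
        out, _ = (
            ffmpeg.input(file)  # assumed; not shown in the diff
            .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
            .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
        )
    except Exception as e:
        raise RuntimeError(f"Failed to load audio: {e}")
    return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0


# Usage sketch (any audio file ffmpeg can read):
# samples = load_audio_sketch("song.mp3", 16000); print(samples.shape)
```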


@@ -4,7 +4,6 @@ import numpy as np
# This function is obtained from librosa.
def get_rms(
y,
*,
frame_length=2048,
hop_length=512,
pad_mode="constant",
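Dropping the bare `*` from get_rms means frame_length, hop_length, and pad_mode are no longer keyword-only, so they may be passed positionally. A generic illustration of the difference (function names here are made up, not from slicer2.py):

```python
def rms_keyword_only(y, *, frame_length=2048, hop_length=512):
    return (len(y), frame_length, hop_length)

def rms_positional_ok(y, frame_length=2048, hop_length=512):
    return (len(y), frame_length, hop_length)

samples = [0.0] * 4096
rms_positional_ok(samples, 1024, 256)          # allowed once the "*" is removed
rms_keyword_only(samples, frame_length=1024)   # the "*" version requires keyword arguments
# rms_keyword_only(samples, 1024, 256)         # would raise TypeError
```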


@@ -1,4 +1,4 @@
import sys,os,pdb,multiprocessing
import sys,os,multiprocessing
now_dir=os.getcwd()
sys.path.append(now_dir)
@@ -6,20 +6,15 @@ inp_root = sys.argv[1]
sr = int(sys.argv[2])
n_p = int(sys.argv[3])
exp_dir = sys.argv[4]
import numpy as np,ffmpeg,os,traceback
noparallel = sys.argv[5] == "True"
import numpy as np,os,traceback
from slicer2 import Slicer
from joblib import Parallel, delayed
import librosa,traceback
from scipy.io import wavfile
import multiprocessing
from my_utils import load_audio
from time import sleep
f = open("%s/preprocess.log"%exp_dir, "a+")
def printt(strr):
print(strr)
f.write("%s\n" % strr)
f.flush()
mutex = multiprocessing.Lock()
class PreProcess():
def __init__(self,sr,exp_dir):
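The preprocessing script now takes noparallel as a fifth positional argument; because the caller appends str(noparallel), it arrives as the literal text "True" or "False". Receiving side, sketched (the example command in the comment is illustrative):

```python
import sys

# CLI contract after this commit (sketch):
#   <inp_root> <sr> <n_p> <exp_dir> <noparallel>
# e.g. python trainset_preprocess_pipeline_print.py /data/trainset 40000 8 /workspace/logs/mi-test False
inp_root = sys.argv[1]
sr = int(sys.argv[2])
n_p = int(sys.argv[3])
exp_dir = sys.argv[4]
noparallel = sys.argv[5] == "True"   # only the exact text "True" disables multiprocessing
```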
@@ -40,10 +35,18 @@ class PreProcess():
self.exp_dir=exp_dir
self.gt_wavs_dir="%s/0_gt_wavs"%exp_dir
self.wavs16k_dir="%s/1_16k_wavs"%exp_dir
self.f = open("%s/preprocess.log"%exp_dir, "a+")
os.makedirs(self.exp_dir,exist_ok=True)
os.makedirs(self.gt_wavs_dir,exist_ok=True)
os.makedirs(self.wavs16k_dir,exist_ok=True)
def print(self, strr):
mutex.acquire()
print(strr)
self.f.write("%s\n" % strr)
self.f.flush()
mutex.release()
def norm_write(self,tmp_audio,idx0,idx1):
tmp_audio = (tmp_audio / np.abs(tmp_audio).max() * (self.max * self.alpha)) + (1 - self.alpha) * tmp_audio
wavfile.write("%s/%s_%s.wav" % (self.gt_wavs_dir, idx0, idx1), self.sr, (tmp_audio*32768).astype(np.int16))
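The module-level printt helper is replaced by a per-instance print method: each PreProcess object keeps its own appended log file, and writes go through a module-level lock so lines from concurrent workers do not interleave. The pattern in isolation (class name and directory are illustrative):

```python
import multiprocessing
import os

mutex = multiprocessing.Lock()

class LogSketch:
    """Sketch of the new PreProcess logging: one appended log file per
    experiment directory, guarded by a module-level lock."""
    def __init__(self, exp_dir="logs/sketch-exp"):
        os.makedirs(exp_dir, exist_ok=True)
        self.f = open("%s/preprocess.log" % exp_dir, "a+")

    def print(self, strr):
        mutex.acquire()
        try:
            print(strr)
            self.f.write("%s\n" % strr)
            self.f.flush()
        finally:
            mutex.release()

LogSketch().print("start preprocess")
```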
@@ -67,9 +70,9 @@ class PreProcess():
tmp_audio = audio[start:]
break
self.norm_write(tmp_audio, idx0, idx1)
printt("%s->Suc."%path)
self.print("%s->Suc."%path)
except:
printt("%s->%s"%(path,traceback.format_exc()))
self.print("%s->%s"%(path,traceback.format_exc()))
def pipeline_mp(self,infos):
for path, idx0 in infos:
@@ -78,27 +81,24 @@
def pipeline_mp_inp_dir(self,inp_root,n_p):
try:
infos = [("%s/%s" % (inp_root, name), idx) for idx, name in enumerate(sorted(list(os.listdir(inp_root))))]
ps=[]
for i in range(n_p):
p=multiprocessing.Process(target=self.pipeline_mp,args=(infos[i::n_p],))
p.start()
ps.append(p)
for p in ps:p.join()
if noparallel:
for i in range(n_p): self.pipeline_mp(infos[i::n_p])
else:
ps=[]
for i in range(n_p):
p=multiprocessing.Process(target=self.pipeline_mp,args=(infos[i::n_p],))
p.start()
ps.append(p)
for p in ps:p.join()
except:
printt("Fail. %s"%traceback.format_exc())
self.print("Fail. %s"%traceback.format_exc())
def preprocess_trainset(inp_root, sr, n_p, exp_dir):
pp=PreProcess(sr,exp_dir)
pp.print("start preprocess")
pp.print(sys.argv)
pp.pipeline_mp_inp_dir(inp_root,n_p)
pp.print("end preprocess")
if __name__=='__main__':
# f = open("logs/log_preprocess.log", "w")
printt(sys.argv)
######################################################
# inp_root=r"E:\语音音频+标注\米津玄师\src"
# inp_root=r"E:\codes\py39\vits_vc_gpu_train\todo-songs"
# sr=40000
# n_p = 6
# exp_dir=r"E:\codes\py39\dataset\mi-test"
######################################################
printt("start preprocess")
pp=PreProcess(sr,exp_dir)
pp.pipeline_mp_inp_dir(inp_root,n_p)
printt("end preprocess")
preprocess_trainset(inp_root, sr, n_p, exp_dir)
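pipeline_mp_inp_dir is where --noparallel takes effect: the file list is split into n_p interleaved shards (infos[i::n_p]), and each shard is either processed inline or handed to its own multiprocessing.Process. Reduced to a standalone sketch (the helper below is illustrative; `work` stands in for pipeline_mp):

```python
import multiprocessing

def run_sharded(infos, n_p, work, noparallel=False):
    """Sketch of the new pipeline_mp_inp_dir control flow."""
    if noparallel:
        # --noparallel: process every shard in the current process, one after another
        for i in range(n_p):
            work(infos[i::n_p])
    else:
        # default: one worker process per interleaved shard
        ps = []
        for i in range(n_p):
            p = multiprocessing.Process(target=work, args=(infos[i::n_p],))
            p.start()
            ps.append(p)
        for p in ps:
            p.join()

if __name__ == "__main__":
    files = [("/data/%d.wav" % i, i) for i in range(8)]   # illustrative (path, idx) pairs
    run_sharded(files, n_p=2, work=print, noparallel=True)
```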


@ -1,6 +1,7 @@
MIT License
Copyright (c) 2023 liujing04
Copyright (c) 2023 源文雨
This software and its related code are open-sourced under the MIT license. The author has no control over the software; those who use the software or distribute the audio it exports do so at their own sole responsibility.
If you do not accept these terms, you may not use or reference any code or files inside the package.