Mirror of https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI.git (synced 2025-01-31 02:32:51 +08:00)
优化print (Optimize print)
This commit is contained in:
parent ff1a711cad
commit 80bc765cbc
@@ -79,7 +79,7 @@ def vc_single(sid,input_audio,f0_up_key,f0_file,f0_method,file_index,file_big_npy
         if(hubert_model==None):load_hubert()
         if_f0 = cpt.get("f0", 1)
         audio_opt=vc.pipeline(hubert_model,net_g,sid,audio,times,f0_up_key,f0_method,file_index,file_big_npy,index_rate,if_f0,f0_file=f0_file)
-        print(times)
+        print("npy: ", times[0], "s, f0:", times[1], "s, infer: ", times[2], "s", sep='')
         return "Success", (tgt_sr, audio_opt)
     except:
         info=traceback.format_exc()
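For context, the new line labels the three entries of times instead of dumping the raw list, and sep='' removes print's default separator so labels and numbers run together. A minimal sketch of the resulting output, using hypothetical timing values:

    # Hypothetical accumulated durations, in seconds: [npy (feature extraction), f0, infer]
    times = [0.12, 0.34, 1.56]
    print("npy: ", times[0], "s, f0:", times[1], "s, infer: ", times[2], "s", sep='')
    # prints: npy: 0.12s, f0:0.34s, infer: 1.56s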
@@ -72,7 +72,6 @@ class VC(object):
             "output_layer": 9,  # layer 9
         }
         t0 = ttime()
-        print("vc npy start time:", t0)
         with torch.no_grad():
             logits = model.extract_features(**inputs)
             feats = model.final_proj(logits[0])
@@ -87,7 +86,6 @@ class VC(object):
 
         feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
         t1 = ttime()
-        print("vc infer start time:", t1)
         p_len = audio0.shape[0]//self.window
         if(feats.shape[1]<p_len):
             p_len=feats.shape[1]
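Aside on the unchanged context line above: F.interpolate with scale_factor=2 doubles the feature frame count along the time axis before it is compared against p_len = audio0.shape[0]//self.window. A self-contained sketch with a hypothetical tensor shape:

    import torch
    import torch.nn.functional as F

    feats = torch.randn(1, 100, 256)               # hypothetical (batch, frames, channels) features
    feats = F.interpolate(feats.permute(0, 2, 1),  # -> (batch, channels, frames)
                          scale_factor=2)          # nearest-neighbour upsampling, doubling the frame count
    feats = feats.permute(0, 2, 1)                 # back to (batch, frames, channels)
    print(feats.shape)                             # torch.Size([1, 200, 256])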
@@ -103,7 +101,6 @@ class VC(object):
         del feats,p_len,padding_mask
         if torch.cuda.is_available(): torch.cuda.empty_cache()
         t2 = ttime()
-        print("vc infer end time:", t2)
         times[0] += (t1 - t0)
         times[2] += (t2 - t1)
         return audio1
@@ -128,7 +125,6 @@ class VC(object):
         audio_opt=[]
         t=None
         t1=ttime()
-        print("f0 start time:", t1)
         audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode='reflect')
         p_len=audio_pad.shape[0]//self.window
         inp_f0=None
@@ -150,7 +146,6 @@ class VC(object):
             pitch = torch.tensor(pitch,device=self.device).unsqueeze(0).long()
             pitchf = torch.tensor(pitchf,device=self.device).unsqueeze(0).float()
         t2=ttime()
-        print("f0 end time:", t2)
         times[1] += (t2 - t1)
         for t in opt_ts:
             t=t//self.window*self.window
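Taken together, the hunks keep the accumulated timing deltas and drop the per-stage timestamp prints, so the only remaining output is the one-line summary added in the first hunk. A minimal, self-contained sketch of the pattern, assuming ttime is an alias for time.time (the placeholder comments stand in for the real feature-extraction and inference calls):

    from time import time as ttime   # assumption: ttime aliases time.time, as the t0/t1/t2 usage suggests

    times = [0.0, 0.0, 0.0]          # accumulated seconds: [npy (feature extraction), f0, infer]

    t0 = ttime()
    # ... HuBERT feature extraction ("npy" stage) would run here ...
    t1 = ttime()
    # ... synthesizer inference would run here ...
    t2 = ttime()

    times[0] += (t1 - t0)            # feature-extraction time
    times[2] += (t2 - t1)            # inference time
    # times[1] is accumulated the same way around the f0 step in pipeline()

    print("npy: ", times[0], "s, f0:", times[1], "s, infer: ", times[2], "s", sep='')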