Format code (#274)

Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
github-actions[bot] 2023-05-12 19:43:05 +00:00 committed by GitHub
parent 568378761b
commit af41184320
3 changed files with 90 additions and 49 deletions

View File

@@ -18,9 +18,12 @@ from fairseq import checkpoint_utils
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-if torch.cuda.is_available():device="cuda"
-elif torch.backends.mps.is_available():device="mps"
-else:device="cpu"
+if torch.cuda.is_available():
+    device = "cuda"
+elif torch.backends.mps.is_available():
+    device = "mps"
+else:
+    device = "cpu"
 f = open("%s/extract_f0_feature.log" % exp_dir, "a+")

View File

@@ -156,13 +156,17 @@ def vc_single(
             load_hubert()
         if_f0 = cpt.get("f0", 1)
         file_index = (
             (
                 file_index.strip(" ")
                 .strip('"')
                 .strip("\n")
                 .strip('"')
                 .strip(" ")
                 .replace("trained", "added")
-            )if file_index!=""else file_index2 # in case a novice typed it wrong, automatically replace it for them
+            )
+            if file_index != ""
+            else file_index2
+        )  # in case a novice typed it wrong, automatically replace it for them
         # file_big_npy = (
         #     file_big_npy.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
         # )
@@ -184,10 +188,19 @@ def vc_single(
             resample_sr,
             f0_file=f0_file,
         )
-        if(resample_sr>=16000 and tgt_sr!=resample_sr):
+        if resample_sr >= 16000 and tgt_sr != resample_sr:
             tgt_sr = resample_sr
-        index_info="Using index:%s."%file_index if os.path.exists(file_index)else"Index not used."
-        return "Success.\n %s\nTime:\n npy:%ss, f0:%ss, infer:%ss"%(index_info,times[0],times[1],times[2]), (tgt_sr, audio_opt)
+        index_info = (
+            "Using index:%s." % file_index
+            if os.path.exists(file_index)
+            else "Index not used."
+        )
+        return "Success.\n %s\nTime:\n npy:%ss, f0:%ss, infer:%ss" % (
+            index_info,
+            times[0],
+            times[1],
+            times[2],
+        ), (tgt_sr, audio_opt)
     except:
         info = traceback.format_exc()
         print(info)
@@ -376,7 +389,10 @@ def change_choices():
         for name in files:
             if name.endswith(".index") and "trained" not in name:
                 index_paths.append("%s/%s" % (root, name))
-    return {"choices": sorted(names), "__type__": "update"},{"choices": sorted(index_paths), "__type__": "update"}
+    return {"choices": sorted(names), "__type__": "update"}, {
+        "choices": sorted(index_paths),
+        "__type__": "update",
+    }
 def clean():
@@ -1115,7 +1131,9 @@ with gr.Blocks() as app:
                             choices=sorted(index_paths),
                             interactive=True,
                         )
-                        refresh_button.click(fn=change_choices, inputs=[], outputs=[sid0, file_index2])
+                        refresh_button.click(
+                            fn=change_choices, inputs=[], outputs=[sid0, file_index2]
+                        )
                         # file_big_npy1 = gr.Textbox(
                         #     label=i18n("特征文件路径"),
                         #     value="E:\\codes\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
@@ -1154,7 +1172,7 @@ with gr.Blocks() as app:
                         # file_big_npy1,
                         index_rate1,
                         filter_radius0,
-                        resample_sr0
+                        resample_sr0,
                     ],
                     [vc_output1, vc_output2],
                 )
@@ -1237,7 +1255,7 @@ with gr.Blocks() as app:
                         # file_big_npy2,
                         index_rate2,
                         filter_radius1,
-                        resample_sr1
+                        resample_sr1,
                     ],
                     [vc_output3],
                 )
@@ -1600,13 +1618,13 @@ with gr.Blocks() as app:
         tab_faq = i18n("常见问题解答")
         with gr.TabItem(tab_faq):
             try:
-                if(tab_faq=="常见问题解答"):
-                    with open("docs/faq.md","r",encoding="utf8")as f:info=f.read()
+                if tab_faq == "常见问题解答":
+                    with open("docs/faq.md", "r", encoding="utf8") as f:
+                        info = f.read()
                 else:
-                    with open("docs/faq_en.md", "r")as f:info = f.read()
-                gr.Markdown(
-                    value=info
-                )
+                    with open("docs/faq_en.md", "r") as f:
+                        info = f.read()
+                gr.Markdown(value=info)
             except:
                 gr.Markdown(traceback.format_exc())

View File

@@ -9,6 +9,8 @@ from functools import lru_cache
 bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
 input_audio_path2wav = {}
+
+
 @lru_cache
 def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
     audio = input_audio_path2wav[input_audio_path]
@@ -22,6 +24,7 @@ def cache_harvest_f0(input_audio_path,fs,f0max,f0min,frame_period):
     f0 = pyworld.stonemask(audio, f0, t, fs)
     return f0
+
 class VC(object):
def __init__(self, tgt_sr, config):
self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
@@ -41,7 +44,16 @@ class VC(object):
         self.t_max = self.sr * self.x_max  # duration threshold for skipping the query
         self.device = config.device
 
-    def get_f0(self, input_audio_path,x, p_len, f0_up_key, f0_method,filter_radius, inp_f0=None):
+    def get_f0(
+        self,
+        input_audio_path,
+        x,
+        p_len,
+        f0_up_key,
+        f0_method,
+        filter_radius,
+        inp_f0=None,
+    ):
         global input_audio_path2wav
         time_step = self.window / self.sr * 1000
         f0_min = 50
@@ -67,7 +79,7 @@ class VC(object):
elif f0_method == "harvest":
input_audio_path2wav[input_audio_path] = x.astype(np.double)
f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
if(filter_radius>2):
if filter_radius > 2:
f0 = signal.medfilt(f0, 3)
f0 *= pow(2, f0_up_key / 12)
# with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
@@ -255,7 +267,15 @@ class VC(object):
         sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
         pitch, pitchf = None, None
         if if_f0 == 1:
-            pitch, pitchf = self.get_f0(input_audio_path,audio_pad, p_len, f0_up_key, f0_method,filter_radius, inp_f0)
+            pitch, pitchf = self.get_f0(
+                input_audio_path,
+                audio_pad,
+                p_len,
+                f0_up_key,
+                f0_method,
+                filter_radius,
+                inp_f0,
+            )
             pitch = pitch[:p_len]
             pitchf = pitchf[:p_len]
             if self.device == "mps":
@@ -328,7 +348,7 @@ class VC(object):
                 )[self.t_pad_tgt : -self.t_pad_tgt]
             )
         audio_opt = np.concatenate(audio_opt)
-        if(resample_sr>=16000 and tgt_sr!=resample_sr):
+        if resample_sr >= 16000 and tgt_sr != resample_sr:
             audio_opt = librosa.resample(
                 audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
             )