mirror of https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI.git
commit 0eb6bb67be (parent a071f1e089)

infer-web.py (13 changed lines)
@@ -1119,12 +1119,12 @@ def change_info_(ckpt_path):
 from infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
 
 
-def export_onnx(ModelPath, ExportedPath, MoeVS=True):
+def export_onnx(ModelPath, ExportedPath):
     cpt = torch.load(ModelPath, map_location="cpu")
-    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
-    hidden_channels = 256 if cpt.get("version", "v1") == "v1" else 768  # cpt["config"][-2]; hidden_channels, in preparation for the 768-dim ContentVec
+    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
+    vec_channels = 256 if cpt.get("version", "v1") == "v1" else 768
 
-    test_phone = torch.rand(1, 200, hidden_channels)  # hidden unit
+    test_phone = torch.rand(1, 200, vec_channels)  # hidden unit
     test_phone_lengths = torch.tensor([200]).long()  # hidden unit length (seemingly unused)
     test_pitch = torch.randint(size=(1, 200), low=5, high=255)  # f0 (in Hz)
     test_pitchf = torch.rand(1, 200)  # NSF f0
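For context on the rename: "v1" RVC checkpoints are trained on 256-dimensional HuBERT/ContentVec features while "v2" checkpoints use 768-dimensional ones, which is what `vec_channels` now encodes directly. A tiny illustration, where the `cpt` dict is a stand-in for the checkpoint returned by `torch.load`:

# Stand-in for cpt = torch.load(ModelPath, map_location="cpu"); only the
# "version" key matters for the feature dimension picked below.
import torch

cpt = {"version": "v2"}
vec_channels = 256 if cpt.get("version", "v1") == "v1" else 768
test_phone = torch.rand(1, 200, vec_channels)  # dummy hidden units for tracing
print(test_phone.shape)  # torch.Size([1, 200, 768])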
@@ -1160,7 +1160,7 @@ def export_onnx(ModelPath, ExportedPath, MoeVS=True):
             "rnd": [2],
         },
         do_constant_folding=False,
-        opset_version=16,
+        opset_version=13,
         verbose=False,
         input_names=input_names,
         output_names=output_names,
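The only functional change in this hunk is pinning the export to ONNX opset 13 instead of 16, presumably so the exported graph loads on a wider range of ONNX Runtime builds. A self-contained sketch of `torch.onnx.export` with the same arguments, using a toy module rather than RVC's synthesizer (the module, file name, and shapes are all illustrative):

# Toy stand-in for the export call patched above; not RVC's actual model.
import torch

model = torch.nn.Linear(256, 4)
test_phone = torch.rand(1, 200, 256)  # (batch, frames, feature dim)

torch.onnx.export(
    model,
    test_phone,
    "toy.onnx",
    do_constant_folding=False,
    opset_version=13,  # was 16 before this commit
    verbose=False,
    input_names=["phone"],
    output_names=["audio"],
    dynamic_axes={"phone": [1]},  # frame count varies, like "rnd": [2] above
)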
@@ -1835,11 +1835,10 @@ with gr.Blocks() as app:
                     label=i18n("Onnx输出路径"), value="", interactive=True
                 )
                 with gr.Row():
-                    moevs = gr.Checkbox(label=i18n("MoeVS模型"), value=False, visible=False)
                     infoOnnx = gr.Label(label="info")
                 with gr.Row():
                     butOnnx = gr.Button(i18n("导出Onnx模型"), variant="primary")
-                butOnnx.click(export_onnx, [ckpt_dir, onnx_dir, moevs], infoOnnx)
+                butOnnx.click(export_onnx, [ckpt_dir, onnx_dir], infoOnnx)
 
         tab_faq = i18n("常见问题解答")
         with gr.TabItem(tab_faq):
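With the hidden MoeVS checkbox removed, the click handler's input list again matches the two-argument `export_onnx`. A stripped-down sketch of the same Gradio wiring pattern, where the components in the inputs list map positionally onto the handler's parameters (labels and layout here are illustrative, not the repo's):

# Minimal Gradio Blocks demo of the inputs/outputs wiring used above.
import gradio as gr

def export_onnx(ckpt_path, onnx_path):
    return f"would export {ckpt_path} -> {onnx_path}"  # placeholder handler

with gr.Blocks() as app:
    ckpt_dir = gr.Textbox(label="checkpoint path")
    onnx_dir = gr.Textbox(label="ONNX output path")
    infoOnnx = gr.Label(label="info")
    butOnnx = gr.Button("Export ONNX model", variant="primary")
    butOnnx.click(export_onnx, [ckpt_dir, onnx_dir], infoOnnx)

# app.launch()  # uncomment to serve the demo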
(second changed file in this commit; it defines the ContentVec and OnnxRVC onnxruntime wrappers)

@@ -3,7 +3,6 @@ import librosa
 import numpy as np
 import soundfile
-
 
 class ContentVec:
     def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
         print("load model(s) from {}".format(vec_path))
@@ -11,6 +10,8 @@ class ContentVec:
             providers = ["CPUExecutionProvider"]
         elif device == "cuda":
             providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+        elif device == "dml":
+            providers = ["DmlExecutionProvider"]
         else:
             raise RuntimeError("Unsportted Device")
         self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
@@ -68,6 +69,8 @@ class OnnxRVC:
             providers = ["CPUExecutionProvider"]
         elif device == "cuda":
             providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+        elif device == "dml":
+            providers = ["DmlExecutionProvider"]
         else:
             raise RuntimeError("Unsportted Device")
         self.model = onnxruntime.InferenceSession(model_path, providers=providers)
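Both `__init__` methods gain a "dml" branch that selects ONNX Runtime's DirectML execution provider; note that DmlExecutionProvider is only shipped in the onnxruntime-directml package, not in plain onnxruntime. A standalone sketch of the same device-to-providers mapping (the function name and signature are mine, not the repo's):

# Device -> providers mapping mirroring the branch added in this commit.
# "dml" requires the onnxruntime-directml build.
import onnxruntime

def make_session(model_path: str, device: str = "cpu") -> onnxruntime.InferenceSession:
    if device == "cpu":
        providers = ["CPUExecutionProvider"]
    elif device == "cuda":
        providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
    elif device == "dml":
        providers = ["DmlExecutionProvider"]
    else:
        raise RuntimeError("Unsupported device")
    return onnxruntime.InferenceSession(model_path, providers=providers)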