Onnx推理dml支持 (#556)

* Add files via upload

* Add files via upload
This commit is contained in:
Ναρουσέ·μ·γιουμεμί·Χινακάννα 2023-06-17 22:49:16 +08:00 committed by GitHub
parent a071f1e089
commit 0eb6bb67be
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 10 additions and 8 deletions

View File

@@ -1119,12 +1119,12 @@ def change_info_(ckpt_path):
from infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
def export_onnx(ModelPath, ExportedPath, MoeVS=True):
def export_onnx(ModelPath, ExportedPath):
cpt = torch.load(ModelPath, map_location="cpu")
cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
hidden_channels = 256 if cpt.get("version","v1")=="v1"else 768#cpt["config"][-2] # hidden_channels为768Vec做准备
cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
vec_channels = 256 if cpt.get("version","v1")=="v1"else 768
test_phone = torch.rand(1, 200, hidden_channels) # hidden unit
test_phone = torch.rand(1, 200, vec_channels) # hidden unit
test_phone_lengths = torch.tensor([200]).long() # hidden unit 长度(貌似没啥用)
test_pitch = torch.randint(size=(1, 200), low=5, high=255) # 基频(单位赫兹)
test_pitchf = torch.rand(1, 200) # nsf基频
@@ -1160,7 +1160,7 @@ def export_onnx(ModelPath, ExportedPath, MoeVS=True):
"rnd": [2],
},
do_constant_folding=False,
opset_version=16,
opset_version=13,
verbose=False,
input_names=input_names,
output_names=output_names,
@@ -1835,11 +1835,10 @@ with gr.Blocks() as app:
label=i18n("Onnx输出路径"), value="", interactive=True
)
with gr.Row():
moevs = gr.Checkbox(label=i18n("MoeVS模型"), value=False,visible=False)
infoOnnx = gr.Label(label="info")
with gr.Row():
butOnnx = gr.Button(i18n("导出Onnx模型"), variant="primary")
butOnnx.click(export_onnx, [ckpt_dir, onnx_dir, moevs], infoOnnx)
butOnnx.click(export_onnx, [ckpt_dir, onnx_dir], infoOnnx)
tab_faq = i18n("常见问题解答")
with gr.TabItem(tab_faq):

View File

@@ -3,7 +3,6 @@ import librosa
import numpy as np
import soundfile
class ContentVec:
def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
print("load model(s) from {}".format(vec_path))
@@ -11,6 +10,8 @@ class ContentVec:
providers = ["CPUExecutionProvider"]
elif device == "cuda":
providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
elif device == "dml":
providers = ["DmlExecutionProvider"]
else:
raise RuntimeError("Unsportted Device")
self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
@@ -68,6 +69,8 @@ class OnnxRVC:
providers = ["CPUExecutionProvider"]
elif device == "cuda":
providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
elif device == "dml":
providers = ["DmlExecutionProvider"]
else:
raise RuntimeError("Unsportted Device")
self.model = onnxruntime.InferenceSession(model_path, providers=providers)