From 0eb6bb67be84fefbad6907010eb0b86ebc8167e3 Mon Sep 17 00:00:00 2001
From: Ναρουσέ·μ·γιουμεμί·Χινακάννα <40709280+NaruseMioShirakana@users.noreply.github.com>
Date: Sat, 17 Jun 2023 22:49:16 +0800
Subject: [PATCH] ONNX inference DirectML (dml) support (#556)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Add a "dml" device option (DirectML execution provider) to the ONNX
  inference classes ContentVec and OnnxRVC
* Drop the unused MoeVS flag from export_onnx and lower the export
  opset_version from 16 to 13

---
 infer-web.py                 | 13 ++++++-------
 infer_pack/onnx_inference.py |  5 ++++-
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/infer-web.py b/infer-web.py
index a3bac6d..e96fbb0 100644
--- a/infer-web.py
+++ b/infer-web.py
@@ -1119,12 +1119,12 @@ def change_info_(ckpt_path):
 from infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
 
 
-def export_onnx(ModelPath, ExportedPath, MoeVS=True):
+def export_onnx(ModelPath, ExportedPath):
     cpt = torch.load(ModelPath, map_location="cpu")
-    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
-    hidden_channels = 256 if cpt.get("version","v1")=="v1"else 768  # cpt["config"][-2]; hidden_channels, in preparation for the 768-dim vec
+    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
+    vec_channels = 256 if cpt.get("version", "v1") == "v1" else 768
 
-    test_phone = torch.rand(1, 200, hidden_channels)  # hidden unit
+    test_phone = torch.rand(1, 200, vec_channels)  # hidden unit
     test_phone_lengths = torch.tensor([200]).long()  # hidden unit length (seemingly unused)
     test_pitch = torch.randint(size=(1, 200), low=5, high=255)  # fundamental frequency (Hz)
     test_pitchf = torch.rand(1, 200)  # NSF fundamental frequency
@@ -1160,7 +1160,7 @@ def export_onnx(ModelPath, ExportedPath, MoeVS=True):
             "rnd": [2],
         },
         do_constant_folding=False,
-        opset_version=16,
+        opset_version=13,
         verbose=False,
         input_names=input_names,
         output_names=output_names,
@@ -1835,11 +1835,10 @@ with gr.Blocks() as app:
                     label=i18n("Onnx输出路径"), value="", interactive=True
                 )
             with gr.Row():
-                moevs = gr.Checkbox(label=i18n("MoeVS模型"), value=False,visible=False)
                 infoOnnx = gr.Label(label="info")
             with gr.Row():
                 butOnnx = gr.Button(i18n("导出Onnx模型"), variant="primary")
-            butOnnx.click(export_onnx, [ckpt_dir, onnx_dir, moevs], infoOnnx)
+            butOnnx.click(export_onnx, [ckpt_dir, onnx_dir], infoOnnx)
 
         tab_faq = i18n("常见问题解答")
         with gr.TabItem(tab_faq):
diff --git a/infer_pack/onnx_inference.py b/infer_pack/onnx_inference.py
index 09a4ed2..7502543 100644
--- a/infer_pack/onnx_inference.py
+++ b/infer_pack/onnx_inference.py
@@ -3,7 +3,6 @@ import librosa
 import numpy as np
 import soundfile
 
-
 class ContentVec:
     def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
         print("load model(s) from {}".format(vec_path))
@@ -11,6 +10,8 @@ class ContentVec:
             providers = ["CPUExecutionProvider"]
         elif device == "cuda":
             providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+        elif device == "dml":
+            providers = ["DmlExecutionProvider"]
         else:
             raise RuntimeError("Unsupported Device")
         self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
@@ -68,6 +69,8 @@ class OnnxRVC:
             providers = ["CPUExecutionProvider"]
         elif device == "cuda":
             providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+        elif device == "dml":
+            providers = ["DmlExecutionProvider"]
         else:
             raise RuntimeError("Unsupported Device")
         self.model = onnxruntime.InferenceSession(model_path, providers=providers)
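
Usage note: the new "dml" path selects onnxruntime's DirectML execution
provider, which is only registered by the onnxruntime-directml build (the
stock onnxruntime wheel ships the CPU provider only, and the -gpu wheel adds
CUDA). Below is a minimal, self-contained sketch of the same provider
selection against the public onnxruntime API; make_session and the
"model.onnx" path are illustrative placeholders, not names from this patch.

    # Minimal sketch of the provider selection this patch adds to
    # ContentVec and OnnxRVC. Assumes onnxruntime-directml is installed
    # for the "dml" branch; "model.onnx" is a placeholder path.
    import onnxruntime

    def make_session(model_path, device="cpu"):
        if device == "cpu" or device is None:
            providers = ["CPUExecutionProvider"]
        elif device == "cuda":
            # keep a CPU fallback for ops the CUDA provider cannot place
            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
        elif device == "dml":
            providers = ["DmlExecutionProvider"]
        else:
            raise RuntimeError("Unsupported Device")
        return onnxruntime.InferenceSession(model_path, providers=providers)

    session = make_session("model.onnx", device="dml")
    print(session.get_providers())  # confirms which providers actually loaded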
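
The export-side change lowers opset_version from 16 to 13, presumably for
compatibility with runtimes (DirectML among them) that lag behind the newest
opsets. The opset an exported file actually declares can be read back with
the onnx package; a minimal sketch, with "exported.onnx" standing in for the
real output path:

    # Read back the opset(s) recorded in an exported model.
    import onnx

    model = onnx.load("exported.onnx")  # placeholder path
    for opset in model.opset_import:
        # the default ONNX operator domain is stored as an empty string
        print(opset.domain or "ai.onnx", opset.version)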