From e10d097782a3793e586b46e2e786eaa8ac849707 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=CE=9D=CE=B1=CF=81=CE=BF=CF=85=CF=83=CE=AD=C2=B7=CE=BC?=
 =?UTF-8?q?=C2=B7=CE=B3=CE=B9=CE=BF=CF=85=CE=BC=CE=B5=CE=BC=CE=AF=C2=B7?=
 =?UTF-8?q?=CE=A7=CE=B9=CE=BD=CE=B1=CE=BA=CE=AC=CE=BD=CE=BD=CE=B1?=
 <40709280+NaruseMioShirakana@users.noreply.github.com>
Date: Sat, 8 Apr 2023 23:36:25 +0800
Subject: [PATCH] Add files via upload

---
 export_onnx.py | 44 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)
 create mode 100644 export_onnx.py

diff --git a/export_onnx.py b/export_onnx.py
new file mode 100644
index 0000000..80f061b
--- /dev/null
+++ b/export_onnx.py
@@ -0,0 +1,44 @@
+from infer_pack.models_onnx import SynthesizerTrnMs256NSFsid
+import torch
+
+person = "Shiroha/shiroha.pth"
+exported_path = "model.onnx"
+
+
+
+cpt = torch.load(person, map_location="cpu")
+cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
+print(*cpt["config"])
+net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=False)
+net_g.load_state_dict(cpt["weight"], strict=False)
+
+test_phone = torch.rand(1, 200, 256)
+test_phone_lengths = torch.tensor([200]).long()
+test_pitch = torch.randint(size=(1, 200), low=5, high=255)
+test_pitchf = torch.rand(1, 200)
+test_ds = torch.LongTensor([0])
+test_rnd = torch.rand(1, 192, 200)
+input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds", "rnd"]
+output_names = ["audio"]
+device = "cpu"
+torch.onnx.export(net_g,
+                  (
+                      test_phone.to(device),
+                      test_phone_lengths.to(device),
+                      test_pitch.to(device),
+                      test_pitchf.to(device),
+                      test_ds.to(device),
+                      test_rnd.to(device)
+                  ),
+                  exported_path,
+                  dynamic_axes={
+                      "phone": [1],
+                      "pitch": [1],
+                      "pitchf": [1],
+                      "rnd": [2],
+                  },
+                  do_constant_folding=False,
+                  opset_version=16,
+                  verbose=False,
+                  input_names=input_names,
+                  output_names=output_names)
\ No newline at end of file
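
As a quick sanity check of the exported graph, one could load model.onnx with onnxruntime and feed dummy inputs with the same names, dtypes, and shapes used during export. This is a minimal sketch, not part of the patch: it assumes onnxruntime is installed, that the dynamic length axes accept an arbitrary frame count, and that the "audio" output holds the synthesized waveform.

import numpy as np
import onnxruntime as ort

# Load the file written by export_onnx.py above.
sess = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])

frames = 200  # the phone/pitch/pitchf/rnd length axes were exported as dynamic
inputs = {
    "phone": np.random.rand(1, frames, 256).astype(np.float32),
    "phone_lengths": np.array([frames], dtype=np.int64),
    "pitch": np.random.randint(5, 255, size=(1, frames)).astype(np.int64),
    "pitchf": np.random.rand(1, frames).astype(np.float32),
    "ds": np.array([0], dtype=np.int64),  # speaker index
    "rnd": np.random.rand(1, 192, frames).astype(np.float32),
}

# Run the graph and print the output shape (exact shape depends on the model).
audio = sess.run(["audio"], inputs)[0]
print(audio.shape)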