Mirror of https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI.git
Add files via upload
commit e96ba57c9f (parent fb1d4b1882)
export_onnx.py  (118 lines changed)
@@ -1,47 +1,81 @@
The previous 47-line script exported a single model type (SynthesizerTrnMs256NSFsid from infer_pack.models_onnx); apart from that import and constructor call it matched export_onnx_old.py, added further down. It is replaced by the version below, which exports either the MoeVoiceStudio (formerly MoeSS) layout or the standard one:

from infer_pack.models_onnx_moess import SynthesizerTrnMs256NSFsidM
from infer_pack.models_onnx import SynthesizerTrnMs256NSFsidO
import torch

if __name__ == '__main__':
    MoeVS = True  # whether the model is for use with MoeVoiceStudio (formerly MoeSS)

    ModelPath = "Shiroha/shiroha.pth"  # model path
    ExportedPath = "model.onnx"  # output path
    hidden_channels = 256  # hidden_channels, in preparation for 768-dim vec features
    cpt = torch.load(ModelPath, map_location="cpu")
    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
    print(*cpt["config"])

    test_phone = torch.rand(1, 200, hidden_channels)  # hidden units
    test_phone_lengths = torch.tensor([200]).long()  # hidden unit length (seemingly unused)
    test_pitch = torch.randint(size=(1, 200), low=5, high=255)  # fundamental frequency (Hz)
    test_pitchf = torch.rand(1, 200)  # NSF fundamental frequency
    test_ds = torch.LongTensor([0])  # speaker ID
    test_rnd = torch.rand(1, 192, 200)  # noise (adds a random factor)

    device = "cpu"  # device used for export (does not affect how the model is used)

    if MoeVS:
        net_g = SynthesizerTrnMs256NSFsidM(*cpt["config"], is_half=False)  # fp32 export (supporting fp16 in C++ would require manually rearranging memory, so fp16 is not used for now)
        net_g.load_state_dict(cpt["weight"], strict=False)
        input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds", "rnd"]
        output_names = [
            "audio",
        ]
        torch.onnx.export(
            net_g,
            (
                test_phone.to(device),
                test_phone_lengths.to(device),
                test_pitch.to(device),
                test_pitchf.to(device),
                test_ds.to(device),
                test_rnd.to(device),
            ),
            ExportedPath,
            dynamic_axes={
                "phone": [1],
                "pitch": [1],
                "pitchf": [1],
                "rnd": [2],
            },
            do_constant_folding=False,
            opset_version=16,
            verbose=False,
            input_names=input_names,
            output_names=output_names,
        )
    else:
        net_g = SynthesizerTrnMs256NSFsidO(*cpt["config"], is_half=False)  # fp32 export (supporting fp16 in C++ would require manually rearranging memory, so fp16 is not used for now)
        net_g.load_state_dict(cpt["weight"], strict=False)
        input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds"]
        output_names = [
            "audio",
        ]
        torch.onnx.export(
            net_g,
            (
                test_phone.to(device),
                test_phone_lengths.to(device),
                test_pitch.to(device),
                test_pitchf.to(device),
                test_ds.to(device),
            ),
            ExportedPath,
            dynamic_axes={
                "phone": [1],
                "pitch": [1],
                "pitchf": [1],
            },
            do_constant_folding=False,
            opset_version=16,
            verbose=False,
            input_names=input_names,
            output_names=output_names,
        )
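For reference, a minimal sketch of how the exported model.onnx could be driven from Python with onnxruntime; this is not part of the commit, assumes onnxruntime and numpy are installed, and assumes the MoeVS export above with its six inputs. Shapes and dtypes mirror the dummy tensors used at export time:

import numpy as np
import onnxruntime as ort

# Load the graph written by export_onnx.py (path taken from the script above).
sess = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])

# Dummy inputs with the same shapes/dtypes as the export-time tensors.
feeds = {
    "phone": np.random.rand(1, 200, 256).astype(np.float32),          # hidden units
    "phone_lengths": np.array([200], dtype=np.int64),
    "pitch": np.random.randint(5, 255, size=(1, 200)).astype(np.int64),
    "pitchf": np.random.rand(1, 200).astype(np.float32),              # NSF f0
    "ds": np.array([0], dtype=np.int64),                              # speaker ID
    "rnd": np.random.rand(1, 192, 200).astype(np.float32),            # noise
}

# Run the graph and fetch the single declared output.
audio = sess.run(["audio"], feeds)[0]
print(audio.shape)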
export_onnx_old.py  (new file, 47 lines)
@@ -0,0 +1,47 @@
from infer_pack.models_onnx_moess import SynthesizerTrnMs256NSFsidM
import torch

person = "Shiroha/shiroha.pth"
exported_path = "model.onnx"


cpt = torch.load(person, map_location="cpu")
cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
print(*cpt["config"])
net_g = SynthesizerTrnMs256NSFsidM(*cpt["config"], is_half=False)
net_g.load_state_dict(cpt["weight"], strict=False)

test_phone = torch.rand(1, 200, 256)
test_phone_lengths = torch.tensor([200]).long()
test_pitch = torch.randint(size=(1, 200), low=5, high=255)
test_pitchf = torch.rand(1, 200)
test_ds = torch.LongTensor([0])
test_rnd = torch.rand(1, 192, 200)
input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds", "rnd"]
output_names = [
    "audio",
]
device = "cpu"
torch.onnx.export(
    net_g,
    (
        test_phone.to(device),
        test_phone_lengths.to(device),
        test_pitch.to(device),
        test_pitchf.to(device),
        test_ds.to(device),
        test_rnd.to(device),
    ),
    exported_path,
    dynamic_axes={
        "phone": [1],
        "pitch": [1],
        "pitchf": [1],
        "rnd": [2],
    },
    do_constant_folding=False,
    opset_version=16,
    verbose=False,
    input_names=input_names,
    output_names=output_names,
)
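Likewise, a quick structural check of either exported graph is possible with the onnx package; this is an assumed verification step, not something the scripts above perform:

import onnx

# Load and structurally validate the exported graph.
model = onnx.load("model.onnx")
onnx.checker.check_model(model)

# List the recorded input names ("phone", "phone_lengths", ...) and their
# dimensions; dynamic axes show up as symbolic dim_param entries.
for inp in model.graph.input:
    dims = [d.dim_value or d.dim_param for d in inp.type.tensor_type.shape.dim]
    print(inp.name, dims)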