Mirror of https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI.git (synced 2025-05-08 04:51:44 +08:00)
fix the receptive field of flow
commit 8e78f655e6
parent 7728caad54
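
The change is identical in all four synthesizer classes below: previously z_p, x_mask (and nsff0, where present) were cropped to [head : head + length] before self.flow was applied, so the flow only ever saw the cropped window; the fix runs self.flow on the full z_p first and crops the resulting z afterwards, so the flow keeps its full receptive field across the crop boundary. A minimal, self-contained sketch of why the order matters, using a plain Conv1d as a stand-in for the flow (the names flow_like, head and length are illustrative, not from the repository):

import torch
import torch.nn as nn

torch.manual_seed(0)
flow_like = nn.Conv1d(4, 4, kernel_size=5, padding=2)  # stand-in for self.flow
z_p = torch.randn(1, 4, 100)                           # full latent sequence
head, length = 40, 20                                  # skip_head / return_length

# Old order: crop first, then run the "flow" -- frames near the crop edges
# are computed from zero padding instead of the real neighbouring frames.
z_old = flow_like(z_p[:, :, head : head + length])

# New order: run the "flow" on the full sequence, then crop the output,
# so every kept frame still sees its full receptive field.
z_new = flow_like(z_p)[:, :, head : head + length]

# Nonzero near the crop boundaries: the early slice truncated the context.
print((z_old - z_new).abs().max())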
@@ -794,15 +794,15 @@ class SynthesizerTrnMs256NSFsid(nn.Module):
         g = self.emb_g(sid).unsqueeze(-1)
         m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
         z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+        z = self.flow(z_p, x_mask, g=g, reverse=True)
         if skip_head is not None and return_length is not None:
             assert isinstance(skip_head, torch.Tensor)
             assert isinstance(return_length, torch.Tensor)
             head = int(skip_head.item())
             length = int(return_length.item())
-            z_p = z_p[:, :, head : head + length]
+            z = z[:, :, head : head + length]
             x_mask = x_mask[:, :, head : head + length]
             nsff0 = nsff0[:, head : head + length]
-        z = self.flow(z_p, x_mask, g=g, reverse=True)
         o = self.dec(z * x_mask, nsff0, g=g)
         return o, x_mask, (z, z_p, m_p, logs_p)

@@ -956,15 +956,15 @@ class SynthesizerTrnMs768NSFsid(nn.Module):
         g = self.emb_g(sid).unsqueeze(-1)
         m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
         z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+        z = self.flow(z_p, x_mask, g=g, reverse=True)
         if skip_head is not None and return_length is not None:
             assert isinstance(skip_head, torch.Tensor)
             assert isinstance(return_length, torch.Tensor)
             head = int(skip_head.item())
             length = int(return_length.item())
-            z_p = z_p[:, :, head : head + length]
+            z = z[:, :, head : head + length]
             x_mask = x_mask[:, :, head : head + length]
             nsff0 = nsff0[:, head : head + length]
-        z = self.flow(z_p, x_mask, g=g, reverse=True)
         o = self.dec(z * x_mask, nsff0, g=g)
         return o, x_mask, (z, z_p, m_p, logs_p)

@@ -1107,14 +1107,14 @@ class SynthesizerTrnMs256NSFsid_nono(nn.Module):
         g = self.emb_g(sid).unsqueeze(-1)
         m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
         z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+        z = self.flow(z_p, x_mask, g=g, reverse=True)
         if skip_head is not None and return_length is not None:
             assert isinstance(skip_head, torch.Tensor)
             assert isinstance(return_length, torch.Tensor)
             head = int(skip_head.item())
             length = int(return_length.item())
-            z_p = z_p[:, :, head : head + length]
+            z = z[:, :, head : head + length]
             x_mask = x_mask[:, :, head : head + length]
-        z = self.flow(z_p, x_mask, g=g, reverse=True)
         o = self.dec(z * x_mask, g=g)
         return o, x_mask, (z, z_p, m_p, logs_p)

@@ -1257,14 +1257,14 @@ class SynthesizerTrnMs768NSFsid_nono(nn.Module):
         g = self.emb_g(sid).unsqueeze(-1)
         m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
         z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+        z = self.flow(z_p, x_mask, g=g, reverse=True)
         if skip_head is not None and return_length is not None:
             assert isinstance(skip_head, torch.Tensor)
             assert isinstance(return_length, torch.Tensor)
             head = int(skip_head.item())
             length = int(return_length.item())
-            z_p = z_p[:, :, head : head + length]
+            z = z[:, :, head : head + length]
             x_mask = x_mask[:, :, head : head + length]
-        z = self.flow(z_p, x_mask, g=g, reverse=True)
         o = self.dec(z * x_mask, g=g)
         return o, x_mask, (z, z_p, m_p, logs_p)

|