Mirror of https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI.git
chore(format): run black on dev
Parent: b06cdcce32
Commit: 35bfdccfb2
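The title marks this as a pure formatting pass over the dev branch. A minimal sketch of reproducing that kind of rewrite with black's Python API is below; the sample source string and the default Mode() are assumptions for illustration, not settings taken from this commit.

# Hypothetical input: a long one-line signature like the ones in the hunks below.
import black

src = (
    "def forward(self, x, f0, g: Optional[torch.Tensor] = None, "
    "n_res: Optional[torch.Tensor] = None):\n"
    "    return x\n"
)

# black.format_str applies the rewrites seen in this commit: over-long
# signatures are exploded to one parameter per line with a trailing comma,
# and string quotes are normalized to double quotes.
print(black.format_str(src, mode=black.Mode()))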
@@ -356,7 +356,7 @@ if __name__ == "__main__":
                         enable_events=True,
                     ),
                 ],
                 [
                     sg.Text(i18n("共振偏移")),
                     sg.Slider(
                         range=(-5, 5),
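For context, the lines in this hunk belong to a PySimpleGUI layout row that pairs an i18n label with a bounded slider. A self-contained sketch of such a row follows; only range=(-5, 5), enable_events=True, and the "共振偏移" (resonance offset) label come from the diff, while the key, default value, resolution, and window title are assumptions.

import PySimpleGUI as sg

layout = [
    [
        sg.Text("共振偏移"),  # the real GUI wraps this label in i18n()
        sg.Slider(
            range=(-5, 5),          # from the diff
            default_value=0,        # assumption
            resolution=1,           # assumption
            orientation="h",        # assumption
            enable_events=True,     # from the diff
            key="formant_shift",    # hypothetical key name
        ),
    ],
    [sg.Button("OK")],
]

window = sg.Window("slider demo", layout)
event, values = window.read()
window.close()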
@@ -249,12 +249,17 @@ class Generator(torch.nn.Module):
         if gin_channels != 0:
             self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
 
-    def forward(self, x: torch.Tensor, g: Optional[torch.Tensor] = None, n_res: Optional[torch.Tensor] = None):
+    def forward(
+        self,
+        x: torch.Tensor,
+        g: Optional[torch.Tensor] = None,
+        n_res: Optional[torch.Tensor] = None,
+    ):
         if n_res is not None:
             assert isinstance(n_res, torch.Tensor)
             n = int(n_res.item())
             if n != x.shape[-1]:
-                x = F.interpolate(x, size=n, mode='linear')
+                x = F.interpolate(x, size=n, mode="linear")
         x = self.conv_pre(x)
         if g is not None:
             x = x + self.cond(g)
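The reformatted Generator.forward keeps the optional n_res branch, which stretches the feature map to a requested frame count before conv_pre. A standalone sketch of just that resizing step is below; the tensor shapes and channel count are illustrative assumptions.

import torch
import torch.nn.functional as F

x = torch.randn(1, 192, 100)   # (batch, channels, frames); shapes are assumptions
n_res = torch.tensor(120)      # requested output frame count, passed as a tensor

if n_res is not None:
    n = int(n_res.item())
    if n != x.shape[-1]:
        # linearly interpolate along the last (time) axis to the target length
        x = F.interpolate(x, size=n, mode="linear")

print(x.shape)  # torch.Size([1, 192, 120])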
@@ -533,16 +538,22 @@ class GeneratorNSF(torch.nn.Module):
 
         self.lrelu_slope = modules.LRELU_SLOPE
 
-    def forward(self, x, f0, g: Optional[torch.Tensor] = None, n_res: Optional[torch.Tensor] = None):
+    def forward(
+        self,
+        x,
+        f0,
+        g: Optional[torch.Tensor] = None,
+        n_res: Optional[torch.Tensor] = None,
+    ):
         har_source, noi_source, uv = self.m_source(f0, self.upp)
         har_source = har_source.transpose(1, 2)
         if n_res is not None:
             assert isinstance(n_res, torch.Tensor)
             n = int(n_res.item())
             if n * self.upp != har_source.shape[-1]:
-                har_source = F.interpolate(har_source, size=n*self.upp, mode='linear')
+                har_source = F.interpolate(har_source, size=n * self.upp, mode="linear")
             if n != x.shape[-1]:
-                x = F.interpolate(x, size=n, mode='linear')
+                x = F.interpolate(x, size=n, mode="linear")
         x = self.conv_pre(x)
         if g is not None:
             x = x + self.cond(g)
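GeneratorNSF.forward does the same resizing but also keeps the harmonic excitation aligned with the features, interpolating it to n * self.upp samples so it stays at upp samples per frame. A standalone sketch, with upp and all shapes chosen only for illustration:

import torch
import torch.nn.functional as F

upp = 480                                   # assumed upsampling factor
x = torch.randn(1, 192, 100)                # features: (batch, channels, frames)
har_source = torch.randn(1, 1, 100 * upp)   # source signal: (batch, 1, frames * upp)
n_res = torch.tensor(120)

n = int(n_res.item())
if n * upp != har_source.shape[-1]:
    # stretch the excitation so it still covers upp samples per output frame
    har_source = F.interpolate(har_source, size=n * upp, mode="linear")
if n != x.shape[-1]:
    x = F.interpolate(x, size=n, mode="linear")

print(x.shape, har_source.shape)  # (1, 192, 120) and (1, 1, 57600)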
@@ -446,7 +446,9 @@ class RVC:
                     new_freq=self.tgt_sr // 100,
                     dtype=torch.float32,
                 ).to(self.device)
-            infered_audio = self.resample_kernel[upp_res](infered_audio[: ,: return_length * upp_res])
+            infered_audio = self.resample_kernel[upp_res](
+                infered_audio[:, : return_length * upp_res]
+            )
             t5 = ttime()
             printt(
                 "Spent time: fea = %.3fs, index = %.3fs, f0 = %.3fs, model = %.3fs",
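This hunk wraps what appears to be a cached torchaudio Resample call. A minimal sketch of that cache-and-trim pattern follows; the sample rates, orig_freq choice, upp_res, return_length, and tensor shape are assumptions, since only the new_freq, dtype, and slicing expressions appear in the diff.

import torch
from torchaudio.transforms import Resample

tgt_sr = 40000          # assumed target sample rate
upp_res = 160           # assumed samples per 10 ms frame at the model rate
return_length = 100     # assumed number of frames to keep
device = "cpu"

# Cache one Resample kernel per input resolution, like self.resample_kernel.
resample_kernel = {}
if upp_res not in resample_kernel:
    resample_kernel[upp_res] = Resample(
        orig_freq=upp_res,             # assumption; not shown in the diff
        new_freq=tgt_sr // 100,
        dtype=torch.float32,
    ).to(device)

infered_audio = torch.randn(1, return_length * upp_res + 32)
# Trim to the requested length, then resample to the target rate.
infered_audio = resample_kernel[upp_res](
    infered_audio[:, : return_length * upp_res]
)
print(infered_audio.shape)  # (1, return_length * (tgt_sr // 100)) under these assumptions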