From cfd984812804ddc9247d65b14c82cd32e56c1133 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Sun, 21 May 2023 19:19:53 +0800
Subject: [PATCH] Format code (#330)

Co-authored-by: github-actions[bot]
---
 config.py                 |  2 ++
 export_onnx.py            |  1 -
 gui.py                    | 44 ++++++++++++++++++++++++++-------------
 i18n.py                   |  4 ++--
 infer-web.py              |  2 ++
 infer_pack/models_onnx.py |  8 +++----
 6 files changed, 39 insertions(+), 22 deletions(-)

diff --git a/config.py b/config.py
index 8e6fa22..48187f5 100644
--- a/config.py
+++ b/config.py
@@ -2,6 +2,7 @@ import argparse
 import torch
 from multiprocessing import cpu_count
 
+
 def config_file_change_fp32():
     for config_file in ["32k.json", "40k.json", "48k.json"]:
         with open(f"configs/{config_file}", "r") as f:
@@ -13,6 +14,7 @@ def config_file_change_fp32():
     with open("trainset_preprocess_pipeline_print.py", "w") as f:
         f.write(strr)
 
+
 class Config:
     def __init__(self):
         self.device = "cuda:0"
diff --git a/export_onnx.py b/export_onnx.py
index 2ec6509..34938fe 100644
--- a/export_onnx.py
+++ b/export_onnx.py
@@ -52,4 +52,3 @@ if __name__ == "__main__":
         input_names=input_names,
         output_names=output_names,
     )
-
diff --git a/gui.py b/gui.py
index e80f4de..852f0ca 100644
--- a/gui.py
+++ b/gui.py
@@ -1,4 +1,4 @@
-'''
+"""
 Updates since 0416:
 Take the half setting from config
 Rebuild the npy instead of having to fill it in
@@ -9,12 +9,14 @@
 int16:
 Add support for running without an index
 Switch the f0 algorithm to harvest (apparently the only thing that affects CPU load), but the results are worse without this change
-'''
+"""
 import os, sys, traceback
+
 now_dir = os.getcwd()
 sys.path.append(now_dir)
 from config import Config
-is_half=Config().is_half
+
+is_half = Config().is_half
 import PySimpleGUI as sg
 import sounddevice as sd
 import noisereduce as nr
@@ -26,7 +28,12 @@ import torchaudio.transforms as tat
 import scipy.signal as signal
 
 # import matplotlib.pyplot as plt
-from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono,SynthesizerTrnMs768NSFsid,SynthesizerTrnMs768NSFsid_nono
+from infer_pack.models import (
+    SynthesizerTrnMs256NSFsid,
+    SynthesizerTrnMs256NSFsid_nono,
+    SynthesizerTrnMs768NSFsid,
+    SynthesizerTrnMs768NSFsid_nono,
+)
 from i18n import I18nAuto
 
 i18n = I18nAuto()
@@ -63,7 +70,7 @@ class RVC:
             )
             self.model = models[0]
             self.model = self.model.to(device)
-            if(is_half==True):
+            if is_half == True:
                 self.model = self.model.half()
             else:
                 self.model = self.model.float()
@@ -75,21 +82,25 @@ class RVC:
             self.version = cpt.get("version", "v1")
             if version == "v1":
                 if if_f0 == 1:
-                    self.net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
+                    self.net_g = SynthesizerTrnMs256NSFsid(
+                        *cpt["config"], is_half=config.is_half
+                    )
                 else:
                     self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
             elif version == "v2":
                 if if_f0 == 1:
-                    self.net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
+                    self.net_g = SynthesizerTrnMs768NSFsid(
+                        *cpt["config"], is_half=config.is_half
+                    )
                 else:
                     self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
             del self.net_g.enc_q
             print(self.net_g.load_state_dict(cpt["weight"], strict=False))
             self.net_g.eval().to(device)
-            if(is_half==True):
-                self.net_g=self.net_g.half()
+            if is_half == True:
+                self.net_g = self.net_g.half()
             else:
-                self.net_g=self.net_g.float()
+                self.net_g = self.net_g.float()
         except:
             print(traceback.format_exc())
 
@@ -151,15 +162,18 @@ class RVC:
 
         ####Index optimization
        try:
-            if hasattr(self, "index") and hasattr(self, "big_npy") and self.index_rate != 0:
+            if (
+                hasattr(self, "index")
+                and hasattr(self, "big_npy")
+                and self.index_rate != 0
+            ):
                 npy = feats[0].cpu().numpy().astype("float32")
                score, ix = self.index.search(npy, k=8)
                 weight = np.square(1 / score)
                 weight /= weight.sum(axis=1, keepdims=True)
-                npy = np.sum(
-                    self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1
-                )
-                if(is_half==True):npy=npy.astype("float16")
+                npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
+                if is_half == True:
+                    npy = npy.astype("float16")
                 feats = (
                     torch.from_numpy(npy).unsqueeze(0).to(device) * self.index_rate
                     + (1 - self.index_rate) * feats
diff --git a/i18n.py b/i18n.py
index 9deba42..37f310f 100644
--- a/i18n.py
+++ b/i18n.py
@@ -18,11 +18,11 @@ class I18nAuto:
         if not os.path.exists(f"./i18n/{language}.json"):
             language = "en_US"
         self.language = language
-        #print("Use Language:", language)
+        # print("Use Language:", language)
         self.language_map = load_language_list(language)
 
     def __call__(self, key):
         return self.language_map.get(key, key)
-    
+
     def print(self):
         print("Use Language:", self.language)
diff --git a/infer-web.py b/infer-web.py
index 851c76c..305e03e 100644
--- a/infer-web.py
+++ b/infer-web.py
@@ -1069,6 +1069,8 @@ def change_info_(ckpt_path):
 
 
 from infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
+
+
 def export_onnx(ModelPath, ExportedPath, MoeVS=True):
     cpt = torch.load(ModelPath, map_location="cpu")
     cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
diff --git a/infer_pack/models_onnx.py b/infer_pack/models_onnx.py
index cd463a7..8cb1b18 100644
--- a/infer_pack/models_onnx.py
+++ b/infer_pack/models_onnx.py
@@ -633,14 +633,14 @@ class SynthesizerTrnMsNSFsidM(nn.Module):
         self.speaker_map = self.speaker_map.unsqueeze(0)
 
     def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
-        if self.speaker_map is not None: # [N, S] * [S, B, 1, H]
+        if self.speaker_map is not None:  # [N, S] * [S, B, 1, H]
             g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1))  # [N, S, B, 1, 1]
             g = g * self.speaker_map  # [N, S, B, 1, H]
-            g = torch.sum(g, dim=1) # [N, 1, B, 1, H]
-            g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N]
+            g = torch.sum(g, dim=1)  # [N, 1, B, 1, H]
+            g = g.transpose(0, -1).transpose(0, -2).squeeze(0)  # [B, H, N]
         else:
             g = g.unsqueeze(0)
-            g = self.emb_g(g).transpose(1,2)
+            g = self.emb_g(g).transpose(1, 2)
         m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
         z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
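
Reading aid (not part of the patch): the gui.py hunk above only reflows the feature-index lookup used during inference, but the step itself is easy to miss in diff form. The standalone sketch below reproduces that retrieve-and-blend idea under stated assumptions: faiss is used because the self.index.search(npy, k=8) call matches its API, the feature bank, dimensions and index_rate value are made-up stand-ins, and the small epsilon in the weight computation is an extra guard that the original code does not include.

import numpy as np
import faiss  # assumed to be the library behind self.index in gui.py

dim = 256  # illustrative feature width
big_npy = np.random.rand(1000, dim).astype("float32")  # stand-in feature bank
feats = np.random.rand(50, dim).astype("float32")      # stand-in input features
index_rate = 0.75  # how strongly retrieved features replace the input

# Build a flat L2 index over the bank (the app loads a pre-trained .index file instead).
index = faiss.IndexFlatL2(dim)
index.add(big_npy)

# For each input frame, fetch its 8 nearest bank entries, as in the diff.
score, ix = index.search(feats, 8)  # score: squared distances, ix: neighbour indices

# Closer neighbours get larger weights; normalize per frame.
weight = np.square(1 / (score + 1e-9))  # epsilon added here, not present in the original
weight /= weight.sum(axis=1, keepdims=True)

# Weighted average of the retrieved features, then blend with the original features.
retrieved = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
blended = retrieved * index_rate + (1 - index_rate) * feats
print(blended.shape)  # (50, 256)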