Add non-search inference support. (#82)

Co-authored-by: 源文雨 <41315874+fumiama@users.noreply.github.com>
EntropyRiser 2023-04-17 20:49:42 +08:00, committed by GitHub
parent b0f8a4c7d1
commit 88a43e14d1

gui.py (14 changed lines)

@@ -16,7 +16,6 @@ from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFs
 from i18n import I18nAuto
 i18n = I18nAuto()
-print(i18n.language_map)
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -33,10 +32,11 @@ class RVC:
         self.f0_max = 1100
         self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
         self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
-        self.index = faiss.read_index(index_path)
+        if index_rate !=0:
+            self.index = faiss.read_index(index_path)
+            self.big_npy = np.load(npy_path)
+            print('index search enabled')
         self.index_rate = index_rate
-        """NOT YET USED"""
-        self.big_npy = np.load(npy_path)
         model_path = hubert_path
         print("load model(s) from {}".format(model_path))
         models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
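
The constructor hunk above now loads the faiss index and the cached feature matrix only when index_rate is nonzero. A minimal standalone sketch of that pattern follows; it is illustrative only, not the project's code: the class name RetrievalState and its parameters are invented, and only the gating logic mirrors the commit.

import numpy as np


class RetrievalState:
    # Hypothetical stand-in for the retrieval-related part of RVC.__init__.
    def __init__(self, index_path, npy_path, index_rate):
        self.index_rate = index_rate
        if index_rate != 0:
            import faiss  # only needed when index search is enabled

            # Load the faiss index and the cached training features up front,
            # because inference will blend them in later.
            self.index = faiss.read_index(index_path)
            self.big_npy = np.load(npy_path)
            print("index search enabled")
        # With index_rate == 0 neither file is touched, so a model can be
        # run without any .index / .npy artifacts on disk.


# Non-search inference needs no index files at all:
state = RetrievalState("unused.index", "unused.npy", index_rate=0)
print(hasattr(state, "index"))  # False -> the search branch will be skipped
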
@@ -112,8 +112,8 @@ class RVC:
         ####索引优化 (index optimization)
         if (
-            isinstance(self.index, type(None)) == False
-            and isinstance(self.big_npy, type(None)) == False
+            hasattr(self,'index')
+            and hasattr(self,'big_npy')
             and self.index_rate != 0
         ):
             npy = feats[0].cpu().numpy().astype("float32")
@@ -123,6 +123,8 @@ class RVC:
                 torch.from_numpy(npy).unsqueeze(0).to(device) * self.index_rate
                 + (1 - self.index_rate) * feats
             )
+        else:
+            print('index search FAIL or disabled')
         feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
         torch.cuda.synchronize()
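
Putting the two inference-time hunks together: retrieval is attempted only when the index and feature matrix were actually loaded and the blend ratio is nonzero; otherwise the features pass through unchanged and the new fallback message is printed. The sketch below is a hedged reconstruction, not quoted from gui.py: the helper name blend_with_index is invented, and the faiss lookup plus inverse-squared-distance weighting in the middle are assumed, since those lines are context the diff above does not show.

import numpy as np
import torch


def blend_with_index(feats, state, device):
    # Hypothetical helper mirroring the gate added in this commit.
    if (
        hasattr(state, "index")
        and hasattr(state, "big_npy")
        and state.index_rate != 0
    ):
        npy = feats[0].cpu().numpy().astype("float32")
        # Assumed retrieval step (not shown in the hunks): k nearest training
        # features, averaged with inverse-squared-distance weights.
        score, ix = state.index.search(npy, k=8)
        weight = np.square(1 / score)
        weight /= weight.sum(axis=1, keepdims=True)
        npy = np.sum(state.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
        # Blend retrieved features with the live features, as in the diff.
        feats = (
            torch.from_numpy(npy).unsqueeze(0).to(device) * state.index_rate
            + (1 - state.index_rate) * feats
        )
    else:
        # Non-search inference path added by this commit.
        print("index search FAIL or disabled")
    return feats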