optimize(vc): use np.multiply instead of general *

This commit is contained in:
源文雨 2024-06-02 16:44:27 +09:00
parent 976869e5fb
commit 7e48279c6c
4 changed files with 8 additions and 4 deletions

View File

@@ -32,7 +32,7 @@ version_config_list = [
def singleton_variable(func):
def wrapper(*args, **kwargs):
if not wrapper.instance:
if wrapper.instance is None:
wrapper.instance = func(*args, **kwargs)
return wrapper.instance

View File

@@ -6,6 +6,10 @@ now_dir = os.getcwd()
sys.path.append(now_dir)
load_dotenv()
load_dotenv("sha256.env")
if sys.platform == "darwin":
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from infer.modules.vc import VC
from infer.modules.uvr5.modules import uvr
from infer.lib.train.process_ckpt import (

View File

@@ -201,7 +201,7 @@ class VC:
self.version,
protect,
f0_file,
)
).astype(np.int16)
if self.tgt_sr != resample_sr >= 16000:
tgt_sr = resample_sr
else:

View File

@@ -288,7 +288,7 @@ class Pipeline(object):
hasp = pitch is not None and pitchf is not None
arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid)
audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy()
del hasp, arg
del arg
del feats, p_len, padding_mask
if torch.cuda.is_available():
torch.cuda.empty_cache()
@@ -469,7 +469,7 @@ class Pipeline(object):
max_int16 = 32768
if audio_max > 1:
max_int16 /= audio_max
audio_opt = (audio_opt * max_int16).astype(np.int16)
np.multiply(audio_opt, max_int16, audio_opt)
del pitch, pitchf, sid
if torch.cuda.is_available():
torch.cuda.empty_cache()