diff --git a/infer-web.py b/infer-web.py
index 3eaf2fb..751bf0f 100644
--- a/infer-web.py
+++ b/infer-web.py
@@ -148,19 +148,20 @@ def get_vc(sid):
     global n_spk,tgt_sr,net_g,vc,cpt
     if(sid==[]):
         global hubert_model
-        print("clean_empty_cache")
-        del n_spk, vc, hubert_model,tgt_sr#,cpt
-        hubert_model = net_g=n_spk=vc=hubert_model=tgt_sr=None
-        torch.cuda.empty_cache()
-        ### the cleanup below is not thorough unless we go through this song and dance
-        if_f0 = cpt.get("f0", 1)
-        if (if_f0 == 1):
-            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
-        else:
-            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-        del net_g,cpt
-        torch.cuda.empty_cache()
-        cpt=None
+        if (hubert_model != None):  # with polling in mind, check whether sid switched from a loaded model to no model
+            print("clean_empty_cache")
+            del net_g, n_spk, vc, hubert_model,tgt_sr#,cpt
+            hubert_model = net_g=n_spk=vc=hubert_model=tgt_sr=None
+            torch.cuda.empty_cache()
+            ### the cleanup below is not thorough unless we go through this song and dance
+            if_f0 = cpt.get("f0", 1)
+            if (if_f0 == 1):
+                net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
+            else:
+                net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+            del net_g,cpt
+            torch.cuda.empty_cache()
+            cpt=None
         return {"visible": False, "__type__": "update"}
     person = "%s/%s" % (weight_root, sid)
     print("loading %s"%person)
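
For reviewers, here is a minimal standalone sketch of the guard pattern the new "if (hubert_model != None)" check implements: the teardown and torch.cuda.empty_cache() only run when a model is actually loaded, so repeated polling with an empty sid does not re-run the cleanup against state that was already cleared (in the unguarded version a second pass would reach cpt.get(...) after cpt had already been set to None). The helper name release_models and the simplified module-level globals below are illustrative placeholders, not the actual infer-web.py API.

# Sketch of the guarded-cleanup idea, with simplified stand-ins for the real globals.
import torch

hubert_model = None  # stands in for the cached HuBERT feature extractor
net_g = None         # stands in for the loaded synthesizer

def release_models():
    """Free the cached models and GPU memory, but only if a model is loaded.

    The web UI polls this path, so a second call with an empty sid must not
    repeat the teardown against state that has already been cleared.
    """
    global hubert_model, net_g
    if hubert_model is None:  # sid was already "no model": nothing to clean up
        return
    hubert_model = None
    net_g = None
    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # return the freed VRAM to the CUDA allocator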