From bf1170012564463e9b9f13e7174b1eb499bdcbd2 Mon Sep 17 00:00:00 2001
From: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Date: Sat, 3 Jun 2023 07:08:35 +0000
Subject: [PATCH] fix m1/m2 user training

---
 train_nsf_sim_cache_sid_load_pretrain.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/train_nsf_sim_cache_sid_load_pretrain.py b/train_nsf_sim_cache_sid_load_pretrain.py
index 1e53cd9..bc64e5f 100644
--- a/train_nsf_sim_cache_sid_load_pretrain.py
+++ b/train_nsf_sim_cache_sid_load_pretrain.py
@@ -9,7 +9,7 @@ import datetime
 hps = utils.get_hparams()
 os.environ["CUDA_VISIBLE_DEVICES"] = hps.gpus.replace("-", ",")
 n_gpus = len(hps.gpus.split("-"))
-from random import shuffle
+from random import shuffle,randint
 import traceback, json, argparse, itertools, math, torch, pdb
 
 torch.backends.cudnn.deterministic = False
@@ -67,9 +67,9 @@ class EpochRecorder:
 
 def main():
     n_gpus = torch.cuda.device_count()
+    if torch.cuda.is_available()==False and torch.backends.mps.is_available()==True:n_gpus = 1
     os.environ["MASTER_ADDR"] = "localhost"
-    os.environ["MASTER_PORT"] = "51545"
-
+    os.environ["MASTER_PORT"] = str(randint(20000,55555))
     children = []
     for i in range(n_gpus):
         subproc = mp.Process(
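
Note: a minimal, self-contained sketch of what the patched lines do, assuming PyTorch >= 1.12 (where torch.backends.mps exists). The helper names resolve_world_size and configure_rendezvous are illustrative only and are not part of the repository; the actual patch applies the same logic inline in main().

import os
from random import randint

import torch


def resolve_world_size() -> int:
    # One process per CUDA GPU; on Apple Silicon (M1/M2) CUDA is absent,
    # so torch.cuda.device_count() returns 0. Fall back to a single
    # training process when the MPS backend is available instead.
    n_gpus = torch.cuda.device_count()
    if not torch.cuda.is_available() and torch.backends.mps.is_available():
        n_gpus = 1
    return n_gpus


def configure_rendezvous() -> None:
    # Environment variables read by torch.distributed for its TCP
    # rendezvous. Picking a random port instead of the old fixed 51545
    # avoids "address already in use" failures when a previous run
    # still holds the port.
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = str(randint(20000, 55555))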