Mirror of https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI.git

Merge pull request #1653 from RVC-Project/dev
chore(sync): merge dev into main

commit 443a04bc66
@@ -1 +1 @@
{"pth_path": "assets/weights/kikiV1.pth", "index_path": "logs/kikiV1.index", "sg_input_device": "VoiceMeeter Output (VB-Audio Vo (MME)", "sg_output_device": "VoiceMeeter Input (VB-Audio Voi (MME)", "threhold": -45.0, "pitch": 2.0, "rms_mix_rate": 0.0, "index_rate": 0.0, "block_time": 0.52, "crossfade_length": 0.15, "extra_time": 2.46, "n_cpu": 6.0, "use_jit": false, "f0method": "rmvpe"}
{"pth_path": "assets/weights/kikiV1.pth", "index_path": "logs/kikiV1.index", "sg_input_device": "VoiceMeeter Output (VB-Audio Vo (MME)", "sg_output_device": "VoiceMeeter Input (VB-Audio Voi (MME)", "sr_type": "sr_model", "threhold": -60.0, "pitch": 12.0, "rms_mix_rate": 0.5, "index_rate": 0.0, "block_time": 0.2, "crossfade_length": 0.08, "extra_time": 2.00, "n_cpu": 4.0, "use_jit": false, "use_pv": false, "f0method": "fcpe"}
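Both revisions of this realtime-GUI settings file are plain JSON, so they can be inspected directly. A minimal sketch, assuming a local copy of the file (the diff does not name its path, so the path below is a placeholder); note that the key really is spelled "threhold" in this schema:

````
import json

CONFIG_PATH = "config.json"  # placeholder; the diff does not show the file name

with open(CONFIG_PATH, encoding="utf-8") as f:
    cfg = json.load(f)

# "threhold" (sic) is the actual key used by the GUI config above.
print(cfg["pth_path"], cfg["threhold"], cfg["f0method"])
````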
@@ -32,26 +32,25 @@ Realtime Voice Conversion GUI: go-realtime-gui.bat

![image](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/assets/129054828/143246a9-8b42-4dd1-a197-430ede4d15d7)

> The dataset for the pre-training model uses nearly 50 hours of high quality VCTK open source dataset.
> The dataset for the pre-training model uses nearly 50 hours of high quality audio from the VCTK open source dataset.

> High quality licensed song datasets will be added to training-set one after another for your use, without worrying about copyright infringement.
> High quality licensed song datasets will be added to the training-set often for your use, without having to worry about copyright infringement.

> Please look forward to the pretrained base model of RVCv3, which has larger parameters, more training data, better results, unchanged inference speed, and requires less training data.

## Summary
This repository has the following features:
## Features:

+ Reduce tone leakage by replacing the source feature with the training-set feature using top1 retrieval;
+ Easy and fast training, even on relatively poor graphics cards;
+ Training with a small amount of data also obtains relatively good results (>=10min low noise speech recommended);
+ Supporting model fusion to change timbres (using ckpt processing tab->ckpt merge);
+ Easy-to-use Webui interface;
+ Use the UVR5 model to quickly separate vocals and instruments.
+ Use the most powerful High-pitch Voice Extraction Algorithm [InterSpeech2023-RMVPE](#Credits) to prevent the muted sound problem. Provides the best results (significantly) and is faster, with even lower resource consumption than Crepe_full.
+ AMD/Intel graphics cards acceleration supported.
+ Easy + fast training, even on poor graphics cards;
+ Training with a small amount of data (>=10min low noise speech recommended);
+ Model fusion to change timbres (using ckpt processing tab->ckpt merge);
+ Easy-to-use WebUI;
+ UVR5 model to quickly separate vocals and instruments;
+ High-pitch Voice Extraction Algorithm [InterSpeech2023-RMVPE](#Credits) to prevent a muted sound problem. Provides the best results (significantly) and is faster with lower resource consumption than Crepe_full;
+ AMD/Intel graphics cards acceleration supported;
+ Intel ARC graphics cards acceleration with IPEX supported.

## Preparing the environment
The following commands need to be executed in the environment of Python version 3.8 or higher.
The following commands need to be executed with Python 3.8 or higher.

(Windows/Linux)
First install the main dependencies through pip:

@@ -166,7 +165,7 @@ You might also need to set these environment variables (e.g. on an RX6700XT):
export ROCM_PATH=/opt/rocm
export HSA_OVERRIDE_GFX_VERSION=10.3.0
````
Also make sure your user is part of the `render` and `video` group:
Make sure your user is part of the `render` and `video` groups:
````
sudo usermod -aG render $USERNAME
sudo usermod -aG video $USERNAME
@@ -3,6 +3,7 @@
"A模型权重": "Weight (w) for Model A:",
"A模型路径": "Path to Model A:",
"B模型路径": "Path to Model B:",
"E:\\语音音频+标注\\米津玄师\\src": "C:\\Users\\Desktop\\src",
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:",
"Index Rate": "Index Rate",
"Onnx导出": "Export Onnx",
@@ -25,6 +26,8 @@
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Batch processing for vocal accompaniment separation using the UVR5 model.<br>Example of a valid folder path format: D:\\path\\to\\input\\folder (copy it from the file manager address bar).<br>The model is divided into three categories:<br>1. Preserve vocals: Choose this option for audio without harmonies. It preserves vocals better than HP5. It includes two built-in models: HP2 and HP3. HP3 may slightly leak accompaniment but preserves vocals slightly better than HP2.<br>2. Preserve main vocals only: Choose this option for audio with harmonies. It may weaken the main vocals. It includes one built-in model: HP5.<br>3. De-reverb and de-delay models (by FoxJoy):<br> (1) MDX-Net: The best choice for stereo reverb removal but cannot remove mono reverb;<br> (234) DeEcho: Removes delay effects. Aggressive mode removes more thoroughly than Normal mode. DeReverb additionally removes reverb and can remove mono reverb, but not very effectively for heavily reverberated high-frequency content.<br>De-reverb/de-delay notes:<br>1. The processing time for the DeEcho-DeReverb model is approximately twice as long as the other two DeEcho models.<br>2. The MDX-Net-Dereverb model is quite slow.<br>3. The recommended cleanest configuration is to apply MDX-Net first and then DeEcho-Aggressive.",
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2:",
"伴奏人声分离&去混响&去回声": "Vocals/Accompaniment Separation & Reverberation Removal",
"使用模型采样率": "使用模型采样率",
"使用设备采样率": "使用设备采样率",
"保存名": "Save name:",
"保存的文件名, 默认空为和源文件同名": "Save file name (default: same as the source file):",
"保存的模型名不带后缀": "Saved model name (without extension):",
@@ -38,11 +41,12 @@
"加载模型": "Load model",
"加载预训练底模D路径": "Load pre-trained base model D path:",
"加载预训练底模G路径": "Load pre-trained base model G path:",
"单次推理": "单次推理",
"单次推理": "Single Inference",
"卸载音色省显存": "Unload voice to save GPU memory:",
"变调(整数, 半音数量, 升八度12降八度-12)": "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):",
"后处理重采样至最终采样率,0为不进行重采样": "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:",
"否": "No",
"启用相位声码器": "启用相位声码器",
"响应阈值": "Response threshold",
"响度因子": "loudness factor",
"处理数据": "Process data",
@@ -54,7 +58,7 @@
"很遗憾您这没有能用的显卡来支持您训练": "Unfortunately, there is no compatible GPU available to support your training.",
"性能设置": "Performance settings",
"总训练轮数total_epoch": "Total training epochs (total_epoch):",
"批量推理": "批量推理",
"批量推理": "Batch Inference",
"批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').",
"指定输出主人声文件夹": "Specify the output folder for vocals:",
"指定输出文件夹": "Specify output folder:",
@@ -120,7 +124,8 @@
"选择.pth文件": "Select the .pth file",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Select the pitch extraction algorithm ('pm': faster extraction but lower-quality speech; 'harvest': better bass but extremely slow; 'crepe': better quality but GPU intensive; 'rmvpe': best quality with little GPU load)",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Select the pitch extraction algorithm: when extracting singing, you can use 'pm' to speed up. For high-quality speech with fast performance, but worse CPU usage, you can use 'dio'. 'harvest' results in better quality but is slower. 'rmvpe' has the best results and consumes less CPU/GPU",
"采样率:": "采样率:",
"采样长度": "Sample length",
"重载设备列表": "Reload device list",
"音调设置": "Pitch settings",
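The locale diffs above and below all follow the same pattern: the Chinese UI string is the key, the translation is the value, and untranslated entries map the key to itself. A minimal sketch of how such a table is typically consumed; the class and file path here are illustrative, not the project's actual i18n API:

````
import json

class I18n:
    """Look up a Chinese source string in a locale table, falling back to the key."""

    def __init__(self, locale_path: str):
        with open(locale_path, encoding="utf-8") as f:
            self.table = json.load(f)

    def __call__(self, key: str) -> str:
        # Untranslated entries (key == value) and missing keys both
        # end up showing the original Chinese string.
        return self.table.get(key, key)

i18n = I18n("i18n/locale/en_US.json")  # illustrative path
print(i18n("加载模型"))  # -> "Load model"
````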
@@ -3,6 +3,7 @@
"A模型权重": "Un peso modelo para el modelo A.",
"A模型路径": "Modelo A ruta.",
"B模型路径": "Modelo B ruta.",
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Archivo de curva F0, opcional, un tono por línea, en lugar de F0 predeterminado y cambio de tono",
"Index Rate": "Tasa de índice",
"Onnx导出": "Exportar Onnx",
@@ -25,6 +26,8 @@
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Procesamiento por lotes para la separación de acompañamiento vocal utilizando el modelo UVR5.<br>Ejemplo de formato de ruta de carpeta válido: D:\\ruta\\a\\la\\carpeta\\de\\entrada (copiar desde la barra de direcciones del administrador de archivos).<br>El modelo se divide en tres categorías:<br>1. Preservar voces: Elija esta opción para audio sin armonías. Preserva las voces mejor que HP5. Incluye dos modelos incorporados: HP2 y HP3. HP3 puede filtrar ligeramente el acompañamiento pero conserva las voces un poco mejor que HP2.<br>2. Preservar solo voces principales: Elija esta opción para audio con armonías. Puede debilitar las voces principales. Incluye un modelo incorporado: HP5.<br>3. Modelos de des-reverberación y des-retardo (por FoxJoy):<br> (1) MDX-Net: La mejor opción para la eliminación de reverberación estéreo pero no puede eliminar la reverberación mono;<br> (234) DeEcho: Elimina efectos de retardo. El modo Agresivo elimina más a fondo que el modo Normal. DeReverb adicionalmente elimina la reverberación y puede eliminar la reverberación mono, pero no muy efectivamente para contenido de alta frecuencia fuertemente reverberado.<br>Notas de des-reverberación/des-retardo:<br>1. El tiempo de procesamiento para el modelo DeEcho-DeReverb es aproximadamente el doble que los otros dos modelos DeEcho.<br>2. El modelo MDX-Net-Dereverb es bastante lento.<br>3. La configuración más limpia recomendada es aplicar primero MDX-Net y luego DeEcho-Agresivo.",
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Separe los números de identificación de la GPU con '-' al ingresarlos. Por ejemplo, '0-1-2' significa usar GPU 0, GPU 1 y GPU 2.",
"伴奏人声分离&去混响&去回声": "Separación de voz acompañante & eliminación de reverberación & eco",
"使用模型采样率": "使用模型采样率",
"使用设备采样率": "使用设备采样率",
"保存名": "Guardar nombre",
"保存的文件名, 默认空为和源文件同名": "Nombre del archivo que se guardará, el valor predeterminado es el mismo que el nombre del archivo de origen",
"保存的模型名不带后缀": "Nombre del modelo guardado sin extensión.",
@@ -43,6 +46,7 @@
"变调(整数, 半音数量, 升八度12降八度-12)": "Cambio de tono (entero, número de semitonos, subir una octava +12 o bajar una octava -12)",
"后处理重采样至最终采样率,0为不进行重采样": "Remuestreo posterior al proceso a la tasa de muestreo final, 0 significa no remuestrear",
"否": "No",
"启用相位声码器": "启用相位声码器",
"响应阈值": "Umbral de respuesta",
"响度因子": "factor de sonoridad",
"处理数据": "Procesar datos",
@@ -121,6 +125,7 @@
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Seleccione el algoritmo de extracción de tono, las voces de entrada se pueden acelerar con pm, harvest tiene buenos graves pero es muy lento, crepe es bueno pero se come las GPUs",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Seleccione el algoritmo de extracción de tono, use 'pm' para acelerar la entrada de canto, 'harvest' es bueno para los graves pero extremadamente lento, 'crepe' tiene buenos resultados pero consume GPU",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Seleccione el algoritmo de extracción de tono: la canción de entrada se puede acelerar con pm, la voz de alta calidad pero CPU pobre se puede acelerar con dio, harvest es mejor pero más lento, rmvpe es el mejor y se come ligeramente la CPU/GPU",
"采样率:": "采样率:",
"采样长度": "Longitud de muestreo",
"重载设备列表": "Actualizar lista de dispositivos",
"音调设置": "Ajuste de tono",
@@ -3,6 +3,7 @@
"A模型权重": "Poids (w) pour le modèle A :",
"A模型路径": "Chemin d'accès au modèle A :",
"B模型路径": "Chemin d'accès au modèle B :",
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Fichier de courbe F0 (facultatif). Une hauteur par ligne. Remplace la fréquence fondamentale par défaut et la modulation de la hauteur :",
"Index Rate": "Taux d'indexation",
"Onnx导出": "Exporter en ONNX",
@@ -25,6 +26,8 @@
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Traitement en lot pour la séparation de la voix et de l'accompagnement vocal à l'aide du modèle UVR5.<br>Exemple d'un format de chemin de dossier valide : D:\\chemin\\vers\\dossier\\d'entrée (copiez-le depuis la barre d'adresse du gestionnaire de fichiers).<br>Le modèle est divisé en trois catégories :<br>1. Préserver la voix : Choisissez cette option pour l'audio sans harmonies. Elle préserve la voix mieux que HP5. Il comprend deux modèles intégrés : HP2 et HP3. HP3 peut légèrement laisser passer l'accompagnement mais préserve légèrement mieux la voix que HP2.<br>2. Préserver uniquement la voix principale : Choisissez cette option pour l'audio avec harmonies. Cela peut affaiblir la voix principale. Il comprend un modèle intégré : HP5.<br>3. Modèles de suppression de la réverbération et du délai (par FoxJoy) :<br> (1) MDX-Net : Le meilleur choix pour la suppression de la réverbération stéréo, mais ne peut pas supprimer la réverbération mono.<br> (234) DeEcho : Supprime les effets de délai. Le mode Aggressive supprime plus efficacement que le mode Normal. DeReverb supprime également la réverbération et peut supprimer la réverbération mono, mais pas très efficacement pour les contenus à haute fréquence fortement réverbérés.<br>Notes sur la suppression de la réverbération et du délai :<br>1. Le temps de traitement pour le modèle DeEcho-DeReverb est environ deux fois plus long que pour les autres deux modèles DeEcho.<br>2. Le modèle MDX-Net-Dereverb est assez lent.<br>3. La configuration la plus propre recommandée est d'appliquer d'abord MDX-Net, puis DeEcho-Aggressive.",
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Entrez le(s) index GPU séparé(s) par '-', par exemple, 0-1-2 pour utiliser les GPU 0, 1 et 2 :",
"伴奏人声分离&去混响&去回声": "Séparation des voix/accompagnement et suppression de la réverbération",
"使用模型采样率": "使用模型采样率",
"使用设备采样率": "使用设备采样率",
"保存名": "Nom de sauvegarde :",
"保存的文件名, 默认空为和源文件同名": "Nom du fichier de sauvegarde (par défaut : identique au nom du fichier source) :",
"保存的模型名不带后缀": "Nom du modèle enregistré (sans extension) :",
@@ -43,6 +46,7 @@
"变调(整数, 半音数量, 升八度12降八度-12)": "Transposer (entier, nombre de demi-tons, monter d'une octave : 12, descendre d'une octave : -12) :",
"后处理重采样至最终采样率,0为不进行重采样": "Rééchantillonner l'audio de sortie en post-traitement à la fréquence d'échantillonnage finale. Réglez sur 0 pour ne pas effectuer de rééchantillonnage :",
"否": "Non",
"启用相位声码器": "启用相位声码器",
"响应阈值": "Seuil de réponse",
"响度因子": "Facteur de volume sonore",
"处理数据": "Traitement des données",
@@ -121,6 +125,7 @@
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Sélection de l'algorithme d'extraction de la hauteur, les voix d'entrée peuvent être accélérées avec pm, harvest a de bonnes basses mais est très lent, crepe est bon mais consomme beaucoup de ressources GPU.",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Sélectionnez l'algorithme d'extraction de la hauteur de ton (\"pm\" : extraction plus rapide mais parole de moindre qualité ; \"harvest\" : meilleure basse mais extrêmement lente ; \"crepe\" : meilleure qualité mais utilisation intensive du GPU), \"rmvpe\" : meilleure qualité et peu d'utilisation du GPU.",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "Sélection de l'algorithme d'extraction de la hauteur : la chanson d'entrée peut être traitée plus rapidement par pm, avec une voix de haute qualité mais un CPU médiocre, par dio, harvest est meilleur mais plus lent, rmvpe est le meilleur, mais consomme légèrement le CPU/GPU.",
"采样率:": "采样率:",
"采样长度": "Longueur de l'échantillon",
"重载设备列表": "Recharger la liste des dispositifs",
"音调设置": "Réglages de la hauteur",
@@ -3,6 +3,7 @@
"A模型权重": "Peso (w) per il modello A:",
"A模型路径": "Percorso per il modello A:",
"B模型路径": "Percorso per il modello B:",
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "File curva F0 (opzionale). ",
"Index Rate": "Tasso di indice",
"Onnx导出": "Esporta Onnx",
@@ -25,6 +26,8 @@
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Elaborazione batch per la separazione dell'accompagnamento vocale utilizzando il modello UVR5.<br>Esempio di un formato di percorso di cartella valido: D:\\path\\to\\input\\folder (copialo dalla barra degli indirizzi del file manager).<br>Il modello è suddiviso in tre categorie:<br>1. Conserva la voce: scegli questa opzione per l'audio senza armonie. <br>2. Mantieni solo la voce principale: scegli questa opzione per l'audio con armonie. <br>3. Modelli di de-riverbero e de-delay (di FoxJoy):<br> (1) MDX-Net: la scelta migliore per la rimozione del riverbero stereo ma non può rimuovere il riverbero mono;<br><br>Note di de-riverbero/de-delay:<br>1. Il tempo di elaborazione per il modello DeEcho-DeReverb è circa il doppio rispetto agli altri due modelli DeEcho.<br>2. Il modello MDX-Net-Dereverb è piuttosto lento.<br>3. La configurazione più pulita consigliata consiste nell'applicare prima MDX-Net e poi DeEcho-Aggressive.",
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Inserisci gli indici GPU separati da '-', ad esempio 0-1-2 per utilizzare GPU 0, 1 e 2:",
"伴奏人声分离&去混响&去回声": "Separazione voce/accompagnamento",
"使用模型采样率": "使用模型采样率",
"使用设备采样率": "使用设备采样率",
"保存名": "Salva nome:",
"保存的文件名, 默认空为和源文件同名": "Salva il nome del file (predefinito: uguale al file di origine):",
"保存的模型名不带后缀": "Nome del modello salvato (senza estensione):",
@@ -43,6 +46,7 @@
"变调(整数, 半音数量, 升八度12降八度-12)": "Trasposizione (numero intero, numero di semitoni, alza di un'ottava: 12, abbassa di un'ottava: -12):",
"后处理重采样至最终采样率,0为不进行重采样": "Ricampiona l'audio di output in post-elaborazione alla frequenza di campionamento finale. ",
"否": "NO",
"启用相位声码器": "启用相位声码器",
"响应阈值": "Soglia di risposta",
"响度因子": "fattore di sonorità",
"处理数据": "Processa dati",
@@ -121,6 +125,7 @@
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Seleziona l'algoritmo di estrazione del tono (\"pm\": estrazione più veloce ma risultato di qualità inferiore; \"harvest\": bassi migliori ma estremamente lenti; \"crepe\": qualità migliore ma utilizzo intensivo della GPU):",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU",
"采样率:": "采样率:",
"采样长度": "Lunghezza del campione",
"重载设备列表": "Ricaricare l'elenco dei dispositivi",
"音调设置": "Impostazioni del tono",
@@ -3,6 +3,7 @@
"A模型权重": "Aモデルの重み",
"A模型路径": "Aモデルのパス",
"B模型路径": "Bモデルのパス",
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0(最低共振周波数)カーブファイル(オプション、1行に1ピッチ、デフォルトのF0(最低共振周波数)とエレベーションを置き換えます。)",
"Index Rate": "Index Rate",
"Onnx导出": "Onnxエクスポート",
@@ -25,6 +26,8 @@
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "UVR5モデルを使用したボーカル伴奏の分離バッチ処理。<br>有効なフォルダーパスフォーマットの例: D:\\path\\to\\input\\folder (エクスプローラーのアドレスバーからコピーします)。<br>モデルは三つのカテゴリに分かれています:<br>1. ボーカルを保持: ハーモニーのないオーディオに対してこれを選択します。HP5よりもボーカルをより良く保持します。HP2とHP3の二つの内蔵モデルが含まれています。HP3は伴奏をわずかに漏らす可能性がありますが、HP2よりもわずかにボーカルをより良く保持します。<br>2. 主なボーカルのみを保持: ハーモニーのあるオーディオに対してこれを選択します。主なボーカルを弱める可能性があります。HP5の一つの内蔵モデルが含まれています。<br>3. ディリバーブとディレイモデル (by FoxJoy):<br> (1) MDX-Net: ステレオリバーブの除去に最適な選択肢ですが、モノリバーブは除去できません;<br> (234) DeEcho: ディレイ効果を除去します。AggressiveモードはNormalモードよりも徹底的に除去します。DeReverbはさらにリバーブを除去し、モノリバーブを除去することができますが、高周波のリバーブが強い内容に対しては非常に効果的ではありません。<br>ディリバーブ/ディレイに関する注意点:<br>1. DeEcho-DeReverbモデルの処理時間は、他の二つのDeEchoモデルの約二倍です。<br>2. MDX-Net-Dereverbモデルは非常に遅いです。<br>3. 推奨される最もクリーンな設定は、最初にMDX-Netを適用し、その後にDeEcho-Aggressiveを適用することです。",
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "ハイフンで区切って使用するGPUの番号を入力します。例えば0-1-2はGPU0、GPU1、GPU2を使用します",
"伴奏人声分离&去混响&去回声": "伴奏ボーカル分離&残響除去&エコー除去",
"使用模型采样率": "使用模型采样率",
"使用设备采样率": "使用设备采样率",
"保存名": "保存ファイル名",
"保存的文件名, 默认空为和源文件同名": "保存するファイル名、デフォルトでは空欄で元のファイル名と同じ名前になります",
"保存的模型名不带后缀": "拡張子のない保存するモデル名",
@@ -43,6 +46,7 @@
"变调(整数, 半音数量, 升八度12降八度-12)": "ピッチ変更(整数、半音数、上下オクターブ12-12)",
"后处理重采样至最终采样率,0为不进行重采样": "最終的なサンプリングレートへのポストプロセッシングのリサンプリング リサンプリングしない場合は0",
"否": "いいえ",
"启用相位声码器": "启用相位声码器",
"响应阈值": "反応閾値",
"响度因子": "ラウドネス係数",
"处理数据": "データ処理",
@@ -121,6 +125,7 @@
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "ピッチ抽出アルゴリズムの選択、歌声はpmで高速化でき、harvestは低音が良いが信じられないほど遅く、crepeは良く動くがGPUを食います。",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "ピッチ抽出アルゴリズムの選択、歌声はpmで高速化でき、harvestは低音が良いが信じられないほど遅く、crepeは良く動くがGPUを喰います",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "ピッチ抽出アルゴリズムの選択:歌声はpmで高速化でき、入力した音声が高音質でCPUが貧弱な場合はdioで高速化でき、harvestの方が良いが遅く、rmvpeがベストだがCPU/GPUを若干食います。",
"采样率:": "采样率:",
"采样长度": "サンプル長",
"重载设备列表": "デバイスリストをリロードする",
"音调设置": "音程設定",
@@ -3,6 +3,7 @@
"A模型权重": "Весы (w) модели А:",
"A模型路径": "Путь к модели А:",
"B模型路径": "Путь к модели Б:",
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Файл дуги F0 (не обязательно). Одна тональность на каждую строчку. Заменяет обычный F0 и модуляцию тональности:",
"Index Rate": "Темп индекса",
"Onnx导出": "Экспорт ONNX",
@@ -25,6 +26,8 @@
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Пакетная обработка для разделения вокального сопровождения с использованием модели UVR5.<br>Пример допустимого формата пути к папке: D:\\path\\to\\input\\folder<br> Модель разделена на три категории:<br>1. Сохранить вокал: выберите этот вариант для звука без гармоний. Он сохраняет вокал лучше, чем HP5. Он включает в себя две встроенные модели: HP2 и HP3. HP3 может немного пропускать инструментал, но сохраняет вокал немного лучше, чем HP2.<br>2. Сохранить только основной вокал: выберите этот вариант для звука с гармониями. Это может ослабить основной вокал. Он включает одну встроенную модель: HP5.<br>3. Модели удаления реверберации и задержки (от FoxJoy):<br> (1) MDX-Net: лучший выбор для удаления стереореверберации, но он не может удалить монореверберацию;<br> (234) DeEcho: удаляет эффекты задержки. Агрессивный режим удаляет более тщательно, чем Нормальный режим. DeReverb дополнительно удаляет реверберацию и может удалять монореверберацию, но не очень эффективно для сильно реверберированного высокочастотного контента.<br>Примечания по удалению реверберации/задержки:<br>1. Время обработки для модели DeEcho-DeReverb примерно в два раза больше, чем для двух других моделей DeEcho.<br>2. Модель MDX-Net-Dereverb довольно медленная.<br>3. Рекомендуемая самая чистая конфигурация — сначала применить MDX-Net, а затем DeEcho-Aggressive.",
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Введите, какие(-ую) GPU(-у) хотите использовать через '-', например 0-1-2, чтобы использовать GPU с номерами 0, 1 и 2:",
"伴奏人声分离&去混响&去回声": "Разделение вокала/аккомпанемента и удаление эхо",
"使用模型采样率": "使用模型采样率",
"使用设备采样率": "使用设备采样率",
"保存名": "Имя файла для сохранения:",
"保存的文件名, 默认空为和源文件同名": "Название сохранённого файла (по умолчанию: такое же, как и у входного):",
"保存的模型名不带后缀": "Имя файла модели для сохранения (без расширения):",
@@ -43,6 +46,7 @@
"变调(整数, 半音数量, 升八度12降八度-12)": "Изменить высоту голоса (укажите количество полутонов; чтобы поднять голос на октаву, выберите 12, понизить на октаву — -12):",
"后处理重采样至最终采样率,0为不进行重采样": "Изменить частоту дискретизации в выходном файле на финальную. Поставьте 0, чтобы ничего не изменялось:",
"否": "Нет",
"启用相位声码器": "启用相位声码器",
"响应阈值": "Порог ответа",
"响度因子": "коэффициент громкости",
"处理数据": "Обработать данные",
@@ -121,6 +125,7 @@
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Выберите алгоритм оценки высоты голоса ('pm': работает быстро, но даёт низкое качество речи; 'harvest': басы лучше, но работает очень медленно; 'crepe': лучшее качество, но сильно нагружает GPU; 'rmvpe': лучшее качество и минимальная нагрузка на GPU):",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU",
"采样率:": "采样率:",
"采样长度": "Длина сэмпла",
"重载设备列表": "Обновить список устройств",
"音调设置": "Настройка высоты звука",
@@ -3,6 +3,7 @@
"A模型权重": "A Modeli Ağırlığı:",
"A模型路径": "A Modeli Yolu:",
"B模型路径": "B Modeli Yolu:",
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 eğrisi dosyası (isteğe bağlı). Her satırda bir pitch değeri bulunur. Varsayılan F0 ve pitch modülasyonunu değiştirir:",
"Index Rate": "Index Oranı",
"Onnx导出": "Onnx Dışa Aktar",
@@ -25,6 +26,8 @@
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Batch işleme kullanarak vokal eşlik ayrımı için UVR5 modeli kullanılır.<br>Geçerli bir klasör yol formatı örneği: D:\\path\\to\\input\\folder (dosya yöneticisi adres çubuğundan kopyalanır).<br>Model üç kategoriye ayrılır:<br>1. Vokalleri koru: Bu seçeneği, harmoni içermeyen sesler için kullanın. HP5'ten daha iyi bir şekilde vokalleri korur. İki dahili model içerir: HP2 ve HP3. HP3, eşlik sesini hafifçe sızdırabilir, ancak vokalleri HP2'den biraz daha iyi korur.<br>2. Sadece ana vokalleri koru: Bu seçeneği, harmoni içeren sesler için kullanın. Ana vokalleri zayıflatabilir. Bir dahili model içerir: HP5.<br>3. Reverb ve gecikme modelleri (FoxJoy tarafından):<br> (1) MDX-Net: Stereo reverb'i kaldırmak için en iyi seçenek, ancak mono reverb'i kaldıramaz;<br> (234) DeEcho: Gecikme efektlerini kaldırır. Agresif mod, Normal moda göre daha kapsamlı bir şekilde kaldırma yapar. DeReverb ayrıca reverb'i kaldırır ve mono reverb'i kaldırabilir, ancak yoğun yankılı yüksek frekanslı içerikler için çok etkili değildir.<br>Reverb/gecikme notları:<br>1. DeEcho-DeReverb modelinin işleme süresi diğer iki DeEcho modeline göre yaklaşık olarak iki kat daha uzundur.<br>2. MDX-Net-Dereverb modeli oldukça yavaştır.<br>3. Tavsiye edilen en temiz yapılandırma önce MDX-Net'i uygulamak ve ardından DeEcho-Aggressive uygulamaktır.",
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "GPU indekslerini '-' ile ayırarak girin, örneğin 0-1-2, GPU 0, 1 ve 2'yi kullanmak için:",
"伴奏人声分离&去混响&去回声": "Vokal/Müzik Ayrıştırma ve Yankı Giderme",
"使用模型采样率": "使用模型采样率",
"使用设备采样率": "使用设备采样率",
"保存名": "Kaydetme Adı:",
"保存的文件名, 默认空为和源文件同名": "Kaydedilecek dosya adı (varsayılan: kaynak dosya ile aynı):",
"保存的模型名不带后缀": "Kaydedilecek model adı (uzantı olmadan):",
@@ -43,6 +46,7 @@
"变调(整数, 半音数量, 升八度12降八度-12)": "Transpoze et (tamsayı, yarıton sayısıyla; bir oktav yükseltmek için: 12, bir oktav düşürmek için: -12):",
"后处理重采样至最终采样率,0为不进行重采样": "Son işleme aşamasında çıktı sesini son örnekleme hızına yeniden örnekle. 0 değeri için yeniden örnekleme yapılmaz:",
"否": "Hayır",
"启用相位声码器": "启用相位声码器",
"响应阈值": "Tepki eşiği",
"响度因子": "ses yüksekliği faktörü",
"处理数据": "Verileri işle",
@@ -121,6 +125,7 @@
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "Pitch algoritmasını seçin ('pm': daha hızlı çıkarır ancak daha düşük kaliteli konuşma; 'harvest': daha iyi konuşma sesi ancak son derece yavaş; 'crepe': daha da iyi kalite ancak GPU yoğunluğu gerektirir):",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU",
"采样率:": "采样率:",
"采样长度": "Örnekleme uzunluğu",
"重载设备列表": "Cihaz listesini yeniden yükle",
"音调设置": "Pitch ayarları",
@@ -3,6 +3,7 @@
"A模型权重": "A模型权重",
"A模型路径": "A模型路径",
"B模型路径": "B模型路径",
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调",
"Index Rate": "Index Rate",
"Onnx导出": "Onnx导出",
@@ -25,6 +26,8 @@
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。",
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2",
"伴奏人声分离&去混响&去回声": "伴奏人声分离&去混响&去回声",
"使用模型采样率": "使用模型采样率",
"使用设备采样率": "使用设备采样率",
"保存名": "保存名",
"保存的文件名, 默认空为和源文件同名": "保存的文件名, 默认空为和源文件同名",
"保存的模型名不带后缀": "保存的模型名不带后缀",
@@ -43,6 +46,7 @@
"变调(整数, 半音数量, 升八度12降八度-12)": "变调(整数, 半音数量, 升八度12降八度-12)",
"后处理重采样至最终采样率,0为不进行重采样": "后处理重采样至最终采样率,0为不进行重采样",
"否": "否",
"启用相位声码器": "启用相位声码器",
"响应阈值": "响应阈值",
"响度因子": "响度因子",
"处理数据": "处理数据",
@@ -121,6 +125,7 @@
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU",
"采样率:": "采样率:",
"采样长度": "采样长度",
"重载设备列表": "重载设备列表",
"音调设置": "音调设置",
@@ -3,6 +3,7 @@
"A模型权重": "A模型權重",
"A模型路径": "A模型路徑",
"B模型路径": "B模型路徑",
"E:\\语音音频+标注\\米津玄师\\src": "E:\\语音音频+标注\\米津玄师\\src",
"F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0曲線檔案,可選,一行一個音高,代替預設的F0及升降調",
"Index Rate": "Index Rate",
"Onnx导出": "Onnx导出",
@@ -25,6 +26,8 @@
"人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br> (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br> (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "使用UVR5模型進行人聲伴奏分離的批次處理。<br>有效資料夾路徑格式的例子:D:\\path\\to\\input\\folder(從檔案管理員地址欄複製)。<br>模型分為三類:<br>1. 保留人聲:選擇這個選項適用於沒有和聲的音訊。它比HP5更好地保留了人聲。它包括兩個內建模型:HP2和HP3。HP3可能輕微漏出伴奏,但比HP2更好地保留了人聲;<br>2. 僅保留主人聲:選擇這個選項適用於有和聲的音訊。它可能會削弱主人聲。它包括一個內建模型:HP5。<br>3. 消除混響和延遲模型(由FoxJoy提供):<br> (1) MDX-Net:對於立體聲混響的移除是最好的選擇,但不能移除單聲道混響;<br> (234) DeEcho:移除延遲效果。Aggressive模式比Normal模式移除得更徹底。DeReverb另外移除混響,可以移除單聲道混響,但對於高頻重的板式混響移除不乾淨。<br>消除混響/延遲注意事項:<br>1. DeEcho-DeReverb模型的處理時間是其他兩個DeEcho模型的近兩倍;<br>2. MDX-Net-Dereverb模型相當慢;<br>3. 個人推薦的最乾淨配置是先使用MDX-Net,然後使用DeEcho-Aggressive。",
"以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "以-分隔輸入使用的卡號, 例如 0-1-2 使用卡0和卡1和卡2",
"伴奏人声分离&去混响&去回声": "伴奏人聲分離&去混響&去回聲",
"使用模型采样率": "使用模型采样率",
"使用设备采样率": "使用设备采样率",
"保存名": "儲存名",
"保存的文件名, 默认空为和源文件同名": "儲存的檔案名,預設空為與來源檔案同名",
"保存的模型名不带后缀": "儲存的模型名不帶副檔名",
@@ -43,6 +46,7 @@
"变调(整数, 半音数量, 升八度12降八度-12)": "變調(整數、半音數量、升八度12降八度-12)",
"后处理重采样至最终采样率,0为不进行重采样": "後處理重採樣至最終採樣率,0為不進行重採樣",
"否": "否",
"启用相位声码器": "启用相位声码器",
"响应阈值": "響應閾值",
"响度因子": "響度因子",
"处理数据": "處理資料",
@@ -121,6 +125,7 @@
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU",
"选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU": "選擇音高提取演算法,輸入歌聲可用pm提速,harvest低音好但巨慢無比,crepe效果好但吃GPU,rmvpe效果最好且微吃GPU",
"选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢,rmvpe效果最好且微吃CPU/GPU",
"采样率:": "采样率:",
"采样长度": "取樣長度",
"重载设备列表": "重載設備列表",
"音调设置": "音調設定",
@@ -1142,7 +1142,7 @@ with gr.Blocks(title="RVC WebUI") as app:
        )
        with gr.Row():
            trainset_dir4 = gr.Textbox(
                label=i18n("输入训练文件夹路径"), value="E:\\语音音频+标注\\米津玄师\\src"
                label=i18n("输入训练文件夹路径"), value=i18n("E:\\语音音频+标注\\米津玄师\\src")
            )
            spk_id5 = gr.Slider(
                minimum=0,
@@ -722,7 +722,8 @@ class SynthesizerTrnMs256NSFsid(nn.Module):
    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()
        if hasattr(self, "enc_q"):
            self.enc_q.remove_weight_norm()

    def __prepare_scriptable__(self):
        for hook in self.dec._forward_pre_hooks.values():
@@ -783,17 +784,20 @@ class SynthesizerTrnMs256NSFsid(nn.Module):
        pitch: torch.Tensor,
        nsff0: torch.Tensor,
        sid: torch.Tensor,
        rate: Optional[torch.Tensor] = None,
        skip_head: Optional[torch.Tensor] = None,
        return_length: Optional[torch.Tensor] = None,
    ):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        if rate is not None:
            assert isinstance(rate, torch.Tensor)
            head = int(z_p.shape[2] * (1 - rate.item()))
            z_p = z_p[:, :, head:]
            x_mask = x_mask[:, :, head:]
            nsff0 = nsff0[:, head:]
        if skip_head is not None and return_length is not None:
            assert isinstance(skip_head, torch.Tensor)
            assert isinstance(return_length, torch.Tensor)
            head = int(skip_head.item())
            length = int(return_length.item())
            z_p = z_p[:, :, head : head + length]
            x_mask = x_mask[:, :, head : head + length]
            nsff0 = nsff0[:, head : head + length]
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec(z * x_mask, nsff0, g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)
@@ -887,7 +891,8 @@ class SynthesizerTrnMs768NSFsid(nn.Module):
    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()
        if hasattr(self, "enc_q"):
            self.enc_q.remove_weight_norm()

    def __prepare_scriptable__(self):
        for hook in self.dec._forward_pre_hooks.values():
@@ -941,16 +946,20 @@ class SynthesizerTrnMs768NSFsid(nn.Module):
        pitch: torch.Tensor,
        nsff0: torch.Tensor,
        sid: torch.Tensor,
        rate: Optional[torch.Tensor] = None,
        skip_head: Optional[torch.Tensor] = None,
        return_length: Optional[torch.Tensor] = None,
    ):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        if rate is not None:
            head = int(z_p.shape[2] * (1.0 - rate.item()))
            z_p = z_p[:, :, head:]
            x_mask = x_mask[:, :, head:]
            nsff0 = nsff0[:, head:]
        if skip_head is not None and return_length is not None:
            assert isinstance(skip_head, torch.Tensor)
            assert isinstance(return_length, torch.Tensor)
            head = int(skip_head.item())
            length = int(return_length.item())
            z_p = z_p[:, :, head : head + length]
            x_mask = x_mask[:, :, head : head + length]
            nsff0 = nsff0[:, head : head + length]
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec(z * x_mask, nsff0, g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)
@@ -1041,7 +1050,8 @@ class SynthesizerTrnMs256NSFsid_nono(nn.Module):
    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()
        if hasattr(self, "enc_q"):
            self.enc_q.remove_weight_norm()

    def __prepare_scriptable__(self):
        for hook in self.dec._forward_pre_hooks.values():
@@ -1087,15 +1097,19 @@ class SynthesizerTrnMs256NSFsid_nono(nn.Module):
        phone: torch.Tensor,
        phone_lengths: torch.Tensor,
        sid: torch.Tensor,
        rate: Optional[torch.Tensor] = None,
        skip_head: Optional[torch.Tensor] = None,
        return_length: Optional[torch.Tensor] = None,
    ):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        if rate is not None:
            head = int(z_p.shape[2] * (1.0 - rate.item()))
            z_p = z_p[:, :, head:]
            x_mask = x_mask[:, :, head:]
        if skip_head is not None and return_length is not None:
            assert isinstance(skip_head, torch.Tensor)
            assert isinstance(return_length, torch.Tensor)
            head = int(skip_head.item())
            length = int(return_length.item())
            z_p = z_p[:, :, head : head + length]
            x_mask = x_mask[:, :, head : head + length]
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec(z * x_mask, g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)
@@ -1186,7 +1200,8 @@ class SynthesizerTrnMs768NSFsid_nono(nn.Module):
    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()
        if hasattr(self, "enc_q"):
            self.enc_q.remove_weight_norm()

    def __prepare_scriptable__(self):
        for hook in self.dec._forward_pre_hooks.values():
@@ -1232,15 +1247,19 @@ class SynthesizerTrnMs768NSFsid_nono(nn.Module):
        phone: torch.Tensor,
        phone_lengths: torch.Tensor,
        sid: torch.Tensor,
        rate: Optional[torch.Tensor] = None,
        skip_head: Optional[torch.Tensor] = None,
        return_length: Optional[torch.Tensor] = None,
    ):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
        if rate is not None:
            head = int(z_p.shape[2] * (1.0 - rate.item()))
            z_p = z_p[:, :, head:]
            x_mask = x_mask[:, :, head:]
        if skip_head is not None and return_length is not None:
            assert isinstance(skip_head, torch.Tensor)
            assert isinstance(return_length, torch.Tensor)
            head = int(skip_head.item())
            length = int(return_length.item())
            z_p = z_p[:, :, head : head + length]
            x_mask = x_mask[:, :, head : head + length]
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec(z * x_mask, g=g)
        return o, x_mask, (z, z_p, m_p, logs_p)
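The change repeated across all four synthesizer classes above does two things: it guards enc_q cleanup with hasattr, and it replaces the fractional rate trim with explicit skip_head/return_length tensors that cut a precise [head : head + length) window out of the latent. A standalone sketch of that slicing, assuming (B, C, T) latents and a (B, T) F0 track:

````
import torch

def trim_latents(z_p, x_mask, nsff0, skip_head=None, return_length=None):
    # Keep only frames [head : head + length) along the time axis,
    # mirroring the infer() slicing added in the diff above.
    if skip_head is not None and return_length is not None:
        head = int(skip_head.item())
        length = int(return_length.item())
        z_p = z_p[:, :, head : head + length]
        x_mask = x_mask[:, :, head : head + length]
        nsff0 = nsff0[:, head : head + length]
    return z_p, x_mask, nsff0

z_p = torch.randn(1, 192, 100)   # (B, C, T) latent
x_mask = torch.ones(1, 1, 100)   # (B, 1, T) frame mask
nsff0 = torch.randn(1, 100)      # (B, T) F0 track
z_p, x_mask, nsff0 = trim_latents(
    z_p, x_mask, nsff0, torch.tensor(20), torch.tensor(50)
)
print(z_p.shape, x_mask.shape, nsff0.shape)  # 50 frames each
````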
@@ -149,7 +149,7 @@ class ResidualCouplingBlock(nn.Module):
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        return x

    def remove_weight_norm(self):
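The one-line fix above matters because, in this version of the coupling layers, flow(...) returns a (tensor, logdet) pair in the reverse direction too; without unpacking, a tuple would be fed into the next flow iteration. A toy illustration (ToyFlow is a stand-in, not the project's ResidualCouplingLayer):

````
import torch

class ToyFlow(torch.nn.Module):
    def forward(self, x, x_mask, g=None, reverse=False):
        # Stand-in: returns a (tensor, logdet) pair like the real coupling layer.
        return x + 1.0, None

flows = [ToyFlow(), ToyFlow()]
x, x_mask = torch.zeros(1, 2, 4), torch.ones(1, 1, 4)
for flow in reversed(flows):
    x, _ = flow(x, x_mask, g=None, reverse=True)  # unpack, as in the fix
print(x.sum())  # tensor(16.) -- still a tensor, not a tuple
````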
@@ -65,7 +65,7 @@ class HarvestF0Predictor(F0Predictor):
        p_len = wav.shape[0] // self.hop_length
        f0, t = pyworld.harvest(
            wav.astype(np.double),
            fs=self.hop_length,
            fs=self.sampling_rate,
            f0_ceil=self.f0_max,
            f0_floor=self.f0_min,
            frame_period=1000 * self.hop_length / self.sampling_rate,
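The fix above passes the true sampling rate to pyworld.harvest instead of the hop length, which would have skewed every F0 estimate. A self-contained sketch of the corrected call, with illustrative rates:

````
import numpy as np
import pyworld

sampling_rate, hop_length = 16000, 160
wav = np.random.uniform(-1, 1, sampling_rate).astype(np.double)  # 1 s of noise

f0, t = pyworld.harvest(
    wav,
    fs=sampling_rate,                                # not hop_length!
    f0_floor=50.0,
    f0_ceil=1100.0,
    frame_period=1000 * hop_length / sampling_rate,  # ms per frame
)
print(f0.shape)  # roughly wav.shape[0] // hop_length frames
````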
@@ -34,4 +34,5 @@ def get_synthesizer(pth_path, device=torch.device("cpu")):
    net_g.load_state_dict(cpt["weight"], strict=False)
    net_g = net_g.float()
    net_g.eval().to(device)
    net_g.remove_weight_norm()
    return net_g, cpt
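For context, a hedged sketch of calling the patched helper; the checkpoint path is a placeholder, and reading the target sample rate from cpt["config"][-1] follows the convention used elsewhere in this codebase:

````
import torch
from infer.lib.jit.get_synthesizer import get_synthesizer

# Placeholder checkpoint path; any trained RVC .pth should work here.
net_g, cpt = get_synthesizer("assets/weights/kikiV1.pth", torch.device("cpu"))
print(cpt["config"][-1])  # target sample rate, by this codebase's convention
````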
@@ -593,16 +593,18 @@ class RMVPE:

    def infer_from_audio(self, audio, thred=0.03):
        # torch.cuda.synchronize()
        t0 = ttime()
        # t0 = ttime()
        if not torch.is_tensor(audio):
            audio = torch.from_numpy(audio)
        mel = self.mel_extractor(
            torch.from_numpy(audio).float().to(self.device).unsqueeze(0), center=True
            audio.float().to(self.device).unsqueeze(0), center=True
        )
        # print(123123123,mel.device.type)
        # torch.cuda.synchronize()
        t1 = ttime()
        # t1 = ttime()
        hidden = self.mel2hidden(mel)
        # torch.cuda.synchronize()
        t2 = ttime()
        # t2 = ttime()
        # print(234234,hidden.device.type)
        if "privateuseone" not in str(self.device):
            hidden = hidden.squeeze(0).cpu().numpy()
@@ -613,7 +615,7 @@ class RMVPE:

        f0 = self.decode(hidden, thred=thred)
        # torch.cuda.synchronize()
        t3 = ttime()
        # t3 = ttime()
        # print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0))
        return f0
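After this change, infer_from_audio accepts either a NumPy array or a torch tensor and converts only when needed. Illustrative usage; the model path follows the repository's usual assets layout but is an assumption here:

````
import numpy as np
import torch
from infer.lib.rmvpe import RMVPE

# Path follows the usual assets layout; adjust to your checkout.
rmvpe = RMVPE("assets/rmvpe/rmvpe.pt", is_half=False, device="cpu")
audio = np.random.uniform(-1, 1, 16000).astype(np.float32)  # 1 s @ 16 kHz

f0_np = rmvpe.infer_from_audio(audio, thred=0.03)              # NumPy in
f0_pt = rmvpe.infer_from_audio(torch.from_numpy(audio), 0.03)  # tensor in
print(f0_np.shape, f0_pt.shape)  # same frame count either way
````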
@@ -52,11 +52,6 @@ def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False)
    Returns:
        :: (B, Freq, Frame) - Linear-frequency Linear-amplitude spectrogram
    """
    # Validation
    if torch.min(y) < -1.07:
        logger.debug("min value is %s", str(torch.min(y)))
    if torch.max(y) > 1.07:
        logger.debug("max value is %s", str(torch.max(y)))

    # Window - Cache if needed
    global hann_window
@@ -86,11 +81,11 @@ def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False)
        pad_mode="reflect",
        normalized=False,
        onesided=True,
        return_complex=False,
        return_complex=True,
    )

    # Linear-frequency Linear-amplitude spectrogram :: (B, Freq, Frame, RealComplex=2) -> (B, Freq, Frame)
    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
    spec = torch.sqrt(spec.real.pow(2) + spec.imag.pow(2) + 1e-6)
    return spec
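Recent PyTorch releases require return_complex=True from torch.stft, so the magnitude is now computed from the real and imaginary parts directly rather than by summing over a trailing real/imag axis. A minimal sketch of the equivalent computation:

````
import torch

y = torch.randn(1, 4096)
window = torch.hann_window(1024)

spec = torch.stft(
    y, n_fft=1024, hop_length=256, win_length=1024, window=window,
    center=False, normalized=False, onesided=True,
    return_complex=True,  # mandatory on recent PyTorch
)
# Same magnitude the old (B, Freq, Frame, 2) layout produced:
mag = torch.sqrt(spec.real.pow(2) + spec.imag.pow(2) + 1e-6)
print(mag.shape)  # (1, 513, frames)
````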
@@ -312,10 +312,10 @@ def get_hparams(init=True):
        "-te", "--total_epoch", type=int, required=True, help="total_epoch"
    )
    parser.add_argument(
        "-pg", "--pretrainG", type=str, default="", help="Pretrained Discriminator path"
        "-pg", "--pretrainG", type=str, default="", help="Pretrained Generator path"
    )
    parser.add_argument(
        "-pd", "--pretrainD", type=str, default="", help="Pretrained Generator path"
        "-pd", "--pretrainD", type=str, default="", help="Pretrained Discriminator path"
    )
    parser.add_argument("-g", "--gpus", type=str, default="0", help="split by -")
    parser.add_argument(
@@ -307,14 +307,14 @@ class AudioPreDeEcho:
                sf.write(
                    os.path.join(
                        ins_root,
                        "instrument_{}_{}.{}".format(name, self.data["agg"], format),
                        "vocal_{}_{}.{}".format(name, self.data["agg"], format),
                    ),
                    (np.array(wav_instrument) * 32768).astype("int16"),
                    self.mp.param["sr"],
                ) #
            else:
                path = os.path.join(
                    ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
                    ins_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
                )
                sf.write(
                    path,
@@ -344,14 +344,14 @@ class AudioPreDeEcho:
                sf.write(
                    os.path.join(
                        vocal_root,
                        "vocal_{}_{}.{}".format(name, self.data["agg"], format),
                        "instrument_{}_{}.{}".format(name, self.data["agg"], format),
                    ),
                    (np.array(wav_vocals) * 32768).astype("int16"),
                    self.mp.param["sr"],
                )
            else:
                path = os.path.join(
                    vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
                    vocal_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
                )
                sf.write(
                    path,
@@ -46,3 +46,4 @@ fastapi==0.88
ffmpy==0.3.1
python-dotenv>=1.0.0
av
torchfcpe
@@ -44,3 +44,4 @@ fastapi==0.88
ffmpy==0.3.1
python-dotenv>=1.0.0
av
torchfcpe
@@ -51,4 +51,5 @@ ffmpy==0.3.1
python-dotenv>=1.0.0
av
PySimpleGUI
sounddevice
sounddevice
torchfcpe
@@ -26,4 +26,5 @@ PySimpleGUI
sounddevice
gradio
noisereduce
onnxruntime-directml
onnxruntime-directml
torchfcpe
@@ -26,3 +26,4 @@ PySimpleGUI
sounddevice
gradio
noisereduce
torchfcpe
@@ -42,6 +42,7 @@ onnxruntime; sys_platform == 'darwin'
onnxruntime-gpu; sys_platform != 'darwin'
torchcrepe==0.0.20
fastapi==0.88
torchfcpe
ffmpy==0.3.1
python-dotenv>=1.0.0
av
20
run.sh
20
run.sh
@@ -1,27 +1,27 @@
-#!/bin/bash
+#!/bin/sh
 
-if [[ "$(uname)" == "Darwin" ]]; then
+if [ "$(uname)" = "Darwin" ]; then
     # macOS specific env:
     export PYTORCH_ENABLE_MPS_FALLBACK=1
     export PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0
-elif [[ "$(uname)" != "Linux" ]]; then
+elif [ "$(uname)" != "Linux" ]; then
     echo "Unsupported operating system."
     exit 1
 fi
 
 if [ -d ".venv" ]; then
     echo "Activate venv..."
-    source .venv/bin/activate
+    . .venv/bin/activate
 else
     echo "Create venv..."
     requirements_file="requirements.txt"
 
     # Check if Python 3.8 is installed
-    if ! command -v python3 &> /dev/null; then
+    if ! command -v python3 >/dev/null 2>&1; then
         echo "Python 3 not found. Attempting to install 3.8..."
-        if [[ "$(uname)" == "Darwin" ]] && command -v brew &> /dev/null; then
+        if [ "$(uname)" = "Darwin" ] && command -v brew >/dev/null 2>&1; then
             brew install python@3.8
-        elif [[ "$(uname)" == "Linux" ]] && command -v apt-get &> /dev/null; then
+        elif [ "$(uname)" = "Linux" ] && command -v apt-get >/dev/null 2>&1; then
             sudo apt-get update
             sudo apt-get install python3.8
         else
@@ -31,13 +31,13 @@ else
     fi
 
     python3 -m venv .venv
-    source .venv/bin/activate
+    . .venv/bin/activate
 
     # Check if required packages are installed and install them if not
    if [ -f "${requirements_file}" ]; then
         installed_packages=$(python3 -m pip freeze)
         while IFS= read -r package; do
-            [[ "${package}" =~ ^#.* ]] && continue
+            expr "${package}" : "^#.*" > /dev/null && continue
             package_name=$(echo "${package}" | sed 's/[<>=!].*//')
             if ! echo "${installed_packages}" | grep -q "${package_name}"; then
                 echo "${package_name} not found. Attempting to install..."
@@ -53,7 +53,7 @@ fi
 # Download models
 ./tools/dlmodels.sh
 
-if [[ $? -ne 0 ]]; then
+if [ $? -ne 0 ]; then
     exit 1
 fi
 
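The `run.sh` rewrite above is a pure bash-to-POSIX conversion with no change in behaviour: the shebang moves to `/bin/sh`, `[[ … ]]`/`==` tests become `[ … ]`/`=`, `source` becomes the portable `.`, bash's `&> /dev/null` becomes `>/dev/null 2>&1`, and the `=~` regex match (used to skip comment lines in requirements.txt) is replaced with an equivalent `expr` match, so the script now also runs under non-bash shells such as dash.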
@@ -1,421 +1,441 @@
-from io import BytesIO
-import os
-import pickle
-import sys
-import traceback
-from infer.lib import jit
-from infer.lib.jit.get_synthesizer import get_synthesizer
-from time import time as ttime
-import fairseq
-import faiss
-import numpy as np
-import parselmouth
-import pyworld
-import scipy.signal as signal
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torchcrepe
-
-from infer.lib.infer_pack.models import (
-    SynthesizerTrnMs256NSFsid,
-    SynthesizerTrnMs256NSFsid_nono,
-    SynthesizerTrnMs768NSFsid,
-    SynthesizerTrnMs768NSFsid_nono,
-)
-
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-from multiprocessing import Manager as M
-
-from configs.config import Config
-
-# config = Config()
-
-mm = M()
-
-
-def printt(strr, *args):
-    if len(args) == 0:
-        print(strr)
-    else:
-        print(strr % args)
-
-
-# config.device=torch.device("cpu")  ######## force CPU for testing
-# config.is_half=False  ######## force CPU for testing
-class RVC:
-    def __init__(
-        self,
-        key,
-        pth_path,
-        index_path,
-        index_rate,
-        n_cpu,
-        inp_q,
-        opt_q,
-        config: Config,
-        last_rvc=None,
-    ) -> None:
-        """
-        Initialize.
-        """
-        try:
-            if config.dml == True:
-
-                def forward_dml(ctx, x, scale):
-                    ctx.scale = scale
-                    res = x.clone().detach()
-                    return res
-
-                fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml
-            # global config
-            self.config = config
-            self.inp_q = inp_q
-            self.opt_q = opt_q
-            # device="cpu"  ######## force CPU for testing
-            self.device = config.device
-            self.f0_up_key = key
-            self.time_step = 160 / 16000 * 1000
-            self.f0_min = 50
-            self.f0_max = 1100
-            self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
-            self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
-            self.sr = 16000
-            self.window = 160
-            self.n_cpu = n_cpu
-            self.use_jit = self.config.use_jit
-            self.is_half = config.is_half
-
-            if index_rate != 0:
-                self.index = faiss.read_index(index_path)
-                self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
-                printt("Index search enabled")
-            self.pth_path: str = pth_path
-            self.index_path = index_path
-            self.index_rate = index_rate
-
-            if last_rvc is None:
-                models, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
-                    ["assets/hubert/hubert_base.pt"],
-                    suffix="",
-                )
-                hubert_model = models[0]
-                hubert_model = hubert_model.to(self.device)
-                if self.is_half:
-                    hubert_model = hubert_model.half()
-                else:
-                    hubert_model = hubert_model.float()
-                hubert_model.eval()
-                self.model = hubert_model
-            else:
-                self.model = last_rvc.model
-
-            self.net_g: nn.Module = None
-
-            def set_default_model():
-                self.net_g, cpt = get_synthesizer(self.pth_path, self.device)
-                self.tgt_sr = cpt["config"][-1]
-                cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
-                self.if_f0 = cpt.get("f0", 1)
-                self.version = cpt.get("version", "v1")
-                if self.is_half:
-                    self.net_g = self.net_g.half()
-                else:
-                    self.net_g = self.net_g.float()
-
-            def set_jit_model():
-                jit_pth_path = self.pth_path.rstrip(".pth")
-                jit_pth_path += ".half.jit" if self.is_half else ".jit"
-                reload = False
-                if str(self.device) == "cuda":
-                    self.device = torch.device("cuda:0")
-                if os.path.exists(jit_pth_path):
-                    cpt = jit.load(jit_pth_path)
-                    model_device = cpt["device"]
-                    if model_device != str(self.device):
-                        reload = True
-                else:
-                    reload = True
-
-                if reload:
-                    cpt = jit.synthesizer_jit_export(
-                        self.pth_path,
-                        "script",
-                        None,
-                        device=self.device,
-                        is_half=self.is_half,
-                    )
-
-                self.tgt_sr = cpt["config"][-1]
-                self.if_f0 = cpt.get("f0", 1)
-                self.version = cpt.get("version", "v1")
-                self.net_g = torch.jit.load(
-                    BytesIO(cpt["model"]), map_location=self.device
-                )
-                self.net_g.infer = self.net_g.forward
-                self.net_g.eval().to(self.device)
-
-            def set_synthesizer():
-                if self.use_jit and not config.dml:
-                    if self.is_half and "cpu" in str(self.device):
-                        printt(
-                            "Use default Synthesizer model. \
-                            Jit is not supported on the CPU for half floating point"
-                        )
-                        set_default_model()
-                    else:
-                        set_jit_model()
-                else:
-                    set_default_model()
-
-            if last_rvc is None or last_rvc.pth_path != self.pth_path:
-                set_synthesizer()
-            else:
-                self.tgt_sr = last_rvc.tgt_sr
-                self.if_f0 = last_rvc.if_f0
-                self.version = last_rvc.version
-                self.is_half = last_rvc.is_half
-                if last_rvc.use_jit != self.use_jit:
-                    set_synthesizer()
-                else:
-                    self.net_g = last_rvc.net_g
-
-            if last_rvc is not None and hasattr(last_rvc, "model_rmvpe"):
-                self.model_rmvpe = last_rvc.model_rmvpe
-        except:
-            printt(traceback.format_exc())
-
-    def change_key(self, new_key):
-        self.f0_up_key = new_key
-
-    def change_index_rate(self, new_index_rate):
-        if new_index_rate != 0 and self.index_rate == 0:
-            self.index = faiss.read_index(self.index_path)
-            self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
-            printt("Index search enabled")
-        self.index_rate = new_index_rate
-
-    def get_f0_post(self, f0):
-        f0_min = self.f0_min
-        f0_max = self.f0_max
-        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-        f0bak = f0.copy()
-        f0_mel = 1127 * np.log(1 + f0 / 700)
-        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
-            f0_mel_max - f0_mel_min
-        ) + 1
-        f0_mel[f0_mel <= 1] = 1
-        f0_mel[f0_mel > 255] = 255
-        f0_coarse = np.rint(f0_mel).astype(np.int32)
-        return f0_coarse, f0bak
-
-    def get_f0(self, x, f0_up_key, n_cpu, method="harvest"):
-        n_cpu = int(n_cpu)
-        if method == "crepe":
-            return self.get_f0_crepe(x, f0_up_key)
-        if method == "rmvpe":
-            return self.get_f0_rmvpe(x, f0_up_key)
-        if method == "pm":
-            p_len = x.shape[0] // 160 + 1
-            f0_min = 65
-            l_pad = int(np.ceil(1.5 / f0_min * 16000))
-            r_pad = l_pad + 1
-            s = parselmouth.Sound(np.pad(x, (l_pad, r_pad)), 16000).to_pitch_ac(
-                time_step=0.01,
-                voicing_threshold=0.6,
-                pitch_floor=f0_min,
-                pitch_ceiling=1100,
-            )
-            assert np.abs(s.t1 - 1.5 / f0_min) < 0.001
-            f0 = s.selected_array["frequency"]
-            if len(f0) < p_len:
-                f0 = np.pad(f0, (0, p_len - len(f0)))
-            f0 = f0[:p_len]
-            f0 *= pow(2, f0_up_key / 12)
-            return self.get_f0_post(f0)
-        if n_cpu == 1:
-            f0, t = pyworld.harvest(
-                x.astype(np.double),
-                fs=16000,
-                f0_ceil=1100,
-                f0_floor=50,
-                frame_period=10,
-            )
-            f0 = signal.medfilt(f0, 3)
-            f0 *= pow(2, f0_up_key / 12)
-            return self.get_f0_post(f0)
-        f0bak = np.zeros(x.shape[0] // 160 + 1, dtype=np.float64)
-        length = len(x)
-        part_length = 160 * ((length // 160 - 1) // n_cpu + 1)
-        n_cpu = (length // 160 - 1) // (part_length // 160) + 1
-        ts = ttime()
-        res_f0 = mm.dict()
-        for idx in range(n_cpu):
-            tail = part_length * (idx + 1) + 320
-            if idx == 0:
-                self.inp_q.put((idx, x[:tail], res_f0, n_cpu, ts))
-            else:
-                self.inp_q.put(
-                    (idx, x[part_length * idx - 320 : tail], res_f0, n_cpu, ts)
-                )
-        while 1:
-            res_ts = self.opt_q.get()
-            if res_ts == ts:
-                break
-        f0s = [i[1] for i in sorted(res_f0.items(), key=lambda x: x[0])]
-        for idx, f0 in enumerate(f0s):
-            if idx == 0:
-                f0 = f0[:-3]
-            elif idx != n_cpu - 1:
-                f0 = f0[2:-3]
-            else:
-                f0 = f0[2:]
-            f0bak[
-                part_length * idx // 160 : part_length * idx // 160 + f0.shape[0]
-            ] = f0
-        f0bak = signal.medfilt(f0bak, 3)
-        f0bak *= pow(2, f0_up_key / 12)
-        return self.get_f0_post(f0bak)
-
-    def get_f0_crepe(self, x, f0_up_key):
-        if "privateuseone" in str(self.device):  ### DML is not supported and the CPU is too slow to be usable; fall back to pm
-            return self.get_f0(x, f0_up_key, 1, "pm")
-        audio = torch.tensor(np.copy(x))[None].float()
-        # printt("using crepe,device:%s"%self.device)
-        f0, pd = torchcrepe.predict(
-            audio,
-            self.sr,
-            160,
-            self.f0_min,
-            self.f0_max,
-            "full",
-            batch_size=512,
-            # device=self.device if self.device.type!="privateuseone" else "cpu",  ### crepe is full precision only, so half precision is no concern ### CPU latency is too high to be usable
-            device=self.device,
-            return_periodicity=True,
-        )
-        pd = torchcrepe.filter.median(pd, 3)
-        f0 = torchcrepe.filter.mean(f0, 3)
-        f0[pd < 0.1] = 0
-        f0 = f0[0].cpu().numpy()
-        f0 *= pow(2, f0_up_key / 12)
-        return self.get_f0_post(f0)
-
-    def get_f0_rmvpe(self, x, f0_up_key):
-        if hasattr(self, "model_rmvpe") == False:
-            from infer.lib.rmvpe import RMVPE
-
-            printt("Loading rmvpe model")
-            self.model_rmvpe = RMVPE(
-                # "rmvpe.pt", is_half=self.is_half if self.device.type!="privateuseone" else False, device=self.device if self.device.type!="privateuseone" else "cpu"  #### force rmvpe onto the CPU under DML
-                # "rmvpe.pt", is_half=False, device=self.device  #### DML config
-                # "rmvpe.pt", is_half=False, device="cpu"  #### CPU-locked config
-                "assets/rmvpe/rmvpe.pt",
-                is_half=self.is_half,
-                device=self.device,  #### normal path
-                use_jit=self.config.use_jit,
-            )
-            # self.model_rmvpe = RMVPE("aug2_58000_half.pt", is_half=self.is_half, device=self.device)
-        f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
-        f0 *= pow(2, f0_up_key / 12)
-        return self.get_f0_post(f0)
-
-    def infer(
-        self,
-        feats: torch.Tensor,
-        indata: np.ndarray,
-        block_frame_16k,
-        rate,
-        cache_pitch,
-        cache_pitchf,
-        f0method,
-    ) -> np.ndarray:
-        feats = feats.view(1, -1)
-        if self.config.is_half:
-            feats = feats.half()
-        else:
-            feats = feats.float()
-        feats = feats.to(self.device)
-        t1 = ttime()
-        with torch.no_grad():
-            padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
-            inputs = {
-                "source": feats,
-                "padding_mask": padding_mask,
-                "output_layer": 9 if self.version == "v1" else 12,
-            }
-            logits = self.model.extract_features(**inputs)
-            feats = (
-                self.model.final_proj(logits[0]) if self.version == "v1" else logits[0]
-            )
-            feats = torch.cat((feats, feats[:, -1:, :]), 1)
-        t2 = ttime()
-        try:
-            if hasattr(self, "index") and self.index_rate != 0:
-                leng_replace_head = int(rate * feats[0].shape[0])
-                npy = feats[0][-leng_replace_head:].cpu().numpy().astype("float32")
-                score, ix = self.index.search(npy, k=8)
-                weight = np.square(1 / score)
-                weight /= weight.sum(axis=1, keepdims=True)
-                npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
-                if self.config.is_half:
-                    npy = npy.astype("float16")
-                feats[0][-leng_replace_head:] = (
-                    torch.from_numpy(npy).unsqueeze(0).to(self.device) * self.index_rate
-                    + (1 - self.index_rate) * feats[0][-leng_replace_head:]
-                )
-            else:
-                printt("Index search FAILED or disabled")
-        except:
-            traceback.print_exc()
-            printt("Index search FAILED")
-        feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
-        t3 = ttime()
-        if self.if_f0 == 1:
-            pitch, pitchf = self.get_f0(indata, self.f0_up_key, self.n_cpu, f0method)
-            start_frame = block_frame_16k // 160
-            end_frame = len(cache_pitch) - (pitch.shape[0] - 4) + start_frame
-            cache_pitch[:] = np.append(cache_pitch[start_frame:end_frame], pitch[3:-1])
-            cache_pitchf[:] = np.append(
-                cache_pitchf[start_frame:end_frame], pitchf[3:-1]
-            )
-            p_len = min(feats.shape[1], 13000, cache_pitch.shape[0])
-        else:
-            cache_pitch, cache_pitchf = None, None
-            p_len = min(feats.shape[1], 13000)
-        t4 = ttime()
-        feats = feats[:, :p_len, :]
-        if self.if_f0 == 1:
-            cache_pitch = cache_pitch[:p_len]
-            cache_pitchf = cache_pitchf[:p_len]
-            cache_pitch = torch.LongTensor(cache_pitch).unsqueeze(0).to(self.device)
-            cache_pitchf = torch.FloatTensor(cache_pitchf).unsqueeze(0).to(self.device)
-        p_len = torch.LongTensor([p_len]).to(self.device)
-        ii = 0  # sid
-        sid = torch.LongTensor([ii]).to(self.device)
-        with torch.no_grad():
-            if self.if_f0 == 1:
-                # printt(12222222222,feats.device,p_len.device,cache_pitch.device,cache_pitchf.device,sid.device,rate2)
-                infered_audio = self.net_g.infer(
-                    feats,
-                    p_len,
-                    cache_pitch,
-                    cache_pitchf,
-                    sid,
-                    torch.FloatTensor([rate]),
-                )[0][0, 0].data.float()
-            else:
-                infered_audio = self.net_g.infer(
-                    feats, p_len, sid, torch.FloatTensor([rate])
-                )[0][0, 0].data.float()
-        t5 = ttime()
-        printt(
-            "Spent time: fea = %.2fs, index = %.2fs, f0 = %.2fs, model = %.2fs",
-            t2 - t1,
-            t3 - t2,
-            t4 - t3,
-            t5 - t4,
-        )
-        return infered_audio
+from io import BytesIO
+import os
+import pickle
+import sys
+import traceback
+from infer.lib import jit
+from infer.lib.jit.get_synthesizer import get_synthesizer
+from time import time as ttime
+import fairseq
+import faiss
+import numpy as np
+import parselmouth
+import pyworld
+import scipy.signal as signal
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torchcrepe
+
+from infer.lib.infer_pack.models import (
+    SynthesizerTrnMs256NSFsid,
+    SynthesizerTrnMs256NSFsid_nono,
+    SynthesizerTrnMs768NSFsid,
+    SynthesizerTrnMs768NSFsid_nono,
+)
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+from multiprocessing import Manager as M
+
+from configs.config import Config
+
+# config = Config()
+
+mm = M()
+
+
+def printt(strr, *args):
+    if len(args) == 0:
+        print(strr)
+    else:
+        print(strr % args)
+
+
+# config.device=torch.device("cpu")  ######## force CPU for testing
+# config.is_half=False  ######## force CPU for testing
+class RVC:
+    def __init__(
+        self,
+        key,
+        pth_path,
+        index_path,
+        index_rate,
+        n_cpu,
+        inp_q,
+        opt_q,
+        config: Config,
+        last_rvc=None,
+    ) -> None:
+        """
+        Initialize.
+        """
+        try:
+            if config.dml == True:
+
+                def forward_dml(ctx, x, scale):
+                    ctx.scale = scale
+                    res = x.clone().detach()
+                    return res
+
+                fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml
+            # global config
+            self.config = config
+            self.inp_q = inp_q
+            self.opt_q = opt_q
+            # device="cpu"  ######## force CPU for testing
+            self.device = config.device
+            self.f0_up_key = key
+            self.f0_min = 50
+            self.f0_max = 1100
+            self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
+            self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
+            self.n_cpu = n_cpu
+            self.use_jit = self.config.use_jit
+            self.is_half = config.is_half
+
+            if index_rate != 0:
+                self.index = faiss.read_index(index_path)
+                self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
+                printt("Index search enabled")
+            self.pth_path: str = pth_path
+            self.index_path = index_path
+            self.index_rate = index_rate
+            self.cache_pitch: np.ndarray = np.zeros(1024, dtype="int32")
+            self.cache_pitchf = np.zeros(1024, dtype="float32")
+
+            if last_rvc is None:
+                models, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
+                    ["assets/hubert/hubert_base.pt"],
+                    suffix="",
+                )
+                hubert_model = models[0]
+                hubert_model = hubert_model.to(self.device)
+                if self.is_half:
+                    hubert_model = hubert_model.half()
+                else:
+                    hubert_model = hubert_model.float()
+                hubert_model.eval()
+                self.model = hubert_model
+            else:
+                self.model = last_rvc.model
+
+            self.net_g: nn.Module = None
+
+            def set_default_model():
+                self.net_g, cpt = get_synthesizer(self.pth_path, self.device)
+                self.tgt_sr = cpt["config"][-1]
+                cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
+                self.if_f0 = cpt.get("f0", 1)
+                self.version = cpt.get("version", "v1")
+                if self.is_half:
+                    self.net_g = self.net_g.half()
+                else:
+                    self.net_g = self.net_g.float()
+
+            def set_jit_model():
+                jit_pth_path = self.pth_path.rstrip(".pth")
+                jit_pth_path += ".half.jit" if self.is_half else ".jit"
+                reload = False
+                if str(self.device) == "cuda":
+                    self.device = torch.device("cuda:0")
+                if os.path.exists(jit_pth_path):
+                    cpt = jit.load(jit_pth_path)
+                    model_device = cpt["device"]
+                    if model_device != str(self.device):
+                        reload = True
+                else:
+                    reload = True
+
+                if reload:
+                    cpt = jit.synthesizer_jit_export(
+                        self.pth_path,
+                        "script",
+                        None,
+                        device=self.device,
+                        is_half=self.is_half,
+                    )
+
+                self.tgt_sr = cpt["config"][-1]
+                self.if_f0 = cpt.get("f0", 1)
+                self.version = cpt.get("version", "v1")
+                self.net_g = torch.jit.load(
+                    BytesIO(cpt["model"]), map_location=self.device
+                )
+                self.net_g.infer = self.net_g.forward
+                self.net_g.eval().to(self.device)
+
+            def set_synthesizer():
+                if self.use_jit and not config.dml:
+                    if self.is_half and "cpu" in str(self.device):
+                        printt(
+                            "Use default Synthesizer model. \
+                            Jit is not supported on the CPU for half floating point"
+                        )
+                        set_default_model()
+                    else:
+                        set_jit_model()
+                else:
+                    set_default_model()
+
+            if last_rvc is None or last_rvc.pth_path != self.pth_path:
+                set_synthesizer()
+            else:
+                self.tgt_sr = last_rvc.tgt_sr
+                self.if_f0 = last_rvc.if_f0
+                self.version = last_rvc.version
+                self.is_half = last_rvc.is_half
+                if last_rvc.use_jit != self.use_jit:
+                    set_synthesizer()
+                else:
+                    self.net_g = last_rvc.net_g
+
+            if last_rvc is not None and hasattr(last_rvc, "model_rmvpe"):
+                self.model_rmvpe = last_rvc.model_rmvpe
+            if last_rvc is not None and hasattr(last_rvc, "model_fcpe"):
+                self.device_fcpe = last_rvc.device_fcpe
+                self.model_fcpe = last_rvc.model_fcpe
+        except:
+            printt(traceback.format_exc())
+
+    def change_key(self, new_key):
+        self.f0_up_key = new_key
+
+    def change_index_rate(self, new_index_rate):
+        if new_index_rate != 0 and self.index_rate == 0:
+            self.index = faiss.read_index(self.index_path)
+            self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
+            printt("Index search enabled")
+        self.index_rate = new_index_rate
+
+    def get_f0_post(self, f0):
+        f0bak = f0.copy()
+        f0_mel = 1127 * np.log(1 + f0 / 700)
+        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * 254 / (
+            self.f0_mel_max - self.f0_mel_min
+        ) + 1
+        f0_mel[f0_mel <= 1] = 1
+        f0_mel[f0_mel > 255] = 255
+        f0_coarse = np.rint(f0_mel).astype(np.int32)
+        return f0_coarse, f0bak
+
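`get_f0_post` (unchanged in behaviour here, now just reusing the precomputed `self.f0_mel_min`/`self.f0_mel_max`) quantizes the raw F0 curve onto the 1–255 coarse scale consumed by the synthesizer's pitch embedding: hertz are mapped to mel via 1127·ln(1 + f/700), rescaled so `f0_min` lands on 1 and `f0_max` on 255, and unvoiced (0 Hz) frames pin to 1. A standalone sketch of the same arithmetic, not repo code:

```python
import numpy as np

f0_min, f0_max = 50.0, 1100.0
f0_mel_min = 1127 * np.log(1 + f0_min / 700)  # ≈ 77.8
f0_mel_max = 1127 * np.log(1 + f0_max / 700)  # ≈ 1064.4

def coarse(f0):
    # same mel mapping and clipping as get_f0_post
    f0_mel = 1127 * np.log(1 + f0 / 700)
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
        f0_mel_max - f0_mel_min
    ) + 1
    f0_mel[f0_mel <= 1] = 1
    f0_mel[f0_mel > 255] = 255
    return np.rint(f0_mel).astype(np.int32)

print(coarse(np.array([0.0, 50.0, 440.0, 1100.0])))  # -> [1 1 122 255]
```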
+    def get_f0(self, x, f0_up_key, n_cpu, method="harvest"):
+        n_cpu = int(n_cpu)
+        if method == "crepe":
+            return self.get_f0_crepe(x, f0_up_key)
+        if method == "rmvpe":
+            return self.get_f0_rmvpe(x, f0_up_key)
+        if method == "fcpe":
+            return self.get_f0_fcpe(x, f0_up_key)
+        x = x.cpu().numpy()
+        if method == "pm":
+            p_len = x.shape[0] // 160 + 1
+            f0_min = 65
+            l_pad = int(np.ceil(1.5 / f0_min * 16000))
+            r_pad = l_pad + 1
+            s = parselmouth.Sound(np.pad(x, (l_pad, r_pad)), 16000).to_pitch_ac(
+                time_step=0.01,
+                voicing_threshold=0.6,
+                pitch_floor=f0_min,
+                pitch_ceiling=1100,
+            )
+            assert np.abs(s.t1 - 1.5 / f0_min) < 0.001
+            f0 = s.selected_array["frequency"]
+            if len(f0) < p_len:
+                f0 = np.pad(f0, (0, p_len - len(f0)))
+            f0 = f0[:p_len]
+            f0 *= pow(2, f0_up_key / 12)
+            return self.get_f0_post(f0)
+        if n_cpu == 1:
+            f0, t = pyworld.harvest(
+                x.astype(np.double),
+                fs=16000,
+                f0_ceil=1100,
+                f0_floor=50,
+                frame_period=10,
+            )
+            f0 = signal.medfilt(f0, 3)
+            f0 *= pow(2, f0_up_key / 12)
+            return self.get_f0_post(f0)
+        f0bak = np.zeros(x.shape[0] // 160 + 1, dtype=np.float64)
+        length = len(x)
+        part_length = 160 * ((length // 160 - 1) // n_cpu + 1)
+        n_cpu = (length // 160 - 1) // (part_length // 160) + 1
+        ts = ttime()
+        res_f0 = mm.dict()
+        for idx in range(n_cpu):
+            tail = part_length * (idx + 1) + 320
+            if idx == 0:
+                self.inp_q.put((idx, x[:tail], res_f0, n_cpu, ts))
+            else:
+                self.inp_q.put(
+                    (idx, x[part_length * idx - 320 : tail], res_f0, n_cpu, ts)
+                )
+        while 1:
+            res_ts = self.opt_q.get()
+            if res_ts == ts:
+                break
+        f0s = [i[1] for i in sorted(res_f0.items(), key=lambda x: x[0])]
+        for idx, f0 in enumerate(f0s):
+            if idx == 0:
+                f0 = f0[:-3]
+            elif idx != n_cpu - 1:
+                f0 = f0[2:-3]
+            else:
+                f0 = f0[2:]
+            f0bak[
+                part_length * idx // 160 : part_length * idx // 160 + f0.shape[0]
+            ] = f0
+        f0bak = signal.medfilt(f0bak, 3)
+        f0bak *= pow(2, f0_up_key / 12)
+        return self.get_f0_post(f0bak)
+
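In the multi-process harvest path above (unchanged apart from the new tensor-to-numpy conversion at the top of `get_f0`), the input is cut into roughly equal chunks with a 320-sample (two-frame) margin on each internal boundary; the worker processes behind `inp_q`/`opt_q` run `pyworld.harvest` per chunk, and the margins are trimmed (`f0[:-3]`, `f0[2:-3]`, `f0[2:]`) before the segments are stitched into `f0bak`. A standalone sketch of just the split arithmetic, with hypothetical sizes:

```python
length = 16000           # 1 s of 16 kHz audio -> 100 frames of 160 samples
n_cpu = 4
part_length = 160 * ((length // 160 - 1) // n_cpu + 1)   # samples per chunk (4000)
n_cpu = (length // 160 - 1) // (part_length // 160) + 1  # chunks actually needed (4)

for idx in range(n_cpu):
    start = 0 if idx == 0 else part_length * idx - 320
    tail = part_length * (idx + 1) + 320
    # each internal boundary is covered twice: 320 samples = 2 overlapping frames
    print(idx, start, min(tail, length))
```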
+    def get_f0_crepe(self, x, f0_up_key):
+        if "privateuseone" in str(self.device):  ### DML is not supported and the CPU is too slow to be usable; fall back to fcpe
+            return self.get_f0(x, f0_up_key, 1, "fcpe")
+        # printt("using crepe,device:%s"%self.device)
+        f0, pd = torchcrepe.predict(
+            x.unsqueeze(0).float(),
+            16000,
+            160,
+            self.f0_min,
+            self.f0_max,
+            "full",
+            batch_size=512,
+            # device=self.device if self.device.type!="privateuseone" else "cpu",  ### crepe is full precision only, so half precision is no concern ### CPU latency is too high to be usable
+            device=self.device,
+            return_periodicity=True,
+        )
+        pd = torchcrepe.filter.median(pd, 3)
+        f0 = torchcrepe.filter.mean(f0, 3)
+        f0[pd < 0.1] = 0
+        f0 = f0[0].cpu().numpy()
+        f0 *= pow(2, f0_up_key / 12)
+        return self.get_f0_post(f0)
+
+    def get_f0_rmvpe(self, x, f0_up_key):
+        if hasattr(self, "model_rmvpe") == False:
+            from infer.lib.rmvpe import RMVPE
+
+            printt("Loading rmvpe model")
+            self.model_rmvpe = RMVPE(
+                "assets/rmvpe/rmvpe.pt",
+                is_half=self.is_half,
+                device=self.device,
+                use_jit=self.config.use_jit,
+            )
+        f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
+        f0 *= pow(2, f0_up_key / 12)
+        return self.get_f0_post(f0)
+
+    def get_f0_fcpe(self, x, f0_up_key):
+        if hasattr(self, "model_fcpe") == False:
+            from torchfcpe import spawn_bundled_infer_model
+
+            printt("Loading fcpe model")
+            if "privateuseone" in str(self.device):
+                self.device_fcpe = "cpu"
+            else:
+                self.device_fcpe = self.device
+            self.model_fcpe = spawn_bundled_infer_model(self.device_fcpe)
+        f0 = self.model_fcpe.infer(
+            x.to(self.device_fcpe).unsqueeze(0).float(),
+            sr=16000,
+            decoder_mode="local_argmax",
+            threshold=0.006,
+        )
+        f0 *= pow(2, f0_up_key / 12)
+        f0 = f0.squeeze().cpu().numpy()
+        return self.get_f0_post(f0)
+
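`get_f0_fcpe` is the new extractor backing the `fcpe` option: it lazily spawns the bundled torchfcpe model on first use, caches it (and its device) on the instance, and on DirectML devices (`privateuseone`) forces it onto the CPU. Together with the `__init__` change that carries `model_fcpe`/`device_fcpe` over from `last_rvc`, the model survives settings changes without reloading; `get_f0_crepe` above now also falls back to fcpe instead of pm when crepe cannot run.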
+    def infer(
+        self,
+        input_wav: torch.Tensor,
+        block_frame_16k,
+        skip_head,
+        return_length,
+        f0method,
+    ) -> np.ndarray:
+        t1 = ttime()
+        with torch.no_grad():
+            if self.config.is_half:
+                feats = input_wav.half().view(1, -1)
+            else:
+                feats = input_wav.float().view(1, -1)
+            padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
+            inputs = {
+                "source": feats,
+                "padding_mask": padding_mask,
+                "output_layer": 9 if self.version == "v1" else 12,
+            }
+            logits = self.model.extract_features(**inputs)
+            feats = (
+                self.model.final_proj(logits[0]) if self.version == "v1" else logits[0]
+            )
+            feats = torch.cat((feats, feats[:, -1:, :]), 1)
+        t2 = ttime()
+        try:
+            if hasattr(self, "index") and self.index_rate != 0:
+                npy = feats[0][skip_head // 2 :].cpu().numpy().astype("float32")
+                score, ix = self.index.search(npy, k=8)
+                weight = np.square(1 / score)
+                weight /= weight.sum(axis=1, keepdims=True)
+                npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
+                if self.config.is_half:
+                    npy = npy.astype("float16")
+                feats[0][skip_head // 2 :] = (
+                    torch.from_numpy(npy).unsqueeze(0).to(self.device) * self.index_rate
+                    + (1 - self.index_rate) * feats[0][skip_head // 2 :]
+                )
+            else:
+                printt("Index search FAILED or disabled")
+        except:
+            traceback.print_exc()
+            printt("Index search FAILED")
+        t3 = ttime()
+        if self.if_f0 == 1:
+            f0_extractor_frame = block_frame_16k + 800
+            if f0method == "rmvpe":
+                f0_extractor_frame = 5120 * ((f0_extractor_frame - 1) // 5120 + 1) - 160
+            pitch, pitchf = self.get_f0(
+                input_wav[-f0_extractor_frame:], self.f0_up_key, self.n_cpu, f0method
+            )
+            start_frame = block_frame_16k // 160
+            end_frame = len(self.cache_pitch) - (pitch.shape[0] - 4) + start_frame
+            self.cache_pitch[:] = np.append(
+                self.cache_pitch[start_frame:end_frame], pitch[3:-1]
+            )
+            self.cache_pitchf[:] = np.append(
+                self.cache_pitchf[start_frame:end_frame], pitchf[3:-1]
+            )
+        t4 = ttime()
+        p_len = input_wav.shape[0] // 160
+        if self.if_f0 == 1:
+            cache_pitch = (
+                torch.LongTensor(self.cache_pitch[-p_len:]).to(self.device).unsqueeze(0)
+            )
+            cache_pitchf = (
+                torch.FloatTensor(self.cache_pitchf[-p_len:])
+                .to(self.device)
+                .unsqueeze(0)
+            )
+        feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
+        feats = feats[:, :p_len, :]
+        p_len = torch.LongTensor([p_len]).to(self.device)
+        sid = torch.LongTensor([0]).to(self.device)
+        skip_head = torch.LongTensor([skip_head])
+        return_length = torch.LongTensor([return_length])
+        with torch.no_grad():
+            if self.if_f0 == 1:
+                infered_audio, _, _ = self.net_g.infer(
+                    feats,
+                    p_len,
+                    cache_pitch,
+                    cache_pitchf,
+                    sid,
+                    skip_head,
+                    return_length,
+                )
+            else:
+                infered_audio, _, _ = self.net_g.infer(
+                    feats, p_len, sid, skip_head, return_length
+                )
+        t5 = ttime()
+        printt(
+            "Spent time: fea = %.3fs, index = %.3fs, f0 = %.3fs, model = %.3fs",
+            t2 - t1,
+            t3 - t2,
+            t4 - t3,
+            t5 - t4,
+        )
+        return infered_audio.squeeze().float()
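Compared with the old `infer` above, the realtime entry point no longer receives the pitch caches and resampling rate from the GUI: `cache_pitch`/`cache_pitchf` now live on the `RVC` instance (initialized to 1024 frames in `__init__`), the raw `input_wav` replaces the separate `feats`/`indata` pair, F0 extraction runs only over the tail window it needs (padded to a multiple of 5120 samples for rmvpe), and cropping is delegated to the synthesizer via the new `skip_head`/`return_length` arguments, with `net_g.infer` now returning a tuple unpacked as `infered_audio, _, _` instead of being indexed `[0][0, 0]`. The timing log also gains a digit of precision (`%.3fs`).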