Add GPU-specific packages to the .toml and a Python script to selectively uncomment them

This commit is contained in:
VSlobolinskyi 2025-03-18 18:14:00 +02:00
parent 0cbff69edf
commit fea77c0b96
2 changed files with 92 additions and 19 deletions

configure_gpu_deps.py (new file)

@@ -0,0 +1,73 @@
#!/usr/bin/env python3
import sys
import re


def process_lines(lines, target_gpu):
    """
    Process the lines of a file and uncomment the configuration block corresponding
    to target_gpu ('nvidia' or 'amd') while leaving the other block commented.
    """
    output_lines = []
    current_block = None  # None, "nvidia", or "amd"

    # Define regexes to detect the markers
    nvidia_marker = re.compile(r'---\s*NVIDIA GPU configuration\s*---', re.IGNORECASE)
    amd_marker = re.compile(r'---\s*AMD GPU configuration\s*---', re.IGNORECASE)
    separator = re.compile(r'^#\s*-{5,}')  # a commented separator line (at least 5 dashes)

    for line in lines:
        stripped = line.lstrip()

        # Check for block start markers (they remain unchanged)
        if nvidia_marker.search(line):
            current_block = "nvidia"
            output_lines.append(line)
            continue
        elif amd_marker.search(line):
            current_block = "amd"
            output_lines.append(line)
            continue

        # End of block when encountering a separator line
        if separator.match(line):
            current_block = None
            output_lines.append(line)
            continue

        # If we're in a GPU configuration block and the line is commented, process it
        if current_block is not None and stripped.startswith("#"):
            # Remove the first '#' and any following space if we are in the target block.
            if current_block == target_gpu:
                # Uncomment by removing the first '#' (preserve indentation),
                # using a regex to strip a leading '#' with an optional trailing space.
                uncommented = re.sub(r'^(?P<indent>\s*)#\s?', r'\g<indent>', line)
                output_lines.append(uncommented)
            else:
                # Leave the line commented for the non-target block
                output_lines.append(line)
        else:
            output_lines.append(line)

    return output_lines


def main():
    if len(sys.argv) != 3:
        print("Usage: python configure_gpu_deps.py <pyproject.toml> <gpu_type>")
        print("  where <gpu_type> is either 'nvidia' or 'amd'")
        sys.exit(1)

    toml_path = sys.argv[1]
    gpu_type = sys.argv[2].lower()
    if gpu_type not in {"nvidia", "amd"}:
        print("gpu_type must be either 'nvidia' or 'amd'")
        sys.exit(1)

    with open(toml_path, "r", encoding="utf-8") as f:
        lines = f.readlines()

    new_lines = process_lines(lines, gpu_type)

    with open(toml_path, "w", encoding="utf-8") as f:
        f.writelines(new_lines)

    print(f"Updated {toml_path} for {gpu_type.upper()} GPU configuration.")


if __name__ == "__main__":
    main()

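The behaviour of process_lines can be illustrated with a minimal, self-contained sketch. It assumes configure_gpu_deps.py is importable from the current directory; the shortened NVIDIA line is an illustrative stand-in for the full wheel-URL entries in pyproject.toml, while the AMD values are taken from the commented block in this commit.

    # Illustrative demo, not part of the commit.
    from configure_gpu_deps import process_lines

    sample = [
        "# --- NVIDIA GPU configuration ---\n",
        '# torch = "2.1.1"\n',  # stand-in for the real URL-based entries
        "# --- AMD GPU configuration ---\n",
        '# torch = "2.4.1"\n',
        '# torch-directml = "^0.2.5.dev240914"\n',
        "# ---------------------------------------------------------------------------\n",
    ]

    # Uncomment only the AMD block; the NVIDIA block and the markers stay commented.
    for line in process_lines(sample, "amd"):
        print(line, end="")
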
pyproject.toml

@@ -8,30 +8,14 @@ package-mode = false
[tool.poetry.dependencies]
python = ">=3.11,<3.12"
# GPU-specific installations for torch and torchaudio:
# If AMD_GPU environment variable is set to "true", then use AMD-compatible packages.
torch = [
{ url = "https://download.pytorch.org/whl/cu118/torch-2.1.1%2Bcu118-cp311-cp311-win_amd64.whl#sha256=d99be44487d3ed0f7e6ef5d6689a37fb4a2f2821a9e7b59e7e04002a876a667a", markers = "sys_platform == 'win32' and platform_machine != 'AMD64'" },
{ url = "https://download.pytorch.org/whl/cu118/torch-2.1.1%2Bcu118-cp311-cp311-linux_x86_64.whl#sha256=f3c0ba02b50d0021ff26f030e22d4c45965537cf91f322e52a65b8c58396f81c", markers = "sys_platform == 'linux' and platform_machine != 'AMD64'" },
{ version = "^2.4.1", markers = "platform_machine == 'AMD64'" }
]
torchaudio = [
{ url = "https://download.pytorch.org/whl/cu118/torchaudio-2.1.1%2Bcu118-cp311-cp311-win_amd64.whl#sha256=79b5afa556063be18de4a1964339242301fe04e782e1030a22695257dd9afbd2", markers = "sys_platform == 'win32' and platform_machine != 'AMD64'" },
{ url = "https://download.pytorch.org/whl/cu118/torchaudio-2.1.1%2Bcu118-cp311-cp311-linux_x86_64.whl#sha256=2b077639f240176bb27e964e2e9b3a5c2a8d560a3a7bc1ffd0a024e81f2e10b4", markers = "sys_platform == 'linux' and platform_machine != 'AMD64'" },
{ version = "2.4.1", markers = "platform_machine == 'AMD64'" }
]
# Additional dependency for AMD GPUs on Windows:
torch-directml = { version = "^0.2.5.dev240914", markers = "platform_machine == 'AMD64'" }
# Common dependencies (non GPU-specific):
fairseq = { git = "https://github.com/One-sixth/fairseq.git" }
joblib = ">=1.1.0"
numba = "*"
numpy = "*"
scipy = "*"
librosa = "==0.10.2"
llvmlite = "*"
fairseq = { git = "https://github.com/One-sixth/fairseq.git" }
faiss-cpu = "*"
gradio = "==3.34.0"
Cython = "*"
@@ -65,7 +49,7 @@ uvicorn = ">=0.21.1"
colorama = ">=0.4.5"
pyworld = "==0.3.2"
httpx = "*"
onnxruntime = { version = "*", markers = "sys_platform == 'darwin'" }
onnxruntime = { version = "*", markers = "sys_platform == 'darwin'" }
onnxruntime-gpu = { version = "*", markers = "sys_platform != 'darwin'" }
torchcrepe = "==0.0.23"
fastapi = "==0.88"
@@ -74,6 +58,22 @@ ffmpy = "==0.3.1"
python-dotenv = ">=1.0.0"
av = "*"
# ---------------------------------------------------------------------------
# --- NVIDIA GPU configuration ---
# torch = [
# { url = "https://download.pytorch.org/whl/cu118/torch-2.1.1+cu118-cp311-cp311-win_amd64.whl#sha256=d99be44487d3ed0f7e6ef5d6689a37fb4a2f2821a9e7b59e7e04002a876a667a", markers = "sys_platform == 'win32'" },
# { url = "https://download.pytorch.org/whl/cu118/torch-2.1.1+cu118-cp311-cp311-linux_x86_64.whl#sha256=f3c0ba02b50d0021ff26f030e22d4c45965537cf91f322e52a65b8c58396f81c", markers = "sys_platform == 'linux'" }
# ]
# torchaudio = [
# { url = "https://download.pytorch.org/whl/cu118/torchaudio-2.1.1+cu118-cp311-cp311-win_amd64.whl#sha256=79b5afa556063be18de4a1964339242301fe04e782e1030a22695257dd9afbd2", markers = "sys_platform == 'win32'" },
# { url = "https://download.pytorch.org/whl/cu118/torchaudio-2.1.1+cu118-cp311-cp311-linux_x86_64.whl#sha256=2b077639f240176bb27e964e2e9b3a5c2a8d560a3a7bc1ffd0a024e81f2e10b4", markers = "sys_platform == 'linux'" }
# ]
# --- AMD GPU configuration ---
# torch = "2.4.1"
# torchaudio = "2.4.1"
# torch-directml = "^0.2.5.dev240914"
# ---------------------------------------------------------------------------
[tool.poetry.group.dev.dependencies]
# Add any development dependencies here
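
As a hedged sketch of intended use: the script rewrites pyproject.toml in place, so a setup step would call it once with the desired vendor and then re-resolve dependencies with Poetry. The subprocess wiring and working directory below are assumptions, not part of this commit; only the direct CLI form `python configure_gpu_deps.py <pyproject.toml> <gpu_type>` is documented by the script itself.

    # Hypothetical wiring: run the configurator for the chosen GPU vendor.
    import subprocess
    import sys

    gpu_type = "nvidia"  # or "amd"; vendor detection is left to the caller
    subprocess.run(
        [sys.executable, "configure_gpu_deps.py", "pyproject.toml", gpu_type],
        check=True,
    )
    # Presumably followed by a fresh `poetry lock` / `poetry install` (assumption).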