diff --git a/Dockerfile b/Dockerfile
index 9a8b6271..b4edf846 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -49,6 +49,8 @@ RUN pip3 install --no-cache-dir torch==$TORCH_VERSION -r requirements_styletts2.
 RUN pip3 install --no-cache-dir torch==$TORCH_VERSION -r requirements_vall_e.txt
 RUN pip3 install --no-cache-dir torch==$TORCH_VERSION -r requirements_maha_tts.txt
 RUN pip3 install --no-cache-dir torch==$TORCH_VERSION -r requirements_stable_audio.txt
+# RUN pip3 install --no-cache-dir torch==$TORCH_VERSION hydra-core==1.3.2
+RUN pip3 install --no-cache-dir torch==$TORCH_VERSION nvidia-ml-py
 
 # Build the React UI
 RUN cd react-ui && npm install && npm run build
diff --git a/installer_scripts/js/initializeApp.js b/installer_scripts/js/initializeApp.js
index 29a7e875..dc9ad57e 100644
--- a/installer_scripts/js/initializeApp.js
+++ b/installer_scripts/js/initializeApp.js
@@ -203,6 +203,7 @@ async function updateDependencies(optional = true) {
   tryInstall("-r requirements_stable_audio.txt", "Stable Audio");
   // reinstall hydra-core==1.3.2 because of fairseq
   tryInstall("hydra-core==1.3.2", "hydra-core fix due to fairseq");
+  tryInstall("nvidia-ml-py", "nvidia-ml-py");
 
   savePipPackagesVersion(newPipPackagesVersion);
 }
diff --git a/installer_scripts/versions.json b/installer_scripts/versions.json
index 0bc2901d..76412b99 100644
--- a/installer_scripts/versions.json
+++ b/installer_scripts/versions.json
@@ -1,5 +1,5 @@
 {
-  "version": "0.0.3",
+  "version": "0.1.0",
   "pip_packages": 4,
   "npm_packages": 3,
   "react_ui": 3
diff --git a/tts_webui/utils/gpu_info_tab.py b/tts_webui/utils/gpu_info_tab.py
index 562c8ea6..ba8e0158 100644
--- a/tts_webui/utils/gpu_info_tab.py
+++ b/tts_webui/utils/gpu_info_tab.py
@@ -26,6 +26,23 @@ def get_gpu_info():
     return [get_gpu_info_idx(idx) for idx in range(device_count)]
 
+def get_pynvml_fields(idx=0):
+    # check if pynvml is installed
+    try:
+        # import pynvml
+        return {
+            "temperature": torch.cuda.temperature(idx),
+            "power_draw": torch.cuda.power_draw(idx) / 1000,
+            "utilization": torch.cuda.utilization(idx),
+        }
+    # except ImportError:
+    except:
+        return {
+            "temperature": 0,
+            "power_draw": 0,
+            "utilization": 0,
+        }
+
 
 def get_gpu_info_idx(idx=0):
     return {
         "torch_version": torch.__version__,
@@ -43,9 +60,7 @@ def get_gpu_info_idx(idx=0):
         "multi_processor_count": torch.cuda.get_device_properties(
             idx
         ).multi_processor_count,
-        "temperature": torch.cuda.temperature(idx),
-        "power_draw": torch.cuda.power_draw(idx) / 1000,
-        "utilization": torch.cuda.utilization(idx),
+        **get_pynvml_fields(idx),
     }