diff --git a/bin/video2x.py b/bin/video2x.py
index b0f46f1..c42ab5d 100755
--- a/bin/video2x.py
+++ b/bin/video2x.py
@@ -108,9 +108,12 @@ def check_memory():
             Avalon.warning('Nvidia-smi not available, skipping available memory check')
             Avalon.warning('If you experience error \"cudaSuccess out of memory\", try reducing number of threads you\'re using')
         else:
-            # "0" is GPU ID. Both waifu2x drivers use the first GPU available, therefore only 0 makes sense
-            gpu_memory_available = (GPUtil.getGPUs()[0].memoryTotal - GPUtil.getGPUs()[0].memoryUsed) / 1024
-            memory_status.append(('GPU', gpu_memory_available))
+            try:
+                # "0" is GPU ID. Both waifu2x drivers use the first GPU available, therefore only 0 makes sense
+                gpu_memory_available = (GPUtil.getGPUs()[0].memoryTotal - GPUtil.getGPUs()[0].memoryUsed) / 1024
+                memory_status.append(('GPU', gpu_memory_available))
+            except ValueError:
+                pass
 
     # Go though each checkable memory type and check availability
     for memory_type, memory_available in memory_status:
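
For context, a minimal standalone sketch of what the patched else-branch now does. The helper name gpu_memory_available_gib is hypothetical (not part of the repo); the assumption, suggested by the except clause in the diff, is that GPUtil can raise ValueError while parsing nvidia-smi output on some driver/GPU combinations, and the patch treats that as "GPU memory check unavailable" rather than crashing:

    import GPUtil

    def gpu_memory_available_gib():
        """Free memory on GPU 0 in GiB, or None if it cannot be read."""
        try:
            # "0" is GPU ID; both waifu2x drivers use the first GPU available
            gpu = GPUtil.getGPUs()[0]
            # GPUtil reports memoryTotal / memoryUsed in MiB
            return (gpu.memoryTotal - gpu.memoryUsed) / 1024
        except ValueError:
            # assumed failure mode: GPUtil cannot parse nvidia-smi output;
            # mirror the patch and skip the GPU memory check in that case
            return None

One thing worth noting in review: the except clause only covers ValueError. If nvidia-smi is present but reports no GPUs, GPUtil.getGPUs() returns an empty list and the [0] index raises an uncaught IndexError, so that case would still crash.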