fixed an NVIDIA GPU memory check error

This commit is contained in:
k4yt3x 2019-03-13 11:59:42 -04:00
parent 7f3a377ea8
commit 99306c6e4b


@@ -108,9 +108,12 @@ def check_memory():
         Avalon.warning('Nvidia-smi not available, skipping available memory check')
         Avalon.warning('If you experience error "cudaSuccess out of memory", try reducing number of threads you\'re using')
     else:
-        # "0" is GPU ID. Both waifu2x drivers use the first GPU available, therefore only 0 makes sense
-        gpu_memory_available = (GPUtil.getGPUs()[0].memoryTotal - GPUtil.getGPUs()[0].memoryUsed) / 1024
-        memory_status.append(('GPU', gpu_memory_available))
+        try:
+            # "0" is GPU ID. Both waifu2x drivers use the first GPU available, therefore only 0 makes sense
+            gpu_memory_available = (GPUtil.getGPUs()[0].memoryTotal - GPUtil.getGPUs()[0].memoryUsed) / 1024
+            memory_status.append(('GPU', gpu_memory_available))
+        except ValueError:
+            pass
 
     # Go though each checkable memory type and check availability
     for memory_type, memory_available in memory_status:
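
For context, here is a minimal standalone sketch of the patched logic, assuming GPUtil is installed (pip install gputil) and that the surrounding check_memory() collects (type, available-GB) tuples in a memory_status list, as the diff suggests. The helper name gpu_memory_available_gb is hypothetical, and the ValueError branch reflects an assumption that GPUtil raised it while parsing nvidia-smi output in the author's environment; nothing beyond the diff is implied.

import GPUtil

def gpu_memory_available_gb():
    """Return free memory on GPU 0 in GB, or None if it cannot be read."""
    try:
        # "0" is GPU ID; both waifu2x drivers use the first GPU available
        gpu = GPUtil.getGPUs()[0]
        # GPUtil reports memoryTotal/memoryUsed in MB, hence the 1024 divisor
        return (gpu.memoryTotal - gpu.memoryUsed) / 1024
    except ValueError:
        # assumed to be raised when GPUtil fails to parse nvidia-smi's
        # output (e.g. a field reading "[Not Supported]"); skip the GPU check
        return None

memory_status = []
gpu_memory_available = gpu_memory_available_gb()
if gpu_memory_available is not None:
    memory_status.append(('GPU', gpu_memory_available))

Note that the patch, and this sketch with it, catches only ValueError: if GPUtil.getGPUs() returned an empty list, indexing [0] would still raise an uncaught IndexError, a case the preceding nvidia-smi availability check is presumably meant to rule out.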