Mirror of https://github.com/k4yt3x/video2x.git
fixed a NVIDIA …
commit 99306c6e4b
parent 7f3a377ea8
@@ -108,9 +108,12 @@ def check_memory():
         Avalon.warning('Nvidia-smi not available, skipping available memory check')
         Avalon.warning('If you experience error \"cudaSuccess out of memory\", try reducing number of threads you\'re using')
     else:
-        # "0" is GPU ID. Both waifu2x drivers use the first GPU available, therefore only 0 makes sense
-        gpu_memory_available = (GPUtil.getGPUs()[0].memoryTotal - GPUtil.getGPUs()[0].memoryUsed) / 1024
-        memory_status.append(('GPU', gpu_memory_available))
+        try:
+            # "0" is GPU ID. Both waifu2x drivers use the first GPU available, therefore only 0 makes sense
+            gpu_memory_available = (GPUtil.getGPUs()[0].memoryTotal - GPUtil.getGPUs()[0].memoryUsed) / 1024
+            memory_status.append(('GPU', gpu_memory_available))
+        except ValueError:
+            pass
 
     # Go though each checkable memory type and check availability
     for memory_type, memory_available in memory_status:
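For context, here is a minimal standalone sketch of the pattern this commit introduces: probing the first GPU's free memory through GPUtil and degrading gracefully when the probe fails. The helper name get_gpu_memory_gb and the plain print fallback are illustrative assumptions; video2x itself logs through Avalon and appends the result to a memory_status list, as shown in the diff above.

    # Standalone sketch (assumption: GPUtil is installed, e.g. `pip install gputil`).
    # get_gpu_memory_gb is a hypothetical helper, not a video2x function.
    import GPUtil

    def get_gpu_memory_gb():
        """Return free memory of GPU 0 in GiB, or None if it cannot be read."""
        try:
            # GPU ID 0: both waifu2x drivers use the first available GPU
            gpu = GPUtil.getGPUs()[0]
            # GPUtil reports memoryTotal/memoryUsed in MiB, hence the / 1024
            return (gpu.memoryTotal - gpu.memoryUsed) / 1024
        except ValueError:
            # ValueError is what the commit guards against (older GPUtil
            # could raise it while parsing nvidia-smi output)
            return None
        except IndexError:
            # no GPUs enumerated at all (a case the original guard skips)
            return None

    if __name__ == '__main__':
        available = get_gpu_memory_gb()
        if available is None:
            print('Nvidia-smi not available, skipping available memory check')
        else:
            print(f'GPU 0 memory available: {available:.2f} GiB')

Swallowing the failure (pass in the commit, returning None here) lets the surrounding memory check carry on with whatever memory types were successfully probed instead of aborting the whole run.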