Skip to content
Snippets Groups Projects
Commit 714bf8d8 authored by Giovanni La Mura's avatar Giovanni La Mura
Browse files

Write matrix size estimate when running model_maker

parent ba50e221
No related branches found
No related tags found
No related merge requests found
...@@ -418,13 +418,16 @@ def load_model(model_file): ...@@ -418,13 +418,16 @@ def load_model(model_file):
write_obj(sconf, gconf, max_rad) write_obj(sconf, gconf, max_rad)
try: try:
max_gpu_ram = int(model['system_settings']['max_gpu_ram']) max_gpu_ram = int(model['system_settings']['max_gpu_ram'])
matrix_dim = 2 * gconf['nsph'] * gconf['li'] * (gconf['li'] + 2)
matrix_size_bytes = 16 * matrix_dim * matrix_dim
matrix_size_Gb = float(matrix_size_bytes) / 1024.0 / 1024.0 / 1024.0
print("INFO: estimated matrix size is {0:.3g} Gb.".format(matrix_size_Gb))
if (max_gpu_ram > 0): if (max_gpu_ram > 0):
max_gpu_ram_bytes = max_gpu_ram * 1024 * 1024 * 1024 max_gpu_ram_bytes = max_gpu_ram * 1024 * 1024 * 1024
matrix_dim = 2 * gconf['nsph'] * gconf['li'] * (gconf['li'] + 2)
matrix_size_bytes = 16 * matrix_dim * matrix_dim
if (matrix_size_bytes < max_gpu_ram_bytes): if (matrix_size_bytes < max_gpu_ram_bytes):
max_gpu_processes = int(max_gpu_ram_bytes / matrix_size_bytes) max_gpu_processes = int(max_gpu_ram_bytes / matrix_size_bytes)
print("INFO: system supports up to %d simultaneous processes on GPU."%max_gpu_processes) print("INFO: system supports up to %d simultaneous processes on GPU."%max_gpu_processes)
print("INFO: only %d GPU processes allowed, if using refinement."%(max_gpu_processes / 3))
else: else:
print("WARNING: estimated matrix size is larger than available GPU memory!") print("WARNING: estimated matrix size is larger than available GPU memory!")
else: else:
......
0% Loading or loading failed.
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment