From 890863324134029b20289a998f689a420dddc857 Mon Sep 17 00:00:00 2001
From: Stefano Alberto Russo <stefano.russo@gmail.com>
Date: Thu, 8 Apr 2021 12:44:56 +0200
Subject: [PATCH] Refactored the computing type by adding the access mode.

Refactored the computing manager classes. Added the "access_method" field
and related sys and user conf in the Computing model.
---
 services/webapp/code/rosetta/core_app/api.py  |   6 +-
 .../rosetta/core_app/computing_managers.py    | 357 +++++++++---------
 .../management/commands/core_app_populate.py  |  37 +-
 .../migrations/0004_auto_20210408_1041.py     |  30 ++
 .../webapp/code/rosetta/core_app/models.py    |  33 +-
 .../templates/components/computing.html       |  12 +-
 6 files changed, 277 insertions(+), 198 deletions(-)
 create mode 100644 services/webapp/code/rosetta/core_app/migrations/0004_auto_20210408_1041.py

diff --git a/services/webapp/code/rosetta/core_app/api.py b/services/webapp/code/rosetta/core_app/api.py
index d2dd691..591b7b3 100644
--- a/services/webapp/code/rosetta/core_app/api.py
+++ b/services/webapp/code/rosetta/core_app/api.py
@@ -710,7 +710,11 @@ class FileManagerAPI(PrivateGETAPI, PrivatePOSTAPI):
         computings = list(Computing.objects.filter(user=None)) + list(Computing.objects.filter(user=request.user))
 
         for computing in computings:
-
+
+            # For now, we only support SSH-based computing resources
+            if not 'ssh' in computing.access_method:
+                continue
+
             # Attach user conf in any
             computing.attach_user_conf(request.user)
 
diff --git a/services/webapp/code/rosetta/core_app/computing_managers.py b/services/webapp/code/rosetta/core_app/computing_managers.py
index e485f33..08cd271 100644
--- a/services/webapp/code/rosetta/core_app/computing_managers.py
+++ b/services/webapp/code/rosetta/core_app/computing_managers.py
@@ -73,8 +73,21 @@ class ComputingManager(object):
         return self._get_task_log(task, **kwargs)
 
 
+class SingleNodeComputingManager(ComputingManager):
+    pass
 
-class LocalComputingManager(ComputingManager):
+
+class ClusterComputingManager(ComputingManager):
+    pass
+
+
+class SSHComputingManager(ComputingManager):
+    # SSH + keys utils here
+    pass
+
+
+
+class InternalSingleNodeComputingManager(SingleNodeComputingManager):
 
     def _start_task(self, task):
 
@@ -161,7 +174,11 @@
 
 
 
-class RemoteComputingManager(ComputingManager):
+
+
+
+
+class SSHSingleNodeComputingManager(SingleNodeComputingManager, SSHComputingManager):
 
     def _start_task(self, task, **kwargs):
         logger.debug('Starting a remote task "{}"'.format(self.computing))
@@ -300,7 +317,7 @@
 
 
 
-class SlurmComputingManager(ComputingManager):
+class SlurmSSHClusterComputingManager(ClusterComputingManager, SSHComputingManager):
 
     def _start_task(self, task, **kwargs):
         logger.debug('Starting a remote task "{}"'.format(self.computing))
@@ -467,173 +484,173 @@
         return out.stdout
 
-
-class RemotehopComputingManager(ComputingManager):
-
-    def _start_task(self, task, **kwargs):
-        logger.debug('Starting a remote task "{}"'.format(self.computing))
-
-        # Get computing params
-        first_host = self.computing.conf.get('first_host')
-        first_user = self.computing.conf.get('first_user')
-        second_host = self.computing.conf.get('second_host')
-        second_user = self.computing.conf.get('second_user')
-        setup_command = self.computing.conf.get('setup_command')
-
-        # TODO: De hard-code
-        use_agent = False
-
-        # Get user keys
-        if self.computing.requires_user_keys:
-            user_keys = KeyPair.objects.get(user=task.user, default=True)
-        else:
-            raise NotImplementedError('Remote tasks not requiring keys are not yet supported')
-
-        # Get webapp conn string
-        from.utils import get_webapp_conn_string
-        webapp_conn_string = get_webapp_conn_string()
-
-        # Run the container on the host (non blocking)
-        if task.container.type == 'singularity':
-
-            task.tid = task.uuid
-            task.save()
-
-            # Set pass if any
-            if task.auth_pass:
-                authstring = ' export SINGULARITYENV_AUTH_PASS={} && '.format(task.auth_pass)
-            else:
-                authstring = ''
-
-            # Set binds, only from sys config if the resource is not owned by the user
-            if self.computing.user != task.user:
-                binds = self.computing.sys_conf.get('binds')
-            else:
-                binds = self.computing.conf.get('binds')
-            if not binds:
-                binds = ''
-            else:
-                binds = '-B {}'.format(binds)
-
-            # Manage task extra binds
-            if task.extra_binds:
-                if not binds:
-                    binds = '-B {}'.format(task.extra_binds)
-                else:
-                    binds += ',{}'.format(task.extra_binds)
-
-            run_command = 'ssh -o LogLevel=ERROR -i {} -4 -o StrictHostKeyChecking=no {}@{} '.format(user_keys.private_key_file, first_user, first_host)
-            run_command += '"ssh -4 -o StrictHostKeyChecking=no {}@{} /bin/bash -c \''.format(second_user, second_host)
-
-            if use_agent:
-                run_command += '\'wget {}/api/v1/base/agent/?task_uuid={} -O \$HOME/agent_{}.py &> /dev/null && export BASE_PORT=\$(python \$HOME/agent_{}.py 2> \$HOME/{}.log) && '.format(webapp_conn_string, task.uuid, task.uuid, task.uuid, task.uuid)
-                if setup_command:
-                    run_command += setup_command + ' && '
-                run_command += '\'export SINGULARITY_NOHTTPS=true && export SINGULARITYENV_BASE_PORT=\$BASE_PORT && {} '.format(authstring)
-                run_command += 'rm -rf /tmp/{}_data && mkdir -p /tmp/{}_data/tmp &>> \$HOME/{}.log && mkdir -p /tmp/{}_data/home &>> \$HOME/{}.log && chmod 700 /tmp/{}_data && '.format(task.uuid, task.uuid, task.uuid, task.uuid, task.uuid, task.uuid)
-                run_command += 'exec nohup singularity run {} --pid --writable-tmpfs --no-home --home=/home/metauser --workdir /tmp/{}_data/tmp -B/tmp/{}_data/home:/home --containall --cleanenv '.format(binds, task.uuid, task.uuid)
-            else:
-                run_command += ' : && ' # Trick to prevent some issues in exporting variables
-                if setup_command:
-                    run_command += setup_command + ' && '
-                run_command += 'export SINGULARITY_NOHTTPS=true && export SINGULARITYENV_BASE_PORT={} && {} '.format(task.port, authstring)
-                run_command += 'rm -rf /tmp/{}_data && mkdir -p /tmp/{}_data/tmp &>> \$HOME/{}.log && mkdir -p /tmp/{}_data/home &>> \$HOME/{}.log && chmod 700 /tmp/{}_data && '.format(task.uuid, task.uuid, task.uuid, task.uuid, task.uuid, task.uuid)
-                run_command += 'exec nohup singularity run {} --pid --writable-tmpfs --no-home --home=/home/metauser --workdir /tmp/{}_data/tmp -B/tmp/{}_data/home:/home --containall --cleanenv '.format(binds, task.uuid, task.uuid)
-
-            # Set registry
-            if task.container.registry == 'docker_local':
-                raise Exception('This computing resource does not support local Docker registries yet')
-                # Get local Docker registry conn string
-                from.utils import get_local_docker_registry_conn_string
-                local_docker_registry_conn_string = get_local_docker_registry_conn_string()
-                registry = 'docker://{}/'.format(local_docker_registry_conn_string)
-            elif task.container.registry == 'docker_hub':
-                registry = 'docker://'
-            else:
-                raise NotImplementedError('Registry {} not supported'.format(task.container.registry))
-
-            run_command+='{}{} &>> \$HOME/{}.log & echo \$!\'"'.format(registry, task.container.image, task.uuid)
-
-        else:
-            raise NotImplementedError('Container {} not supported'.format(task.container.type))
-
-        out = os_shell(run_command, capture=True)
-        if out.exit_code != 0:
-            raise Exception(out.stderr)
-
-        # Log
-        logger.debug('Shell exec output: "{}"'.format(out))
-
-
-        # Load back the task to avoid concurrency problems in the agent call
-        task_uuid = task.uuid
-        task = Task.objects.get(uuid=task_uuid)
-
-        # Save pid echoed by the command above
-        task_pid = out.stdout
-
-        # Set fields
-        task.status = TaskStatuses.running
-        task.pid = task_pid
-        task.ip = second_host
-
-        # Save
-        task.save()
-
-
-    def _stop_task(self, task, **kwargs):
-
-        # Get user keys
-        if self.computing.requires_user_keys:
-            user_keys = KeyPair.objects.get(user=task.user, default=True)
-        else:
-            raise NotImplementedError('Remote tasks not requiring keys are not yet supported')
-
-        # Get computing params
-        first_host = self.computing.conf.get('first_host')
-        first_user = self.computing.conf.get('first_user')
-        second_host = self.computing.conf.get('second_host')
-        second_user = self.computing.conf.get('second_user')
-
-        # Stop the task remotely
-        stop_command = 'ssh -o LogLevel=ERROR -i {} -4 -o StrictHostKeyChecking=no {}@{} '.format(user_keys.private_key_file, first_user, first_host)
-        stop_command += '"ssh -4 -o StrictHostKeyChecking=no {}@{} '.format(second_user, second_host)
-        stop_command += 'kill -9 {}"'.format(task.pid)
-
-        out = os_shell(stop_command, capture=True)
-        if out.exit_code != 0:
-            if not 'No such process' in out.stderr:
-                raise Exception(out.stderr)
-
-        # Set task as stopped
-        task.status = TaskStatuses.stopped
-        task.save()
-
-
-    def _get_task_log(self, task, **kwargs):
-
-        # Get user keys
-        if self.computing.requires_user_keys:
-            user_keys = KeyPair.objects.get(user=task.user, default=True)
-        else:
-            raise NotImplementedError('Remote tasks not requiring keys are not yet supported')
-
-        # Get computing params
-        first_host = self.computing.conf.get('first_host')
-        first_user = self.computing.conf.get('first_user')
-        second_host = self.computing.conf.get('second_host')
-        second_user = self.computing.conf.get('second_user')
-
-        # View log remotely
-        view_log_command = 'ssh -o LogLevel=ERROR -i {} -4 -o StrictHostKeyChecking=no {}@{} '.format(user_keys.private_key_file, first_user, first_host)
-        view_log_command += '"ssh -4 -o StrictHostKeyChecking=no {}@{} '.format(second_user, second_host)
-        view_log_command += 'cat \\\\\\$HOME/{}.log"'.format(task.uuid)
-
-        out = os_shell(view_log_command, capture=True)
-        if out.exit_code != 0:
-            raise Exception(out.stderr)
-        else:
-            return out.stdout
+
+# TODO: rename the following as "ssh+ssh" access mode? Or something similar?
+# class RemotehopComputingManager(ComputingManager):
+#
+#     def _start_task(self, task, **kwargs):
+#         logger.debug('Starting a remote task "{}"'.format(self.computing))
+#
+#         # Get computing params
+#         first_host = self.computing.conf.get('first_host')
+#         first_user = self.computing.conf.get('first_user')
+#         second_host = self.computing.conf.get('second_host')
+#         second_user = self.computing.conf.get('second_user')
+#         setup_command = self.computing.conf.get('setup_command')
+#
+#         # TODO: De hard-code
+#         use_agent = False
+#
+#         # Get user keys
+#         if self.computing.requires_user_keys:
+#             user_keys = KeyPair.objects.get(user=task.user, default=True)
+#         else:
+#             raise NotImplementedError('Remote tasks not requiring keys are not yet supported')
+#
+#         # Get webapp conn string
+#         from.utils import get_webapp_conn_string
+#         webapp_conn_string = get_webapp_conn_string()
+#
+#         # Run the container on the host (non blocking)
+#         if task.container.type == 'singularity':
+#
+#             task.tid = task.uuid
+#             task.save()
+#
+#             # Set pass if any
+#             if task.auth_pass:
+#                 authstring = ' export SINGULARITYENV_AUTH_PASS={} && '.format(task.auth_pass)
+#             else:
+#                 authstring = ''
+#
+#             # Set binds, only from sys config if the resource is not owned by the user
+#             if self.computing.user != task.user:
+#                 binds = self.computing.sys_conf.get('binds')
+#             else:
+#                 binds = self.computing.conf.get('binds')
+#             if not binds:
+#                 binds = ''
+#             else:
+#                 binds = '-B {}'.format(binds)
+#
+#             # Manage task extra binds
+#             if task.extra_binds:
+#                 if not binds:
+#                     binds = '-B {}'.format(task.extra_binds)
+#                 else:
+#                     binds += ',{}'.format(task.extra_binds)
+#
+#             run_command = 'ssh -o LogLevel=ERROR -i {} -4 -o StrictHostKeyChecking=no {}@{} '.format(user_keys.private_key_file, first_user, first_host)
+#             run_command += '"ssh -4 -o StrictHostKeyChecking=no {}@{} /bin/bash -c \''.format(second_user, second_host)
+#
+#             if use_agent:
+#                 run_command += '\'wget {}/api/v1/base/agent/?task_uuid={} -O \$HOME/agent_{}.py &> /dev/null && export BASE_PORT=\$(python \$HOME/agent_{}.py 2> \$HOME/{}.log) && '.format(webapp_conn_string, task.uuid, task.uuid, task.uuid, task.uuid)
+#                 if setup_command:
+#                     run_command += setup_command + ' && '
+#                 run_command += '\'export SINGULARITY_NOHTTPS=true && export SINGULARITYENV_BASE_PORT=\$BASE_PORT && {} '.format(authstring)
+#                 run_command += 'rm -rf /tmp/{}_data && mkdir -p /tmp/{}_data/tmp &>> \$HOME/{}.log && mkdir -p /tmp/{}_data/home &>> \$HOME/{}.log && chmod 700 /tmp/{}_data && '.format(task.uuid, task.uuid, task.uuid, task.uuid, task.uuid, task.uuid)
+#                 run_command += 'exec nohup singularity run {} --pid --writable-tmpfs --no-home --home=/home/metauser --workdir /tmp/{}_data/tmp -B/tmp/{}_data/home:/home --containall --cleanenv '.format(binds, task.uuid, task.uuid)
+#             else:
+#                 run_command += ' : && ' # Trick to prevent some issues in exporting variables
+#                 if setup_command:
+#                     run_command += setup_command + ' && '
+#                 run_command += 'export SINGULARITY_NOHTTPS=true && export SINGULARITYENV_BASE_PORT={} && {} '.format(task.port, authstring)
+#                 run_command += 'rm -rf /tmp/{}_data && mkdir -p /tmp/{}_data/tmp &>> \$HOME/{}.log && mkdir -p /tmp/{}_data/home &>> \$HOME/{}.log && chmod 700 /tmp/{}_data && '.format(task.uuid, task.uuid, task.uuid, task.uuid, task.uuid, task.uuid)
+#                 run_command += 'exec nohup singularity run {} --pid --writable-tmpfs --no-home --home=/home/metauser --workdir /tmp/{}_data/tmp -B/tmp/{}_data/home:/home --containall --cleanenv '.format(binds, task.uuid, task.uuid)
+#
+#             # Set registry
+#             if task.container.registry == 'docker_local':
+#                 raise Exception('This computing resource does not support local Docker registries yet')
+#                 # Get local Docker registry conn string
+#                 from.utils import get_local_docker_registry_conn_string
+#                 local_docker_registry_conn_string = get_local_docker_registry_conn_string()
+#                 registry = 'docker://{}/'.format(local_docker_registry_conn_string)
+#             elif task.container.registry == 'docker_hub':
+#                 registry = 'docker://'
+#             else:
+#                 raise NotImplementedError('Registry {} not supported'.format(task.container.registry))
+#
+#             run_command+='{}{} &>> \$HOME/{}.log & echo \$!\'"'.format(registry, task.container.image, task.uuid)
+#
+#         else:
+#             raise NotImplementedError('Container {} not supported'.format(task.container.type))
+#
+#         out = os_shell(run_command, capture=True)
+#         if out.exit_code != 0:
+#             raise Exception(out.stderr)
+#
+#         # Log
+#         logger.debug('Shell exec output: "{}"'.format(out))
+#
+#
+#         # Load back the task to avoid concurrency problems in the agent call
+#         task_uuid = task.uuid
+#         task = Task.objects.get(uuid=task_uuid)
+#
+#         # Save pid echoed by the command above
+#         task_pid = out.stdout
+#
+#         # Set fields
+#         task.status = TaskStatuses.running
+#         task.pid = task_pid
+#         task.ip = second_host
+#
+#         # Save
+#         task.save()
+#
+#
+#     def _stop_task(self, task, **kwargs):
+#
+#         # Get user keys
+#         if self.computing.requires_user_keys:
+#             user_keys = KeyPair.objects.get(user=task.user, default=True)
+#         else:
+#             raise NotImplementedError('Remote tasks not requiring keys are not yet supported')
+#
+#         # Get computing params
+#         first_host = self.computing.conf.get('first_host')
+#         first_user = self.computing.conf.get('first_user')
+#         second_host = self.computing.conf.get('second_host')
+#         second_user = self.computing.conf.get('second_user')
+#
+#         # Stop the task remotely
+#         stop_command = 'ssh -o LogLevel=ERROR -i {} -4 -o StrictHostKeyChecking=no {}@{} '.format(user_keys.private_key_file, first_user, first_host)
+#         stop_command += '"ssh -4 -o StrictHostKeyChecking=no {}@{} '.format(second_user, second_host)
+#         stop_command += 'kill -9 {}"'.format(task.pid)
+#
+#         out = os_shell(stop_command, capture=True)
+#         if out.exit_code != 0:
+#             if not 'No such process' in out.stderr:
+#                 raise Exception(out.stderr)
+#
+#         # Set task as stopped
+#         task.status = TaskStatuses.stopped
+#         task.save()
+#
+#
+#     def _get_task_log(self, task, **kwargs):
+#
+#         # Get user keys
+#         if self.computing.requires_user_keys:
+#             user_keys = KeyPair.objects.get(user=task.user, default=True)
+#         else:
+#             raise NotImplementedError('Remote tasks not requiring keys are not yet supported')
+#
+#         # Get computing params
+#         first_host = self.computing.conf.get('first_host')
+#         first_user = self.computing.conf.get('first_user')
+#         second_host = self.computing.conf.get('second_host')
+#         second_user = self.computing.conf.get('second_user')
+#
+#         # View log remotely
+#         view_log_command = 'ssh -o LogLevel=ERROR -i {} -4 -o StrictHostKeyChecking=no {}@{} '.format(user_keys.private_key_file, first_user, first_host)
+#         view_log_command += '"ssh -4 -o StrictHostKeyChecking=no {}@{} '.format(second_user, second_host)
+#         view_log_command += 'cat \\\\\\$HOME/{}.log"'.format(task.uuid)
+#
+#         out = os_shell(view_log_command, capture=True)
+#         if out.exit_code != 0:
+#             raise Exception(out.stderr)
+#         else:
+#             return out.stdout
diff --git a/services/webapp/code/rosetta/core_app/management/commands/core_app_populate.py b/services/webapp/code/rosetta/core_app/management/commands/core_app_populate.py
index 8bcdfd0..feadcdf 100644
--- a/services/webapp/code/rosetta/core_app/management/commands/core_app_populate.py
+++ b/services/webapp/code/rosetta/core_app/management/commands/core_app_populate.py
@@ -209,11 +209,12 @@ class Command(BaseCommand):
         print('Creating demo computing resources containers...')
 
         #==============================
-        # Local remote computing
+        # Demo Internal computing
         #==============================
         Computing.objects.create(user = None,
-                                 name = 'Local',
-                                 type = 'local',
+                                 name = 'Demo Internal',
+                                 type = 'singlenode',
+                                 access_method = 'internal',
                                  requires_sys_conf = False,
                                  requires_user_conf = False,
                                  requires_user_keys = False,
@@ -222,32 +223,34 @@
 
 
         #==============================
-        # Demo remote computing
+        # Demo Single Node computing
        #==============================
-        demo_remote_auth_computing = Computing.objects.create(user = None,
-                                                              name = 'Demo remote',
-                                                              type = 'remote',
-                                                              requires_sys_conf = True,
-                                                              requires_user_conf = True,
-                                                              requires_user_keys = True,
-                                                              supports_docker = True,
-                                                              supports_singularity = True)
+        demo_singlenode_computing = Computing.objects.create(user = None,
+                                                             name = 'Demo Single Node',
+                                                             type = 'singlenode',
+                                                             access_method = 'ssh',
+                                                             requires_sys_conf = True,
+                                                             requires_user_conf = True,
+                                                             requires_user_keys = True,
+                                                             supports_docker = True,
+                                                             supports_singularity = True)
 
-        ComputingSysConf.objects.create(computing = demo_remote_auth_computing,
+        ComputingSysConf.objects.create(computing = demo_singlenode_computing,
                                         data = {'host': 'slurmclusterworker-one',
                                                 'binds': '/shared/data/users:/shared/data/users,/shared/scratch:/shared/scratch'})
 
         ComputingUserConf.objects.create(user = testuser,
-                                         computing = demo_remote_auth_computing,
+                                         computing = demo_singlenode_computing,
                                          data = {'user': 'slurmtestuser'})
 
 
         #==============================
-        # Demo Slurm computing
+        # Demo Cluster computing
         #==============================
         demo_slurm_computing = Computing.objects.create(user = None,
-                                                        name = 'Demo Slurm',
-                                                        type = 'slurm',
+                                                        name = 'Demo Cluster',
+                                                        type = 'cluster',
+                                                        access_method = 'slurm+ssh',
                                                         requires_sys_conf = True,
                                                         requires_user_conf = True,
                                                         requires_user_keys = True,
diff --git a/services/webapp/code/rosetta/core_app/migrations/0004_auto_20210408_1041.py b/services/webapp/code/rosetta/core_app/migrations/0004_auto_20210408_1041.py
new file mode 100644
index 0000000..2df6120
--- /dev/null
+++ b/services/webapp/code/rosetta/core_app/migrations/0004_auto_20210408_1041.py
@@ -0,0 +1,30 @@
+# Generated by Django 2.2.1 on 2021-04-08 10:41
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('core_app', '0003_text'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='computing',
+            name='access_method',
+            field=models.CharField(default='NA', max_length=255, verbose_name='Computing Access method'),
+            preserve_default=False,
+        ),
+        migrations.AlterField(
+            model_name='computingsysconf',
+            name='computing',
+            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_sys_conf', to='core_app.Computing'),
+        ),
+        migrations.AlterField(
+            model_name='computinguserconf',
+            name='computing',
+            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_user_conf', to='core_app.Computing'),
+        ),
+    ]
diff --git a/services/webapp/code/rosetta/core_app/models.py b/services/webapp/code/rosetta/core_app/models.py
index c374c39..3ec486f 100644
--- a/services/webapp/code/rosetta/core_app/models.py
+++ b/services/webapp/code/rosetta/core_app/models.py
@@ -139,6 +139,7 @@ class Computing(models.Model):
 
     name = models.CharField('Computing Name', max_length=255, blank=False, null=False)
     type = models.CharField('Computing Type', max_length=255, blank=False, null=False)
+    access_method = models.CharField('Computing Access method', max_length=255, blank=False, null=False)
 
     requires_sys_conf = models.BooleanField(default=False)
     requires_user_conf = models.BooleanField(default=False)
@@ -147,6 +148,24 @@
     supports_docker = models.BooleanField(default=False)
     supports_singularity = models.BooleanField(default=False)
 
+    @property
+    def type_str(self):
+        # TODO: improve me?
+        if self.type == 'cluster':
+            return 'Cluster'
+        elif self.type == 'singlenode':
+            return 'Single Node'
+        else:
+            raise ConsistencyException('Unknown computing resource type "{}"'.format(self.type))
+
+    @property
+    def access_method_str(self):
+        # TODO: improve me?
+        access_method = self.access_method
+        access_method = access_method.replace('ssh', 'SSH')
+        access_method = access_method.replace('slurm', 'Slurm')
+        return access_method
+
     class Meta:
         ordering = ['name']
 
@@ -179,14 +198,14 @@
 
         try:
             return self._manager
         except AttributeError:
-            if self.type == 'local':
-                self._manager = computing_managers.LocalComputingManager(self)
-            elif self.type == 'remote':
-                self._manager = computing_managers.RemoteComputingManager(self)
-            elif self.type == 'slurm':
-                self._manager = computing_managers.SlurmComputingManager(self)
+            if self.type == 'cluster' and self.access_method == 'slurm+ssh':
+                self._manager = computing_managers.SlurmSSHClusterComputingManager(self)
+            elif self.type == 'singlenode' and self.access_method == 'ssh':
+                self._manager = computing_managers.SSHSingleNodeComputingManager(self)
+            elif self.type == 'singlenode' and self.access_method == 'internal':
+                self._manager = computing_managers.InternalSingleNodeComputingManager(self)
             else:
-                raise ConsistencyException('Don\'t know how to instantiate a computing manager for computing resource of type "{}"'.format(self.type))
+                raise ConsistencyException('Don\'t know how to instantiate a computing manager for computing resource of type "{}" and access mode "{}"'.format(self.type, self.access_method))
             return self._manager
 
diff --git a/services/webapp/code/rosetta/core_app/templates/components/computing.html b/services/webapp/code/rosetta/core_app/templates/components/computing.html
index df1d1d4..a8e5284 100644
--- a/services/webapp/code/rosetta/core_app/templates/components/computing.html
+++ b/services/webapp/code/rosetta/core_app/templates/components/computing.html
@@ -24,13 +24,18 @@
                 <td>{{ data.computing.type }}</td>
             </tr>
 
+            <tr>
+                <td><b>Access method</b></td>
+                <td>{{ data.computing.access_method }}</td>
+            </tr>
+
             <tr>
                 <td><b>Owner</b></td>
                 <td>{% if data.computing.user %}{{ data.computing.user }}{% else %}platform{% endif %}</td>
             </tr>
 
             <tr>
-                <td><b>Require</b></td>
+                <td><b>Requires</b></td>
                 <td>
                     Sys conf: {{ data.computing.requires_sys_conf }} <br/>
                     User conf: {{ data.computing.requires_user_conf }} <br/>
@@ -76,8 +81,9 @@
         </div>
 
         <div style="padding:10px;">
-            <b>Type:</b> {{ computing.type.title }}<br/>
-            <b>Owner:</b> {% if computing.user %}{{ computing.user }}{% else %}Platform{% endif %}<br/>
+            <b>Type:</b> {{ computing.type_str }}<br/>
+            <b>Access:</b> {{ computing.access_method_str }}<br/>
+            <!-- <b>Owner:</b> {% if computing.user %}{{ computing.user }}{% else %}Platform{% endif %}<br/> -->
             <b>Supports:</b>
             {% if computing.supports_docker %}Docker <img src="/static/img/docker-logo.svg" style="height:18px; width:18px; margin-bottom:2px" />{% endif %}
             {% if computing.supports_singularity %}Singularity <img src="/static/img/singularity-logo.svg" style="height:18px; width:18px; margin-bottom:2px" />{% endif %}
-- 
GitLab