diff --git a/services/webapp/code/rosetta/core_app/computing_managers.py b/services/webapp/code/rosetta/core_app/computing_managers.py
index 1e4dfa5de7138b5031558d369b46a3d4a362752b..c0fb66363edb9cfc6a30db69da3d4ef05b6883ab 100644
--- a/services/webapp/code/rosetta/core_app/computing_managers.py
+++ b/services/webapp/code/rosetta/core_app/computing_managers.py
@@ -73,7 +73,7 @@ class ComputingManager(object):
         return self._get_task_log(task, **kwargs)
 
 
-class SingleNodeComputingManager(ComputingManager):
+class StandaloneComputingManager(ComputingManager):
     pass
 
 
@@ -87,7 +87,7 @@ class SSHComputingManager(ComputingManager):
 
 
 
-class InternalSingleNodeComputingManager(SingleNodeComputingManager):
+class InternalStandaloneComputingManager(StandaloneComputingManager):
     
     def _start_task(self, task):
 
@@ -176,7 +176,7 @@ class InternalSingleNodeComputingManager(SingleNodeComputingManager):
 
 
 
-class SSHSingleNodeComputingManager(SingleNodeComputingManager, SSHComputingManager):
+class SSHStandaloneComputingManager(StandaloneComputingManager, SSHComputingManager):
     
     def _start_task(self, task, **kwargs):
         logger.debug('Starting a remote task "{}"'.format(self.computing))
@@ -188,8 +188,12 @@ class SSHSingleNodeComputingManager(SingleNodeComputingManager, SSHComputingMana
         from .utils import get_webapp_conn_string
         webapp_conn_string = get_webapp_conn_string()
             
-        # Handle container runtime 
-        if self.computing.default_container_runtime == 'singularity':
+        # Handle container runtime
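+        # A task-level "container_runtime" option, when set, overrides the resource default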
+        container_runtime = task.computing_options.get('container_runtime', task.computing.default_container_runtime)
+
+        # Runtime-specific part
+        if container_runtime == 'singularity':
 
             #if not task.container.supports_custom_interface_port:
             #     raise Exception('This task does not support dynamic port allocation and is therefore not supported using singularity on Slurm')
@@ -327,8 +331,12 @@ class SlurmSSHClusterComputingManager(ClusterComputingManager, SSHComputingManag
         # Set output and error files
         sbatch_args += ' --output=\$HOME/{}.log --error=\$HOME/{}.log '.format(task.uuid, task.uuid)
 
-        # Submit the job
-        if task.computing.default_container_runtime == 'singularity':
+        # Handle container runtime
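+        # A task-level "container_runtime" option, when set, overrides the resource default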
+        container_runtime = task.computing_options.get('container_runtime', task.computing.default_container_runtime)
+
+        # Runtime-specific part
+        if container_runtime == 'singularity':
 
             #if not task.container.supports_custom_interface_port:
             #     raise Exception('This task does not support dynamic port allocation and is therefore not supported using singularity on Slurm')
diff --git a/services/webapp/code/rosetta/core_app/models.py b/services/webapp/code/rosetta/core_app/models.py
index 968a30340995c963915e9137ce316dc384a0d3b0..1415994bc67fa7bf09549025adc816328ac506f8 100644
--- a/services/webapp/code/rosetta/core_app/models.py
+++ b/services/webapp/code/rosetta/core_app/models.py
@@ -239,19 +239,24 @@ class Computing(models.Model):
     def manager(self):
         from . import computing_managers
         
-        # Instantiate the computing manager based on type (if not already done)
+        # Map each supported (type, access mode, auth mode, WMS) combination to its manager class
+        managers_mapping = {}
+        managers_mapping[('cluster', 'ssh+cli', 'user_keys', 'slurm')] = computing_managers.SlurmSSHClusterComputingManager
+        managers_mapping[('standalone', 'ssh+cli', 'user_keys', None)] = computing_managers.SSHStandaloneComputingManager
+        managers_mapping[('standalone', 'internal', 'internal', None)] = computing_managers.InternalStandaloneComputingManager
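+        # Supporting a new resource combination only requires adding an entry here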
+        
+        # Instantiate the computing manager (if not already done) and return it
         try:
             return self._manager
         except AttributeError:
-            if self.type == 'cluster' and self.access_mode == 'ssh+cli' and self.auth_mode == 'user_keys' and self.wms == 'slurm':
-                self._manager = computing_managers.SlurmSSHClusterComputingManager(self)
-            elif self.type == 'standalone' and self.access_mode == 'ssh+cli' and self.auth_mode == 'user_keys' and self.wms is None:
-                self._manager = computing_managers.SSHSingleNodeComputingManager(self)
-            elif self.type == 'standalone' and self.access_mode == 'internal' and self.auth_mode == 'internal' and self.wms is None:
-                self._manager = computing_managers.InternalSingleNodeComputingManager(self)
-            else:
-                raise ConsistencyException('Don\'t know how to instantiate a computing manager for computing resource of type "{}", access mode "{}" and WMS "{}"'.format(self.type, self.access_mode, self.wms))
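+            # Fail with a clear error on unsupported combinations rather than a bare KeyError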
+            try:
+                self._manager = managers_mapping[(self.type, self.access_mode, self.auth_mode, self.wms)](self)
+            except KeyError:
+                raise ConsistencyException('Don\'t know how to instantiate a computing manager for computing resource of type "{}", access mode "{}" and WMS "{}"'.format(self.type, self.access_mode, self.wms))
             return self._manager
+
     
 
 #=========================