diff --git a/services/webapp/code/rosetta/core_app/api.py b/services/webapp/code/rosetta/core_app/api.py
index 0164a4c72e4d8ad3e6632a33b502c7378fd7241a..ea5415165eedab9600ccb2961fbd122dffd0d222 100644
--- a/services/webapp/code/rosetta/core_app/api.py
+++ b/services/webapp/code/rosetta/core_app/api.py
@@ -325,19 +325,19 @@ print(port)
             task.status = TaskStatuses.running
             task.interface_ip = task_interface_ip
             
-            # Get container runtime
-            container_runtime = None
+            # Get container engine
+            container_engine = None
             if task.computing_options:
-                container_runtime = task.computing_options.get('container_runtime', None)
-            if not container_runtime:
-                container_runtime = task.computing.default_container_runtime
+                container_engine = task.computing_options.get('container_engine', None)
+            if not container_engine:
+                container_engine = task.computing.default_container_engine
             
-            if container_runtime=='singularity':
+            if container_engine=='singularity':
                 # For Singularity, set this only if the container supports custom interface ports
                 if task.container.supports_custom_interface_port:
                     task.interface_port = int(task_interface_port)
             else:
-                # For all other container runtimes, set it in any case
+                # For all other container engines, set it in any case
                 task.interface_port = int(task_interface_port)
             
             # Save the task
diff --git a/services/webapp/code/rosetta/core_app/computing_managers.py b/services/webapp/code/rosetta/core_app/computing_managers.py
index 33277aac6d344e212c7e55e1b7d4ae42f19a1144..f065b319329d016474e0d0ff75915ac22019b6c2 100644
--- a/services/webapp/code/rosetta/core_app/computing_managers.py
+++ b/services/webapp/code/rosetta/core_app/computing_managers.py
@@ -188,15 +188,15 @@ class SSHStandaloneComputingManager(StandaloneComputingManager, SSHComputingMana
         from.utils import get_webapp_conn_string
         webapp_conn_string = get_webapp_conn_string()
             
-        # Handle container runtime
-        container_runtime = None
+        # Handle container engine
+        container_engine = None
         if task.computing_options:
-            container_runtime = task.computing_options.get('container_runtime', None)
-        if not container_runtime:
-            container_runtime = task.computing.default_container_runtime
+            container_engine = task.computing_options.get('container_engine', None)
+        if not container_engine:
+            container_engine = task.computing.default_container_engine
 
-        # Runtime-specific part 
-        if container_runtime == 'singularity':
+        # Engine-specific part
+        if container_engine == 'singularity':
 
             #if not task.container.supports_custom_interface_port:
             #     raise Exception('This task does not support dynamic port allocation and is therefore not supported using singularity on Slurm')
@@ -248,7 +248,7 @@ class SSHStandaloneComputingManager(StandaloneComputingManager, SSHComputingMana
             run_command+='docker://{}/{}:{} &>> /tmp/{}_data/task.log & echo \$!"\''.format(task.container.registry, task.container.image_name, task.container.image_tag, task.uuid)
 
 
-        elif container_runtime in ['docker', 'podman']:
+        elif container_engine in ['docker', 'podman']:
 
             # Set pass if any
             authstring = ''
@@ -288,20 +288,20 @@ class SSHStandaloneComputingManager(StandaloneComputingManager, SSHComputingMana
                         binds += ' -v{}:{}'.format(expanded_base_path, expanded_bind_path)
             
             # TODO: remove this hardcoding
-            prefix = 'sudo' if (computing_host == 'slurmclusterworker' and container_runtime=='docker') else ''
+            prefix = 'sudo' if (computing_host == 'slurmclusterworker' and container_engine=='docker') else ''
             
             run_command  = 'ssh -o LogLevel=ERROR -i {} -4 -o StrictHostKeyChecking=no {}@{} '.format(computing_keys.private_key_file, computing_user, computing_host)
             run_command += '/bin/bash -c \'"rm -rf /tmp/{}_data && mkdir /tmp/{}_data && chmod 700 /tmp/{}_data && '.format(task.uuid, task.uuid, task.uuid) 
             run_command += 'wget {}/api/v1/base/agent/?task_uuid={} -O /tmp/{}_data/agent.py &> /dev/null && export TASK_PORT=\$(python /tmp/{}_data/agent.py 2> /tmp/{}_data/task.log) && '.format(webapp_conn_string, task.uuid, task.uuid, task.uuid, task.uuid)
-            run_command += '{} {} run -p \$TASK_PORT:{} {} {} '.format(prefix, container_runtime, task.container.interface_port, authstring, binds)        
-            if container_runtime == 'podman':
+            run_command += '{} {} run -p \$TASK_PORT:{} {} {} '.format(prefix, container_engine, task.container.interface_port, authstring, binds)        
+            if container_engine == 'podman':
                 run_command += '--network=private --uts=private '
             #run_command += '-d -t {}/{}:{}'.format(task.container.registry, task.container.image_name, task.container.image_tag)
             run_command += '-h task-{} -d -t {}/{}:{}'.format(task.short_uuid, task.container.registry, task.container.image_name, task.container.image_tag)
             run_command += '"\''
             
         else:
-            raise NotImplementedError('Container runtime {} not supported'.format(container_runtime))
+            raise NotImplementedError('Container engine {} not supported'.format(container_engine))
 
         out = os_shell(run_command, capture=True)
         if out.exit_code != 0:
@@ -326,21 +326,21 @@ class SSHStandaloneComputingManager(StandaloneComputingManager, SSHComputingMana
         # Get credentials
         computing_user, computing_host, computing_keys = get_ssh_access_mode_credentials(self.computing, task.user)
 
-        # Handle container runtime
-        container_runtime = None
+        # Handle container engine
+        container_engine = None
         if task.computing_options:
-            container_runtime = task.computing_options.get('container_runtime', None)
-        if not container_runtime:
-            container_runtime = task.computing.default_container_runtime
+            container_engine = task.computing_options.get('container_engine', None)
+        if not container_engine:
+            container_engine = task.computing.default_container_engine
 
-        if container_runtime=='singularity':
+        if container_engine=='singularity':
             internal_stop_command = 'kill -9 {}'.format(task.id)            
-        elif container_runtime in ['docker', 'podman']:
+        elif container_engine in ['docker', 'podman']:
             # TODO: remove this hardcoding
-            prefix = 'sudo' if (computing_host == 'slurmclusterworker' and container_runtime=='docker') else ''
-            internal_stop_command = '{} {} stop {} && {} {} rm {}'.format(prefix,container_runtime,task.id,prefix,container_runtime,task.id)
+            prefix = 'sudo' if (computing_host == 'slurmclusterworker' and container_engine=='docker') else ''
+            internal_stop_command = '{} {} stop {} && {} {} rm {}'.format(prefix,container_engine,task.id,prefix,container_engine,task.id)
         else:
-            raise NotImplementedError('Container runtime {} not supported'.format(container_runtime))
+            raise NotImplementedError('Container engine {} not supported'.format(container_engine))
 
         stop_command = 'ssh -o LogLevel=ERROR -i {} -4 -o StrictHostKeyChecking=no {}@{} \'/bin/bash -c "{}"\''.format(computing_keys.private_key_file, computing_user, computing_host, internal_stop_command)
         out = os_shell(stop_command, capture=True)
@@ -360,21 +360,21 @@ class SSHStandaloneComputingManager(StandaloneComputingManager, SSHComputingMana
         # Get credentials
         computing_user, computing_host, computing_keys = get_ssh_access_mode_credentials(self.computing, task.user)
         
-        # Handle container runtime
-        container_runtime = None
+        # Handle container engine
+        container_engine = None
         if task.computing_options:
-            container_runtime = task.computing_options.get('container_runtime', None)
-        if not container_runtime:
-            container_runtime = task.computing.default_container_runtime
+            container_engine = task.computing_options.get('container_engine', None)
+        if not container_engine:
+            container_engine = task.computing.default_container_engine
 
-        if container_runtime=='singularity':
+        if container_engine=='singularity':
             internal_view_log_command = 'cat /tmp/{}_data/task.log'.format(task.uuid)            
-        elif container_runtime in ['docker','podman']:
+        elif container_engine in ['docker','podman']:
             # TODO: remove this hardcoding
-            prefix = 'sudo' if (computing_host == 'slurmclusterworker' and container_runtime=='docker') else ''
-            internal_view_log_command = '{} {} logs {}'.format(prefix,container_runtime,task.id)
+            prefix = 'sudo' if (computing_host == 'slurmclusterworker' and container_engine=='docker') else ''
+            internal_view_log_command = '{} {} logs {}'.format(prefix,container_engine,task.id)
         else:
-            raise NotImplementedError('Container runtime {} not supported'.format(container_runtime))
+            raise NotImplementedError('Container engine {} not supported'.format(container_engine))
             
         # Prepare full comand
         view_log_command = 'ssh -o LogLevel=ERROR -i {} -4 -o StrictHostKeyChecking=no {}@{} \'/bin/bash -c "{}"\''.format(computing_keys.private_key_file, computing_user, computing_host, internal_view_log_command)
@@ -419,15 +419,15 @@ class SlurmSSHClusterComputingManager(ClusterComputingManager, SSHComputingManag
         # Set output and error files
         sbatch_args += ' --output=\$HOME/{}.log --error=\$HOME/{}.log '.format(task.uuid, task.uuid)
 
-        # Handle container runtime
-        container_runtime = None
+        # Handle container engine
+        container_engine = None
         if task.computing_options:
-            container_runtime = task.computing_options.get('container_runtime', None)
-        if not container_runtime:
-            container_runtime = task.computing.default_container_runtime
+            container_engine = task.computing_options.get('container_engine', None)
+        if not container_engine:
+            container_engine = task.computing.default_container_engine
 
-        # Runtime-specific part 
-        if container_runtime == 'singularity':
+        # Engine-specific part
+        if container_engine == 'singularity':
 
             #if not task.container.supports_custom_interface_port:
             #     raise Exception('This task does not support dynamic port allocation and is therefore not supported using singularity on Slurm')
@@ -479,7 +479,7 @@ class SlurmSSHClusterComputingManager(ClusterComputingManager, SSHComputingManag
             run_command+='docker://{}/{}:{} &> \$HOME/{}.log\\" > \$HOME/{}.sh && sbatch {} \$HOME/{}.sh"\''.format(task.container.registry, task.container.image_name, task.container.image_tag, task.uuid, task.uuid, sbatch_args, task.uuid)
 
         else:
-            raise NotImplementedError('Container runtime {} not supported'.format(container_runtime))
+            raise NotImplementedError('Container engine {} not supported'.format(container_engine))
 
         out = os_shell(run_command, capture=True)
         if out.exit_code != 0:
diff --git a/services/webapp/code/rosetta/core_app/management/commands/core_app_populate.py b/services/webapp/code/rosetta/core_app/management/commands/core_app_populate.py
index af838700a3d422d8f6b4a500c0f513e1e5d7ec93..2ed44a31aff2095e1315a46d10ee47bfed60721e 100644
--- a/services/webapp/code/rosetta/core_app/management/commands/core_app_populate.py
+++ b/services/webapp/code/rosetta/core_app/management/commands/core_app_populate.py
@@ -267,7 +267,7 @@ to provide help, news and informations on your deployment. Or you can just ignor
                                      access_mode = 'internal',
                                      auth_mode = 'internal',
                                      wms = None,
-                                     container_runtimes = ['docker'])
+                                     container_engines = ['docker'])
 
             
             # Demo standalone computing plus conf
@@ -280,7 +280,7 @@ to provide help, news and informations on your deployment. Or you can just ignor
                                                                  auth_mode = 'user_keys',
                                                                  wms = None,
                                                                  conf = {'host': 'standaloneworker'},
-                                                                 container_runtimes = ['singularity','podman'])
+                                                                 container_engines = ['singularity','podman'])
     
             # Add testuser extra conf for this computing resource
             testuser.profile.add_extra_conf(conf_type = 'computing_user', object=demo_singlenode_computing, value= 'testuser')
@@ -295,7 +295,7 @@ to provide help, news and informations on your deployment. Or you can just ignor
                                                             auth_mode = 'user_keys',
                                                             wms = 'slurm',
                                                             conf = {'host': 'slurmclustermaster', 'default_partition': 'partition1'},
-                                                            container_runtimes = ['singularity'])
+                                                            container_engines = ['singularity'])
            
             # Add testuser extra conf for this computing resource
             testuser.profile.add_extra_conf(conf_type = 'computing_user', object=demo_slurm_computing, value= 'slurmtestuser')
diff --git a/services/webapp/code/rosetta/core_app/migrations/0029_auto_20211218_2354.py b/services/webapp/code/rosetta/core_app/migrations/0029_auto_20211218_2354.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8d9887c32a848d565e49fb4bb0a745c551dba87
--- /dev/null
+++ b/services/webapp/code/rosetta/core_app/migrations/0029_auto_20211218_2354.py
@@ -0,0 +1,18 @@
+# Generated by Django 2.2.1 on 2021-12-18 23:54
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('core_app', '0028_computing_arch'),
+    ]
+
+    operations = [
+        migrations.RenameField(
+            model_name='computing',
+            old_name='container_runtimes',
+            new_name='container_engines',
+        ),
+    ]
diff --git a/services/webapp/code/rosetta/core_app/migrations/0030_auto_20211218_2355.py b/services/webapp/code/rosetta/core_app/migrations/0030_auto_20211218_2355.py
new file mode 100644
index 0000000000000000000000000000000000000000..13b707a10c7288bb288bd1636ef61fbd0b7a9271
--- /dev/null
+++ b/services/webapp/code/rosetta/core_app/migrations/0030_auto_20211218_2355.py
@@ -0,0 +1,19 @@
+# Generated by Django 2.2.1 on 2021-12-18 23:55
+
+import django.contrib.postgres.fields.jsonb
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('core_app', '0029_auto_20211218_2354'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='computing',
+            name='container_engines',
+            field=django.contrib.postgres.fields.jsonb.JSONField(verbose_name='Container engines/runtimes'),
+        ),
+    ]
diff --git a/services/webapp/code/rosetta/core_app/models.py b/services/webapp/code/rosetta/core_app/models.py
index f98c409341308670e52021a959663c9ef52215db..061bb2656f45b0cbbe52ff361cc8914412c5a4a7 100644
--- a/services/webapp/code/rosetta/core_app/models.py
+++ b/services/webapp/code/rosetta/core_app/models.py
@@ -192,8 +192,8 @@ class Computing(models.Model):
     auth_mode   = models.CharField('Auth mode', max_length=36, blank=False, null=False)
     wms         = models.CharField('Workload management system', max_length=36, blank=True, null=True)
     
-    # Supported container runtimes ['docker', 'singularity']
-    container_runtimes = JSONField('Container runtimes', blank=False, null=False)
+    # Supported container engines (e.g. ['docker', 'singularity', 'podman'])
+    container_engines = JSONField('Container engines/runtimes', blank=False, null=False)
     #container_runtime = models.CharField('Container runtimes', max_length=256, blank=False, null=False)
  
     # Supported architectures (i.e. 386 for amd64), as list: ['386']
@@ -227,8 +227,8 @@ class Computing(models.Model):
         return color_map[color_map_index]
 
     @property
-    def default_container_runtime(self):
-        return self.container_runtimes[0]
+    def default_container_engine(self):
+        return self.container_engines[0]
     
 
     #=======================
diff --git a/services/webapp/code/rosetta/core_app/templates/components/computing.html b/services/webapp/code/rosetta/core_app/templates/components/computing.html
index 1e6935f480c647664433b1805d61aa7f0cc6fc28..6c55b09e3c091d824a8d30f8cab75ded93c76545 100644
--- a/services/webapp/code/rosetta/core_app/templates/components/computing.html
+++ b/services/webapp/code/rosetta/core_app/templates/components/computing.html
@@ -45,8 +45,8 @@
        </tr>
 
        <tr>
-        <td><b>Container runtimes</b></td>
-        <td>{{ data.computing.container_runtimes }}</td>
+        <td><b>Container engines</b></td>
+        <td>{{ data.computing.container_engines }}</td>
        </tr>
 
        <tr>
@@ -110,10 +110,10 @@
         </div>
         <!-- ><b>Access:</b> {{ computing.access_mode }}<br/> --
         <!-- <b>Owner:</b> {% if computing.user %}{{ computing.user }}{% else %}Platform{% endif %}<br/> -->
-        <!--  <b>Runtimes:</b> {{ computing.container_runtimes }} -->
+        <!--  <b>Engines:</b> {{ computing.container_engines }} -->
         
-        <!-- {% if 'docker' in computing.container_runtimes %}<img src="/static/img/docker-logo.svg" style="height:18px; width:18px; margin-bottom:2px" />{% endif %}
-        {% if 'singularity' in computing.container_runtimes %}<img src="/static/img/singularity-logo.svg" style="height:18px; width:18px; margin-bottom:2px" />{% endif %}-->
+        <!-- {% if 'docker' in computing.container_engines %}<img src="/static/img/docker-logo.svg" style="height:18px; width:18px; margin-bottom:2px" />{% endif %}
+        {% if 'singularity' in computing.container_engines %}<img src="/static/img/singularity-logo.svg" style="height:18px; width:18px; margin-bottom:2px" />{% endif %}-->
         {% if container %}
         <div style="margin:0px; margin-top:2px; text-align:center; padding:5px">
         <form action="/new_task/" method="POST">
diff --git a/services/webapp/code/rosetta/core_app/templates/new_task.html b/services/webapp/code/rosetta/core_app/templates/new_task.html
index 991c902cb115d1ba9c8a4b01ac0a08f9964d8875..16aed722728a7ceb239121d41a38566a7445950d 100644
--- a/services/webapp/code/rosetta/core_app/templates/new_task.html
+++ b/services/webapp/code/rosetta/core_app/templates/new_task.html
@@ -128,13 +128,13 @@
            </tr>
            {% endif %}
 
-           {% if data.task_computing.container_runtimes|length > 1 %}
+           {% if data.task_computing.container_engines|length > 1 %}
            <tr>
-            <td><b>Container&nbsp;runtime</b></td><td>
-             <select name="container_runtime" >
+            <td><b>Container&nbsp;engine</b></td><td>
+             <select name="container_engine" >
              <option value="" selected>Default</option>
-             {% for container_runtime in data.task_computing.container_runtimes %}
-             <option value="{{container_runtime}}">{{container_runtime}}</option>
+             {% for container_engine in data.task_computing.container_engines %}
+             <option value="{{container_engine}}">{{container_engine}}</option>
              {% endfor %}
              </select>
             </td>
@@ -161,9 +161,9 @@
           </table>
           
           
-          {% if data.task_computing.default_container_runtime == 'singularity' and not data.task_container.supports_custom_interface_port %}
+          {% if data.task_computing.default_container_engine == 'singularity' and not data.task_container.supports_custom_interface_port %}
           <div> <p style="font-size:15px; max-width:700px; margin-bottom:20px; margin-left:5px">
-          <i class="fa fa-exclamation-triangle" style="color:orange"></i> This container does not support custom interface ports and the computing resource you selected might use a container runtime which does not support port mapping (Singularity). In this case, if the container interface port is already allocated, the task will fail to start.
+          <i class="fa fa-exclamation-triangle" style="color:orange"></i> This container does not support custom interface ports and the computing resource you selected might use a container engine/runtime which does not support port mapping. In this case, if the container interface port is already allocated, the task will fail to start.
           </p></div>
           {% endif %}
 
@@ -175,7 +175,7 @@
 
           {% if data.arch_auto_selection %}
           <div> <p style="font-size:15px; max-width:700px; margin-bottom:20px; margin-left:5px">
-          <i class="fa fa-exclamation-triangle" style="color:orange"></i> The selected software container does not specify any architecture. This will leave to the container runtime either to auto-select the right image architecture on the registry, or to fallback on emulation if not found. Beware of potential incompatibilities or slowdowns.
+          <i class="fa fa-exclamation-triangle" style="color:orange"></i> The selected software container does not specify any architecture. This leaves it to the container engine/runtime either to auto-select the right image architecture from the registry, or to fall back to emulation if it is not found. Beware of potential incompatibilities or slowdowns.
           </p></div>
           {% endif %}
           
diff --git a/services/webapp/code/rosetta/core_app/views.py b/services/webapp/code/rosetta/core_app/views.py
index b8927d45ad6fe4d46295d62b8bbde15a669eeb27..4605b3cdde4b554cb63c4fa8a62e706ffec30fbf 100644
--- a/services/webapp/code/rosetta/core_app/views.py
+++ b/services/webapp/code/rosetta/core_app/views.py
@@ -515,21 +515,38 @@ def new_task(request):
         data['task_computing'] = get_task_computing(request)
         
         # Check that container required architecture is compatible with the computing resource
-        # TODO: support setting the container runtime when creating the task
+        # TODO: support setting the container engine when creating the task
         # TODO: refactor and unroll this code
         if data['task_computing'].supported_archs is None: data['task_computing'].supported_archs=[]
         if data['task_computing'].emulated_archs is None: data['task_computing'].emulated_archs={}
+        data['arch_emulation'] = False
         
         if data['task_container'].image_arch:
             if (data['task_container'].image_arch != data['task_computing'].arch) and (data['task_container'].image_arch not in data['task_computing'].supported_archs):
 
+                # Does the container engine support emulated archs?
                 if data['task_computing'].emulated_archs:
-                    container_runtime = data['task_computing'].container_runtimes[0]
+
+                    # For now, the default container engine is the first one listed
+                    container_engine = data['task_computing'].container_engines[0]
                     
-                    if container_runtime in data['task_computing'].emulated_archs and data['task_container'].image_arch in data['task_computing'].emulated_archs[container_runtime]:
+                    # Check for emulation against the engine
+                    if container_engine in data['task_computing'].emulated_archs and data['task_container'].image_arch in data['task_computing'].emulated_archs[container_engine]:
                         data['arch_emulation'] = True
+
+                    # Also check for emulation against any engines listed in brackets within the engine string (i.e. "engine[engine1,engine2]")
+                    def get_engines(container_engine):
+                        if '[' not in container_engine:
+                            # No bracketed engines: return an empty list so the loop below is a no-op
+                            return []
+                        else:
+                            return container_engine.split('[')[1].replace(']','').split(',')
                         
-                    else:
+                    for container_engine in get_engines(container_engine):
+                        if container_engine in data['task_computing'].emulated_archs and data['task_container'].image_arch in data['task_computing'].emulated_archs[container_engine]:
+                            data['arch_emulation'] = True
+
+                    if not data['arch_emulation']:
                         raise ErrorMessage('This computing resource does not support architecture \'{}\' nor as native or emulated'.format(data['task_container'].image_arch))
                 
                 else:
@@ -611,12 +628,12 @@ def new_task(request):
         # Computing options
         computing_options = {}
         
-        # Container runtime if any set
-        container_runtime = request.POST.get('container_runtime', None)
-        if container_runtime:
-            if not container_runtime in data['task_computing'].container_runtimes:
-                raise ErrorMessage('Unknown container runtime  "{}"'.format(container_runtime))
-            computing_options['container_runtime'] = container_runtime
+        # Container engine if any set
+        container_engine = request.POST.get('container_engine', None)
+        if container_engine:
+            if not container_engine in data['task_computing'].container_engines:
+                raise ErrorMessage('Unknown container engine "{}"'.format(container_engine))
+            computing_options['container_engine'] = container_engine
 
         # CPUs, memory and partition if set 
         computing_cpus = request.POST.get('computing_cpus', None)