Commit ca0a98b3 authored by Stefano Alberto Russo

First codebase push. Edited README accordingly.

parent 8ff7a024
Showing 893 additions and 0 deletions
#!/bin/sh
cd /metauser
unset SESSION_MANAGER
unset DBUS_SESSION_BUS_ADDRESS
OS=`uname -s`
if [ "$OS" = 'Linux' ]; then
  case "$WINDOWMANAGER" in
    *gnome*)
      if [ -e /etc/SuSE-release ]; then
        PATH=$PATH:/opt/gnome/bin
        export PATH
      fi
      ;;
  esac
fi
if [ -x /etc/X11/xinit/xinitrc ]; then
  exec /etc/X11/xinit/xinitrc
fi
if [ -f /etc/X11/xinit/xinitrc ]; then
  exec sh /etc/X11/xinit/xinitrc
fi
[ -r "$HOME/.Xresources" ] && xrdb "$HOME/.Xresources"
#xsetroot -solid grey
xterm -geometry 80x24+10+10 -ls -title "$VNCDESKTOP Desktop" &
#twm &
fluxbox &
#!/bin/bash
docker run -v$PWD/:/data -p5900:5900 -p8590:8590 -it sarusso/metadesktop
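A hedged usage note, not part of the original script: the command above publishes ports 5900 and 8590 from the metadesktop container, so, assuming 5900 is a plain VNC endpoint, it can be reached from the host with any VNC client, for example:

# Assumes 5900 is the VNC port published by the run command above
vncviewer localhost::5900    # TigerVNC host::port syntax; host:port also works with most clients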
version: '3'

services:

  slurmclustermaster-main:
    image: "rosetta/slurmclustermaster"
    container_name: slurmclustermaster-main
    hostname: slurmclustermaster-main
    environment:
      - SAFEMODE=False
    privileged: true
    volumes:
      - ./data/shared:/shared
      # - ./data/singularity_cache:/rosetta/.singularity/cache # Not working, check permissions...
    ports:
      - "8590:8590"
      - "5900:5900"

  slurmclusterworker-one:
    image: "rosetta/slurmclusterworker"
    container_name: slurmclusterworker-one
    hostname: slurmclusterworker-one
    environment:
      - SAFEMODE=False
    privileged: true
    volumes:
      - ./data/shared:/shared

  dregistry:
    container_name: dregistry
    hostname: dregistry
    image: "rosetta/dregistry"
    volumes:
      - ./data/dregistry:/var/lib/registry
    ports:
      - "5000:5000"
#!/bin/bash
set -e
# Build
cd ../Software/MetaDesktop
docker build . -t rosetta/metadesktop
cd ../../
# Tag
docker tag rosetta/metadesktop localhost:5000/rosetta/metadesktop
# Push
docker push localhost:5000/rosetta/metadesktop
# Run
rosetta/shell slurmclustermaster-main "SINGULARITY_NOHTTPS=true singularity run --writable-tmpfs --containall --cleanenv docker://dregistry:5000/rosetta/metadesktop"
# Run variants/tests
# rosetta/shell slurmclustermaster-main "SINGULARITY_NOHTTPS=true singularity run docker://dregistry:5000/rosetta/metadesktop"
# rosetta/shell slurmclustermaster-main "rm -rf tmp && mkdir tmp && SINGULARITYENV_HOME=/metauser SINGULARITY_NOHTTPS=true singularity run -B ./tmp:/tmp,./tmp:/metauser --writable-tmpfs --containall --cleanenv docker://dregistry:5000/rosetta/metadesktop"
cd examples
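The build script above tags the image for the local registry, pushes it there, and then has Singularity pull it back through docker://dregistry:5000/...; a hedged way to confirm the push landed is to query the registry's standard v2 API on the published port:

# Hypothetical sanity check against the local registry on port 5000
curl -s http://localhost:5000/v2/_catalog
# e.g. {"repositories":["rosetta/metadesktop"]}
curl -s http://localhost:5000/v2/rosetta/metadesktop/tags/list
# e.g. {"name":"rosetta/metadesktop","tags":["latest"]}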
FROM ubuntu:18.04
MAINTAINER Stefano Alberto Russo <stefano.russo@gmail.com>
#----------------------
# Basics
#----------------------
# Set non-interactive
ENV DEBIAN_FRONTEND noninteractive
# Update
RUN apt-get update
# Utilities
RUN apt-get install -y nano telnet unzip wget supervisor openssh-server
# Devel
RUN apt-get install -y build-essential python-dev git-core
# Java
RUN apt-get install -y openjdk-8-jre
# IP utilities (mandatory for DNS!)
RUN apt-get install net-tools iproute2 iputils-ping -y
#------------------------
# Rosetta user
#------------------------
# Add group. We chose GID 65527 to try to avoid conflicts.
RUN groupadd -g 65527 rosetta
# Add user. We chose UID 65527 to try to avoid conflicts.
RUN useradd rosetta -d /rosetta -u 65527 -g 65527 -m -s /bin/bash
# Add rosetta user to sudoers
RUN adduser rosetta sudo
# Keys
RUN mkdir /rosetta/.ssh
COPY keys/authorized_keys /rosetta/.ssh/
COPY keys/id_rsa /rosetta/.ssh/
RUN chmod 0600 /rosetta/.ssh/id_rsa
COPY keys/id_rsa.pub /rosetta/.ssh/
RUN chown -R rosetta:rosetta /rosetta/.ssh
# Install sudo
RUN apt-get install sudo -y
# Passwordless sudo (for everyone, actually)
COPY sudoers /etc/sudoers
# bash_profile for loading correct env (/env.sh created by entrypoint.sh)
RUN echo "source /env.sh" > /rosetta/.bash_profile
#------------------------
# Data, Logs and opt dirs
#------------------------
# Create dirs
RUN mkdir /data && mkdir /opt/rosetta && mkdir /var/log/rosetta
# Give right permissions
RUN chown -R rosetta:rosetta /data && chown -R rosetta:rosetta /opt/rosetta && chown -R rosetta:rosetta /var/log/rosetta
#----------------------
# Supervisord conf
#----------------------
COPY supervisord.conf /etc/supervisor/
#----------------------
# SSH conf
#----------------------
RUN mkdir /var/run/sshd && chmod 0755 /var/run/sshd
COPY supervisord_sshd.conf /etc/supervisor/conf.d/
#----------------------
# Prestartup scripts
#----------------------
# Create dir for prestartup scripts and copy main script
RUN mkdir /prestartup
COPY prestartup.py /
#----------------------
# Singularity
#----------------------
# Dependencies
RUN apt-get update && apt-get install -y \
    build-essential \
    libssl-dev \
    uuid-dev \
    libgpgme11-dev \
    squashfs-tools \
    libseccomp-dev \
    pkg-config \
    cryptsetup-bin \
    wget
# Install GO
RUN cd /tmp && wget https://dl.google.com/go/go1.11.linux-amd64.tar.gz
RUN cd /tmp && tar -zxf go1.11.linux-amd64.tar.gz && mv go /usr/local
ENV GOROOT=/usr/local/go
ENV GOPATH=/root/go
ENV PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
# Install Singularity
RUN mkdir -p /usr/local/var/singularity/mnt && \
    mkdir -p $GOPATH/src/github.com/sylabs && \
    cd $GOPATH/src/github.com/sylabs && \
    wget https://github.com/sylabs/singularity/releases/download/v3.4.1/singularity-3.4.1.tar.gz && \
    tar -xzvf singularity-3.4.1.tar.gz
RUN cd $GOPATH/src/github.com/sylabs/singularity && \
    ./mconfig -p /usr/local && \
    make -C builddir && \
    make -C builddir install
# Build test image
RUN mkdir /singularity_images && chmod 777 /singularity_images
COPY testimage.def /singularity_images/testimage.def
RUN singularity build /singularity_images/testimage.simg /singularity_images/testimage.def
#----------------------
# Entrypoint
#----------------------
# Copy entrypoint
COPY entrypoint.sh /
# Give right permissions
RUN chmod 755 /entrypoint.sh
# Set entrypoint
ENTRYPOINT ["/entrypoint.sh"]
#!/bin/bash
# Exit on any error. More sophisticated error handling could be added in the future
# (see https://stackoverflow.com/questions/4381618/exit-a-script-on-error)
set -e
echo ""
echo "[INFO] Executing entrypoint..."
#---------------------
# Persistency
#---------------------
echo "[INFO] Handling safe persistency"
if [ "x$SAFE_PERSISTENCY" == "xTrue" ]; then
echo "[INFO] Safe persistency set"
if [ ! -f /safe_persistent/persistent.img ]; then
truncate -s 10G /safe_persistent/persistent.img
mkfs.ext4 -F /safe_persistent/persistent.img
fi
mkdir /persistent
mount -oloop /safe_persistent/persistent.img /persistent
fi
echo "[INFO] Handling persistency"
# If persistent data:
if [ "x$PERSISTENT_DATA" == "xTrue" ]; then
echo "[INFO] Persistent data set"
if [ ! -f /persistent/data/.persistent_initialized ]; then
mv /data /persistent/data
ln -s /persistent/data /data
touch /data/.persistent_initialized
else
mkdir -p /trash
mv /data /trash
ln -s /persistent/data /data
fi
fi
# If persistent log:
if [ "x$PERSISTENT_LOG" == "xTrue" ]; then
echo "[INFO] Persistent log set"
if [ ! -f /persistent/log/.persistent_initialized ]; then
mv /var/log /persistent/log
ln -s /persistent/log /var/log
touch /var/log/.persistent_initialized
else
mkdir -p /trash
mv /var/log /trash
ln -s /persistent/log /var/log
fi
fi
# If persistent home:
if [ "x$PERSISTENT_HOME" == "xTrue" ]; then
echo "[INFO] Persistent home set"
if [ ! -f /persistent/home/.persistent_initialized ]; then
mv /home /persistent/home
ln -s /persistent/home /home
touch /home/.persistent_initialized
else
mkdir -p /trash
mv /home /trash
ln -s /persistent/home /home
fi
fi
# If persistent opt:
if [ "x$PERSISTENT_OPT" == "xTrue" ]; then
echo "[INFO] Persistent opt set"
if [ ! -f /persistent/opt/.persistent_initialized ]; then
mv /opt /persistent/opt
ln -s /persistent/opt /opt
touch /opt/.persistent_initialized
else
mkdir -p /trash
mv /opt /trash
ln -s /persistent/opt /opt
fi
fi
#---------------------
# Prestartup scripts
#---------------------
if [ "x$SAFEMODE" == "xFalse" ]; then
echo "[INFO] Executing prestartup scripts (current + parents):"
python /prestartup.py
else
echo "[INFO] Not executing prestartup scripts as we are in safemode"
fi
#---------------------
# Save env
#---------------------
echo "[INFO] Dumping env"
# Save env vars for later usage (e.g. ssh)
env | \
while read env_var; do
    if [[ $env_var == HOME\=* ]]; then
        : # Skip HOME var
    elif [[ $env_var == PWD\=* ]]; then
        : # Skip PWD var
    else
        echo "export $env_var" >> /env.sh
    fi
done
#---------------------
# Entrypoint command
#---------------------
# Start!
if [[ "x$@" == "x" ]] ; then
ENTRYPOINT_COMMAND="supervisord"
else
ENTRYPOINT_COMMAND=$@
fi
echo -n "[INFO] Executing Docker entrypoint command: "
echo $ENTRYPOINT_COMMAND
exec "$ENTRYPOINT_COMMAND"
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC2n4wiLiRmE1sla5+w0IW3wwPW/mqhhkm7IyCBS+rGTgnts7xsWcxobvamNdD6KSLNnjFZbBb7Yaf/BvWrwQgdqIFVU3gRWHYzoU6js+lKtBjd0e2DAVGivWCKEkSGLx7zhx7uH/Jt8kyZ4NaZq0p5+SFHBzePdR/1rURd8G8+G3OaCPKqP+JQT4RMUQHC5SNRJLcK1piYdmhDiYEyuQG4FlStKCWLCXeUY2EVirNMeQIfOgbUHJsVjH07zm1y8y7lTWDMWVZOnkG6Ap5kB+n4l1eWbslOKgDv29JTFOMU+bvGvYZh70lmLK7Hg4CMpXVgvw5VF9v97YiiigLwvC7wasBHaASwH7wUqakXYhdGFxJ23xVMSLnvJn4S++4L8t8bifRIVqhT6tZCPOU4fdOvJKCRjKrf7gcW/E33ovZFgoOCJ2vBLIh9N9ME0v7tG15JpRtgIBsCXwLcl3tVyCZJ/eyYMbc3QJGsbcPGb2CYRjDbevPCQlNavcMdlyrNIke7VimM5aW8OBJKVh5wCNRpd9XylrKo1cZHYxu/c5Lr6VUZjLpxDlSz+IuTn4VE7vmgHNPnXdlxRKjLHG/FZrZTSCWFEBcRoSa/hysLSFwwDjKd9nelOZRNBvJ+NY48vA8ixVnk4WAMlR/5qhjTRam66BVysHeRcbjJ2IGjwTJC5Q== docker@dev.ops
-----BEGIN RSA PRIVATE KEY-----
MIIJKQIBAAKCAgEAtp+MIi4kZhNbJWufsNCFt8MD1v5qoYZJuyMggUvqxk4J7bO8
bFnMaG72pjXQ+ikizZ4xWWwW+2Gn/wb1q8EIHaiBVVN4EVh2M6FOo7PpSrQY3dHt
gwFRor1gihJEhi8e84ce7h/ybfJMmeDWmatKefkhRwc3j3Uf9a1EXfBvPhtzmgjy
qj/iUE+ETFEBwuUjUSS3CtaYmHZoQ4mBMrkBuBZUrSgliwl3lGNhFYqzTHkCHzoG
1BybFYx9O85tcvMu5U1gzFlWTp5BugKeZAfp+JdXlm7JTioA79vSUxTjFPm7xr2G
Ye9JZiyux4OAjKV1YL8OVRfb/e2IoooC8Lwu8GrAR2gEsB+8FKmpF2IXRhcSdt8V
TEi57yZ+EvvuC/LfG4n0SFaoU+rWQjzlOH3TrySgkYyq3+4HFvxN96L2RYKDgidr
wSyIfTfTBNL+7RteSaUbYCAbAl8C3Jd7VcgmSf3smDG3N0CRrG3Dxm9gmEYw23rz
wkJTWr3DHZcqzSJHu1YpjOWlvDgSSlYecAjUaXfV8payqNXGR2Mbv3OS6+lVGYy6
cQ5Us/iLk5+FRO75oBzT513ZcUSoyxxvxWa2U0glhRAXEaEmv4crC0hcMA4ynfZ3
pTmUTQbyfjWOPLwPIsVZ5OFgDJUf+aoY00WpuugVcrB3kXG4ydiBo8EyQuUCAwEA
AQKCAgEAh0Vm52qGS5XKzc0KXE4YviUVkwqgsURnGNbMHPm+zWTAtfGMgDWD01de
G3+Ba8tMnEGxDCukWk/bwGvHTZGOEWnfYvSQ20hLRbMWLOv2wf7k7GmzJHa1oXXl
LGCboUkGBBzyLDA9wnLXiqOgUfMvF2oR3CrcXMbFBZVyLqMJw1dSKaa3GKR5XkOI
G39lbpeLsW8gpkaOgWAzmtMfgBLJ0zG3RwuVw4cfrCpwnyQ960c26ypwJG2L8ko9
+S7Oo3a+JdtK+BK0e0d+J+oIqM+z3w87MZKeSeeTChgpkqDGE6NoE64O/DvigmxW
ijI95fApIaBjXWRu74gizUKtKuQ5X1pvo1zyQXWqhcaFnB4fv7+kI4L7JwlY4QIf
CLEjYfZFXCtmRo6QPn/09OPiU8xgimqVdIfr7JYjDMoEyMW9vfy5EJmtwS9M41tJ
2gDbhw1fhwUVW1MsJjLuboMXudsubGvGUy+jB48YPQs2Yx13NgUu15jtvPxVCC9v
CdnaL6PJtloSXh5zYpapUg2UN5oH48BLw1hWFoDBcgzTxlCjyEJGtem9QM1Y997e
z561gw8iu1vw0XDuv5zd7qzyIgAYuB8b3Pe6Rg+V2jennKvymMrtCvUNcLRs1pF8
LV0t9rTQzQWP5d8AmxywZfgXaQ0zcrTTd2rkjwf/yBH5yNIhDAECggEBAOl6K4pA
EHsWjGy1IrvhoDztbLkkLzxLvVbkjKT9NJwHucL+oekfxLt/c+1+8mNUjiMHyLd8
cH+R2Lyy1YhfBrT92gPfRRHUBLx+XS0P3p0dj3N+U+C//WAaMS5mb+pkTUFGLQ8g
vRHPHt0rAjvzpMUCNUtO+o11srZIOjLOLYkxSIDqwFXFWDyCgfqYev1jkNDivILk
HjeNrz3G5XpIBQdclZtX1f9yII5EfA6ChUGOLIAMwY1Mr6gTJTKtE3Q6anC0AgoW
ugw5oTSZpKySCKjf20AVcKvPBA3Tq+TBR10XmSTwL6r0bzuptXJBr+teOsnvs1+g
qhwgqExgFrkLf30CggEBAMg9g5VtYtmSfKxFR/59YSwjgRUSz7xFzsdUnbYN71X1
fd7o5htmEIitGRshzbRLCE85cW6TlGC02RiH9wQctc288BkYPYMW7LXRluKSXm+J
WlEwiWsGAJzsNK8U6fxCM0ZsX2AQ3tVSRHnhl7a/CByUQZFS/M+aTLWuOZQZElyK
PqsCw4eD6GbTk2qtlkxp8Gc/eAnii4KWfb6yvx5dgJXz1Nuu/ueZI+lmEP+EgubD
/f9vUzNFHgcU0+z2bH49gvUJ6t9nIAJ4HsHvoI6L286YVzR7qCP5inVksRspVLPP
iH8EDr4QhLnCh4GZiWy1JBpm/Zg+YcibQKxacs/nfYkCggEAXby3DmJ6O3DqIBr5
PwVvGAcax5pHfKXL9r772KHwJVTUt/0TdE1U5xJcsNVu64JfLqFJbKGBaTZdFiWW
pZHBV5kzlqplSKse267AKf9dGSdtGKl3c5yhVZwucrqd5DUw7ywFmzVBs4y8j39c
/kTruk0QqJOk9HZ0scp90zgEADjRKzEU11rL+j9LgBkICAOZeMQPe12q5BL2cI8S
Qu33VuVNC3lQaaage33zcL/mUFOMejyk2N4ZCBnnrVjfnqJ1aZpb10EYoR/iIQQu
oTpgT6zQkgIJonES55o8QTN4O1/mFHZ6LODGZ+XS+3Rz9MN4Rur90T7oDTLvXvqV
JOYA4QKCAQEAluueKFq4nUnGQ8U3/Pyc57qeyLZT8hAfSKdi8ttP31bXFtIs1Mu5
fHoSqRtyQggnbCbccr4yoCzOT6nyqJvG/xj/UbquagY2RNeCRKSTHrfEZdsSR6LP
hXaWQrudm659nP+DZxFwEhIeYEqCoY8b2wZ24MROnV4roOd+qDu5VhwwHY5ItvPZ
jt66hjXtSQyzz+3LWI/yHGu2vKtWVtmcV+jeLvGXWBFZOsnd1+gVDT79Sq+qYsMe
XbH6BOi6Xu+Xq35dEyJTwuisLfmg5q9M7Uput7TXxr2G+PH6doFRQPETbMAvKFuk
3albnneNV2yzmF61ljC2XI9/UCgfzskoGQKCAQBcgsPCQREaEiMvfmWjoDeip/Cy
c0QzTJ6Oy5kVxfjHxRhEZyjKPBbXLGjewLoUfuBJvOJ7Iqadv5vP2AOUS0KMkmwt
w0rIUhk9WaLo+f4Fci1d14CPs59w2GYhSniGOT/qiPprUZVUr+J0fJ6q2i7kRUTR
gLmSxLEKbHUTKJVTJ0wviIHZYHA+WIQzK1j2NdVIjpLNRXaV4+g0vDBnmCovbBgy
VkyXcPF8q/aDjPcDb9cyCxt4PJQRrP7n959Y2sIjyVwAIEg5wzFuPp3LG+ITnLpG
TtrkLRzqxPKqAY0p4D/7exFyk4SeUHFWWifs7uYeflw3vxN+VmazFE4WdXh3
-----END RSA PRIVATE KEY-----
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC2n4wiLiRmE1sla5+w0IW3wwPW/mqhhkm7IyCBS+rGTgnts7xsWcxobvamNdD6KSLNnjFZbBb7Yaf/BvWrwQgdqIFVU3gRWHYzoU6js+lKtBjd0e2DAVGivWCKEkSGLx7zhx7uH/Jt8kyZ4NaZq0p5+SFHBzePdR/1rURd8G8+G3OaCPKqP+JQT4RMUQHC5SNRJLcK1piYdmhDiYEyuQG4FlStKCWLCXeUY2EVirNMeQIfOgbUHJsVjH07zm1y8y7lTWDMWVZOnkG6Ap5kB+n4l1eWbslOKgDv29JTFOMU+bvGvYZh70lmLK7Hg4CMpXVgvw5VF9v97YiiigLwvC7wasBHaASwH7wUqakXYhdGFxJ23xVMSLnvJn4S++4L8t8bifRIVqhT6tZCPOU4fdOvJKCRjKrf7gcW/E33ovZFgoOCJ2vBLIh9N9ME0v7tG15JpRtgIBsCXwLcl3tVyCZJ/eyYMbc3QJGsbcPGb2CYRjDbevPCQlNavcMdlyrNIke7VimM5aW8OBJKVh5wCNRpd9XylrKo1cZHYxu/c5Lr6VUZjLpxDlSz+IuTn4VE7vmgHNPnXdlxRKjLHG/FZrZTSCWFEBcRoSa/hysLSFwwDjKd9nelOZRNBvJ+NY48vA8ixVnk4WAMlR/5qhjTRam66BVysHeRcbjJ2IGjwTJC5Q== docker@dev.ops
import os
import sys
import datetime
import subprocess
from collections import namedtuple
def shell(command, interactive=False):
    '''Execute a command in the shell. In interactive mode, run the command attached to the
    terminal and return True or False based on its exit code; otherwise capture the output
    and return a namedtuple with stdout, stderr, and exit code.'''

    if interactive:
        exit_code = subprocess.call(command, shell=True)
        if exit_code == 0:
            return True
        else:
            return False

    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    (stdout, stderr) = process.communicate()
    exit_code = process.wait()

    # Convert to str (Python 3)
    stdout = stdout.decode(encoding='UTF-8')
    stderr = stderr.decode(encoding='UTF-8')

    # Output namedtuple
    Output = namedtuple('Output', 'stdout stderr exit_code')

    # Return
    return Output(stdout, stderr, exit_code)


prestartup_scripts_path = '/prestartup'

def sorted_ls(path):
    mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime
    return list(sorted(os.listdir(path), key=mtime))

for item in sorted_ls(prestartup_scripts_path):
    if item.endswith('.sh'):

        # Execute this startup script
        print('[INFO] Executing prestartup script "{}"...'.format(item))
        script = prestartup_scripts_path + '/' + item

        # Use bash and not chmod + execute, see https://github.com/moby/moby/issues/9547
        out = shell('bash {}'.format(script))

        # Set date
        date_str = str(datetime.datetime.now()).split('.')[0]

        # Print and log stdout and stderr
        for line in out.stdout.strip().split('\n'):
            print(' out: {}'.format(line))
        for line in out.stderr.strip().split('\n'):
            print(' err: {}'.format(line))

        # Handle error in the startup script
        if out.exit_code:
            print('[ERROR] Exit code "{}" for "{}"'.format(out.exit_code, item))

            # Exit with error code 1
            sys.exit(1)
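For context, the runner above executes every *.sh file found in /prestartup (oldest first) and aborts on the first non-zero exit code. A minimal, purely hypothetical script it would pick up looks like this:

#!/bin/bash
# Hypothetical /prestartup/10_example.sh, executed by prestartup.py at container start
echo "Preparing something before the services start"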
#
# This file MUST be edited with the 'visudo' command as root.
#
# Please consider adding local content in /etc/sudoers.d/ instead of
# directly modifying this file.
#
# See the man page for details on how to write a sudoers file.
#
Defaults env_reset
Defaults mail_badpass
Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
# Host alias specification
# User alias specification
# Cmnd alias specification
# User privilege specification
root ALL=(ALL:ALL) ALL
# Members of the admin group may gain root privileges
%admin ALL=(ALL) ALL
# Allow members of group sudo to execute any command without a password
%sudo ALL=(ALL:ALL) NOPASSWD:ALL
# See sudoers(5) for more information on "#include" directives:
#includedir /etc/sudoers.d
; supervisor config file
[unix_http_server]
file=/var/run/supervisor.sock ; (the path to the socket file)
chmod=0700 ; socket file mode (default 0700)
[supervisord]
logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log)
pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
childlogdir=/var/log/supervisor ; ('AUTO' child log dir, default $TEMP)
nodaemon=true ; Mandatory: keep Supervisor in the foreground so the Docker container does not exit!
; The below section must remain in the config file for RPC
; (supervisorctl/web interface) to work, additional interfaces may be
; added by defining them in separate rpcinterface: sections
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL for a unix socket
; The [include] section can just contain the "files" setting. This
; setting can list multiple files (separated by whitespace or
; newlines). It can also contain wildcards. The filenames are
; interpreted as relative to this file. Included files *cannot*
; include files themselves.
[include]
files = /etc/supervisor/conf.d/*.conf
[program:sshd]
; Process definition
process_name = sshd
command = /usr/sbin/sshd -D
autostart = true
autorestart = true
startsecs = 5
stopwaitsecs = 10
; Log files
stdout_logfile = /var/log/supervisor/%(program_name)s_out.log
stdout_logfile_maxbytes = 10MB
stdout_logfile_backups = 5
stderr_logfile = /var/log/supervisor/%(program_name)s_err.log
stderr_logfile_maxbytes = 10MB
stderr_logfile_backups = 5
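Once Supervisor is running with these config fragments in place, process state can be checked from inside the container with the standard supervisorctl client (output line is illustrative):

supervisorctl status
# sshd      RUNNING   pid 42, uptime 0:01:23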
Bootstrap: library
From: debian:9
%runscript
exec echo "Hello world!"
%environment
TESTVARIABLE=42
export TESTVARIABLE
%labels
AUTHOR stefano.russo@gmail.com
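Given the %runscript and %environment sections above, the image built from this definition can be exercised from inside a container where it was built, using the paths set in the Dockerfile earlier in this commit (a sketch, not part of the repo):

singularity run /singularity_images/testimage.simg                            # prints "Hello world!"
singularity exec /singularity_images/testimage.simg env | grep TESTVARIABLE   # shows TESTVARIABLE=42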
[
    {
        "service": "dns",
        "instance": "exposed",
        "sleep": 5,
        "hostname": "dns"
    },
    {
        "service": "slurmclusterworker",
        "instance": "partition1node1",
        "instance_type": "standard",
        "sleep": 0,
        "links": [
            {
                "name": "DNS",
                "service": "dns",
                "instance": null
            }
        ],
        "hostname": "partition1-node1"
    },
    {
        "service": "slurmclusterworker",
        "instance": "partition2node1",
        "instance_type": "standard",
        "sleep": 0,
        "links": [
            {
                "name": "DNS",
                "service": "dns",
                "instance": null
            }
        ],
        "hostname": "partition2-node1"
    },
    {
        "service": "slurmclustermaster",
        "instance": "one",
        "sleep": 0,
        "links": [
            {
                "name": "DNS",
                "service": "dns",
                "instance": null
            }
        ],
        "hostname": "slurm-master"
    }
]
FROM registry:2
FROM rosetta/base
MAINTAINER Stefano Alberto Russo <stefano.russo@gmail.com>
# Install Slurm
RUN apt-get -y install slurm-wlm
# Explicitly create /var/run/ dirs
RUN mkdir -p /var/run/munge
RUN mkdir -p /var/run/slurm-wlm
# Add munge key and set permissions
COPY munge.key /etc/munge/munge.key
RUN chown munge:munge /etc/munge/munge.key
RUN chmod 0400 /etc/munge/munge.key
# Add munge daemon supervisord conf
COPY supervisord_munge.conf /etc/supervisor/conf.d/
# Add Slurm conf
COPY slurm.conf /etc/slurm-llnl/slurm.conf
# TODO: why do we need this?
RUN ln -s /var/lib/slurm-llnl /var/lib/slurm-wlm
RUN ln -s /var/log/slurm-llnl /var/log/slurm-wlm
nVSHnJzgIVuvfxPf2AJtHlEA6jWeJdLhxdujd04/ZPysw3MZk7QaURlIOMArbXpH8w37bsuAFw9G
ulS6bWPIaja8JiLYqjdQQApLBhCaqK27nid3siIiZkzU2J4IEaCV3KneOPZax9xuIJyDIHb5ailI
V7YQXddTprZt/uluhkAYNwaVQt6PvXLH3Kofa2M/rEPVMc8VaYgsmHq3GGkwqZ3tqNyE0GienWal
zM56vJJrUZtEPc/IK8Sl/0QRkCWXxLOr1XRrNf6w9CbI3Wx44LF22L0NcaV6WLYDHbIn2raTod+5
nSHD0Mrnvmvx70kcKCsDOftcoj4d8eRlRQqnY0hM2NevziI4U0d9Ejo0CscTc0YiGsntmGh8SgPl
YHUhwjsj5DznVkyTj2ilDVkXDMdnKH36cd/Ti7rI9xhGwWiWroJs8GZwwltoNKrLZz5hvpGJRjza
N4FRPRfTNV9el2USWM1qSMM1Y/7NRXaaonBN8gJch5DjD0u4v2qFMXnN8/phbfYtvHzjjwT0+iuU
7ZerEj5DV83FPCuPu3neM8vqLO0O2ZdYIhh8N/b+mtVfh1BGmdBeqlwbQzupyi2jtYa8UsRJNryJ
aQNAI1nqE7WkQNi4jnfR3c1mpqrpJm9A0iz0IIK2o3sjIVAY1nB1DvOAql4uV2+L9cSs8YjEiGEo
KG35YpgUoOdQZCSAppLebh6rTHdjmovruZpV28o2pc/Oy336pr4cq5pWJCbrpHOKgF7Zcqw7aT4g
01OxZVG4iurQUBIhovnbHxdfHZBtNKS0ArwffI7sbu7LG2vwJOdvqAnnEc1B+VI9kLqqZRlCNAqE
kJ4j/cKu0mlq4nxv4wxpsPeN6Uf8Lj+9yHoDPxnoy9DeibBGvgTenUCS2KC0hHgbfnGNkmd0fpoi
RQhvUxRoEpPDlfjnWAk8g/IPetdAEKoo9N/xZpmmxIz9M8JFZWhAkmoMc/Gr05iSYBD1gwCfZkRZ
5LJSITBvOi78pRN1fMct5dW8K7bbefN3s+EmS3kmhB12o0t2X08rv7a4nHwnlNQ3SfirkWaQlfL2
QV7HjU6X5atv072ArDX9fJYxjdG22he+Fk1eYDx63oS7LdXwcETBaDO7z60tQSUp1YSmFEpnocFt
R5dqK/U8dnxUo80Cjm/DerWXkUPtfLpQqIV4DpNrQBTEc5TUlfKNUa61N2QvJXyDME4A4Ynm1vdR
XZJcUAVu03nuxvlUFolusR/Qu0LOPLMqD5pX1cjgEGQkkS9OWjp0YsvQiVVDItpE3k2C4ETWC8MW
wnHFuhVF1UB5o5BC6wy3/wgejsJ84lyMtD1vF3/OZftjIW94ksFUdS0mJ75Jj0wKKdXHclhv9A==
# slurm.conf file generated by configurator.html.
# Put this file on all nodes of your cluster.
# See the slurm.conf man page for more information.
#
ControlMachine=slurmclustermaster-main
#ControlAddr=
#BackupController=
#BackupAddr=
#
AuthType=auth/munge
CacheGroups=0
#CheckpointType=checkpoint/none
CryptoType=crypto/munge
#DisableRootJobs=NO
#EnforcePartLimits=NO
#Epilog=
#EpilogSlurmctld=
#FirstJobId=1
#MaxJobId=999999
#GresTypes=
#GroupUpdateForce=0
#GroupUpdateTime=600
JobCheckpointDir=/var/lib/slurm-llnl/checkpoint
#JobCredentialPrivateKey=
#JobCredentialPublicCertificate=
#JobFileAppend=0
#JobRequeue=1
#JobSubmitPlugins=1
#KillOnBadExit=0
#LaunchType=launch/slurm
#Licenses=foo*4,bar
#MailProg=/usr/bin/mail
#MaxJobCount=5000
#MaxStepCount=40000
#MaxTasksPerNode=128
MpiDefault=none
#MpiParams=ports=#-#
#PluginDir=
#PlugStackConfig=
#PrivateData=jobs
ProctrackType=proctrack/pgid
#Prolog=
#PrologSlurmctld=
#PropagatePrioProcess=0
#PropagateResourceLimits=
#PropagateResourceLimitsExcept=
#RebootProgram=
ReturnToService=1
#SallocDefaultCommand=
SlurmctldPidFile=/var/run/slurm-llnl/slurmctld.pid
SlurmctldPort=6817
SlurmdPidFile=/var/run/slurm-llnl/slurmd.pid
SlurmdPort=6818
SlurmdSpoolDir=/var/lib/slurm-llnl/slurmd
SlurmUser=slurm
#SlurmdUser=root
#SrunEpilog=
#SrunProlog=
StateSaveLocation=/var/lib/slurm-llnl/slurmctld
SwitchType=switch/none
#TaskEpilog=
TaskPlugin=task/none
#TaskPluginParam=
#TaskProlog=
#TopologyPlugin=topology/tree
#TmpFS=/tmp
#TrackWCKey=no
#TreeWidth=
#UnkillableStepProgram=
#UsePAM=0
#
#
# TIMERS
#BatchStartTimeout=10
#CompleteWait=0
#EpilogMsgTime=2000
#GetEnvTimeout=2
#HealthCheckInterval=0
#HealthCheckProgram=
InactiveLimit=0
KillWait=30
#MessageTimeout=10
#ResvOverRun=0
MinJobAge=300
#OverTimeLimit=0
SlurmctldTimeout=120
SlurmdTimeout=300
#UnkillableStepTimeout=60
#VSizeFactor=0
Waittime=0
#
#
# SCHEDULING
#DefMemPerCPU=0
FastSchedule=1
#MaxMemPerCPU=0
#SchedulerRootFilter=1
#SchedulerTimeSlice=30
SchedulerType=sched/builtin
SchedulerPort=7321
SelectType=select/linear
#SelectTypeParameters=
#
#
# JOB PRIORITY
#PriorityFlags=
#PriorityType=priority/basic
#PriorityDecayHalfLife=
#PriorityCalcPeriod=
#PriorityFavorSmall=
#PriorityMaxAge=
#PriorityUsageResetPeriod=
#PriorityWeightAge=
#PriorityWeightFairshare=
#PriorityWeightJobSize=
#PriorityWeightPartition=
#PriorityWeightQOS=
#
#
# LOGGING AND ACCOUNTING
#AccountingStorageEnforce=0
#AccountingStorageHost=
#AccountingStorageLoc=
#AccountingStoragePass=
#AccountingStoragePort=
AccountingStorageType=accounting_storage/none
#AccountingStorageUser=
AccountingStoreJobComment=YES
ClusterName=cluster
#DebugFlags=
#JobCompHost=
JobCompLoc=/var/log/slurm-llnl/jobs.log
#JobCompPass=
#JobCompPort=
JobCompType=jobcomp/filetxt
#JobCompUser=
JobAcctGatherFrequency=30
JobAcctGatherType=jobacct_gather/none
SlurmctldDebug=3
SlurmctldLogFile=/var/log/slurm-llnl/slurmctld.log
SlurmdDebug=3
SlurmdLogFile=/var/log/slurm-llnl/slurmd.log
#SlurmSchedLogFile=
#SlurmSchedLogLevel=
#
#
# POWER SAVE SUPPORT FOR IDLE NODES (optional)
#SuspendProgram=
#ResumeProgram=
#SuspendTimeout=
#ResumeTimeout=
#ResumeRate=
#SuspendExcNodes=
#SuspendExcParts=
#SuspendRate=
#SuspendTime=
#
# Must add the controller node explicitly but don't place it into any partition
NodeName=slurmclustermaster-main CPUs=1 State=UNKNOWN
#NodeName=partitiona-instrument CPUs=1 State=UNKNOWN
#NodeName=partitionb-instrument CPUs=1 State=UNKNOWN
#NodeName=cris-instrument CPUs=1 State=UNKNOWN
# COMPUTE NODES
NodeName=slurmclusterworker-one CPUs=1 State=UNKNOWN
#NodeName=slurmclusterworker-two CPUs=1 State=UNKNOWN
PartitionName=partition1 Nodes=slurmclusterworker-one MaxTime=INFINITE State=UP
#PartitionName=partition2 Nodes=slurmclusterworker-two MaxTime=INFINITE State=UP
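With the master and worker containers up and munge running, a hedged smoke test uses the standard Slurm client commands on the master node:

sinfo                 # partition1 should list slurmclusterworker-one
srun -N1 hostname     # runs a one-node job and prints the worker's hostname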
[program:munged]
; Process definition
process_name = munged
command = /usr/sbin/munged -f --key-file /etc/munge/munge.key -F
autostart = true
autorestart = true
startsecs = 5
stopwaitsecs = 10
priority = 100
; Log files
stdout_logfile = /var/log/supervisor/%(program_name)s_out.log
stdout_logfile_maxbytes = 100MB
stdout_logfile_backups = 5
stderr_logfile = /var/log/supervisor/%(program_name)s_err.log
stderr_logfile_maxbytes = 100MB
stderr_logfile_backups = 5