diff --git a/Untitled.ipynb b/Untitled.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..363fcab7ed6e9634e198cf5555ceb88932c9a245
--- /dev/null
+++ b/Untitled.ipynb
@@ -0,0 +1,6 @@
+{
+ "cells": [],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/run_pleiadi b/run_pleiadi
new file mode 100644
index 0000000000000000000000000000000000000000..2e1a0e102fefde234d3dbb36bab7ac0400e1ad50
--- /dev/null
+++ b/run_pleiadi
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+#SBATCH --nodes=4
+#SBATCH --ntasks-per-node=2
+#SBATCH --cpus-per-task=18
+#SBATCH --time=01:00:00
+#SBATCH --job-name=dADP-test
+#SBATCH --account=ulearn
+#SBATCH --partition=pleiadi
+#SBATCH --output=out_pleiadi
+#SBATCH --error=err_pleiadi
+#SBATCH --mem=230G
+
+
+
+cd "$SLURM_SUBMIT_DIR"
+module restore dev_pleiadi
+source /u/ftomba/my_envs/dadac-dev/bin/activate
+make clean
+make
+export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
+export OMP_PLACES=cores
+export OMP_PROC_BIND=close
+#export PSM2_MQ_SENDREQS_MAX=268435456
+#export PSM2_MQ_RECVREQS_MAX=268435456
+
+rm -f bb/*
+
+#time mpirun -n ${SLURM_NTASKS} --map-by ppr:1:node:PE=${SLURM_CPUS_PER_TASK}  main
+time mpirun -n ${SLURM_NTASKS} --map-by ppr:1:socket:PE=${SLURM_CPUS_PER_TASK}  main
+#time mpirun -n ${SLURM_NTASKS} main
+
+#time python3 check.py
+
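Note on the batch script above: it requests a hybrid layout of 4 nodes, 2 MPI ranks per node (one per socket via `--map-by ppr:1:socket:PE=${SLURM_CPUS_PER_TASK}`), and 18 OpenMP threads per rank pinned with `OMP_PLACES=cores` / `OMP_PROC_BIND=close`. As a side note, not part of the patch, a small affinity-check program along the lines below can confirm that mapping before a full run of `main`; the file name `affinity_check.c` and the use of the Linux/glibc-specific `sched_getcpu()` are illustrative assumptions, not something present in this repository.

```c
/* affinity_check.c - hypothetical helper, not part of this patch.
 * Prints where each MPI rank / OpenMP thread lands, to confirm that
 * --map-by ppr:1:socket:PE=18 plus OMP_PLACES / OMP_PROC_BIND give the
 * intended 2 ranks x 18 threads per node layout.
 *
 * Build: mpicc -fopenmp -o affinity_check affinity_check.c
 * Run:   mpirun -n ${SLURM_NTASKS} --map-by ppr:1:socket:PE=${SLURM_CPUS_PER_TASK} ./affinity_check
 */
#define _GNU_SOURCE
#include <mpi.h>
#include <omp.h>
#include <sched.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    char host[MPI_MAX_PROCESSOR_NAME];
    int len;
    MPI_Get_processor_name(host, &len);

    #pragma omp parallel
    {
        /* sched_getcpu() is Linux/glibc specific */
        printf("host %s rank %d/%d thread %d/%d on cpu %d\n",
               host, rank, size,
               omp_get_thread_num(), omp_get_num_threads(),
               sched_getcpu());
    }

    MPI_Finalize();
    return 0;
}
```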
diff --git a/src/tree/tree.c b/src/tree/tree.c
index 8a466900fe1aa25dfc14d38f22a476c23b82395e..0f7a091bd6c900bb56328ac77b27a9cfd040f5c9 100644
--- a/src/tree/tree.c
+++ b/src/tree/tree.c
@@ -3515,7 +3515,7 @@ clusters_t Heuristic1(global_context_t *ctx, int verbose)
         */
 
 
-        DB_PRINT("rank %d proc %d\n", ctx -> mpi_rank, proc_points);
+        //DB_PRINT("rank %d proc %d\n", ctx -> mpi_rank, proc_points);
         MPI_Allreduce(MPI_IN_PLACE, &completed, 1, MPI_INT, MPI_SUM, ctx -> mpi_communicator);
         completed = completed == ctx -> world_size ? 1 : 0;
         /* copy cluster idx into buffer */