#!/usr/bin/env zsh
###############################################################################
# SLURM batch script: distributed-parallel ANSYS CFX 17.0 RANS run of the
# P1_mid case on the CLAIX cluster.
#
# NOTE: each #SBATCH directive must be on its own line — sbatch parses them
# line-by-line before the shell ever runs.
###############################################################################

### Group Jara; use -A instead of -P (otherwise job-dependency problems)
#SBATCH -A jara0193
#SBATCH -J P1_mid_RANS
#SBATCH -o Output_P1_mid_RANS.%J
### Request the time you need for execution.
### Format: [minutes] or [days-hh:mm:ss]
#SBATCH -t 1-00:00:00
### Request virtual memory you need for your job, per CPU, in MB
#SBATCH --mem-per-cpu=1024
### Mail notifications — options: BEGIN/END/ALL
#SBATCH --mail-user=janssen@ist.rwth-aachen.de
#SBATCH --mail-type=ALL
### Request the number of compute slots (MPI ranks)
#SBATCH -n 48

### Change to the work directory; abort instead of solving in the wrong place.
cd /work/jara0193/P1_mid || exit 1

### Load the tool chain required by CFX.
module switch intelmpi openmpi
module switch intel/19.0 intel/16.0
module load gcc/8
module load TECHNICS
module load ansys/17.0

# Build the CFX host list from the SLURM allocation, e.g. "node1*48,node2*48".
# SLURM_TASKS_PER_NODE looks like "48(x2)"; keep only the count before '('.
tasks_per_node=$(printf '%s' "$SLURM_TASKS_PER_NODE" | cut -f1 -d'(')

# NB(review): the previous 'echo $nodelist | sed s/ /...N,/g' relied on bash
# word splitting to turn scontrol's newline-separated output into spaces;
# zsh does not word-split unquoted parameters, so that produced a broken
# multi-line NODELIST. Append "*N" per line and join with commas explicitly —
# correct under both zsh and bash.
NODELIST=$(scontrol show hostnames "$SLURM_JOB_NODELIST" \
  | sed "s/\$/*${tasks_per_node}/" \
  | paste -s -d, -)
# echo "$NODELIST"
# echo $CFXHOSTS   # now provided by SLURM

### Start the non-interactive batch solve.
cfx5solve -def P1_mid_RANS_Claix.def \
  -partition "${SLURM_NTASKS}" \
  -par-dist "${NODELIST}" \
  -start-method "HP MPI Distributed Parallel"