#!/bin/bash
# This example script requests 3 nodes of the cluster, each with 24 processor cores
#PBS -P projectid
#PBS -l select=3:ncpus=24:mpiprocs=24:nodetype=haswell_reg
#PBS -q normal
#PBS -l walltime=01:00:00
#PBS -o /mnt/lustre/users/username/su2Jobs/job01/stdout
#PBS -e /mnt/lustre/users/username/su2Jobs/job01/stderr
#PBS -m abe
#PBS -M user@wherever.co.za
#### Set up environment for SU2
module add chpc/compmech/SU2/6.2-intel
# Set the working directory environment variable explicitly.
export PBS_JOBDIR=/mnt/lustre/users/username/su2Jobs/job01
# Change to the job directory
cd $PBS_JOBDIR
# Count the total number of MPI ranks assigned by the scheduler
nproc=`cat $PBS_NODEFILE | wc -l`
### Run the SU2_CFD solver
### For the gcc/mpich and PGI/mpich versions, use mpirun -iface ib0 ....
mpirun -np $nproc SU2_CFD inv_ONERAM6_JST.cfg > SU2_CFD.out
### Check to see if the solution_flow.dat file exists. If it does,
### use it to generate the necessary output files. If it doesn't,
### copy the restart file to solution_flow.dat and generate the output.
### Make sure that these two file names are consistent with what you have
### used in the .cfg file for the run.
if [ ! -e solution_flow.dat ] ; then
  cp restart_flow.dat solution_flow.dat
fi
### Build the specified format output files
SU2_SOL inv_ONERAM6_JST.cfg > SU2_SOL.out
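
Once the project ID, directory paths, and e-mail address have been edited to match your own account, the script is submitted to the PBS scheduler from the login node. A minimal sketch, assuming the script above has been saved as su2_job.qsub in the job directory (the script filename and the username in the qstat call are placeholders, not names from the example above):

# Submit the job script to the PBS scheduler
qsub su2_job.qsub
# Check the job's state (Q = queued, R = running)
qstat -u username
# After completion, inspect the solver log written by the script
tail -n 20 SU2_CFD.out

The stdout and stderr streams of the job itself are written to the files given by the #PBS -o and #PBS -e directives, so any scheduler-level errors should be looked for there rather than in SU2_CFD.out.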