#!/bin/bash
##### The following line will request 2 (virtual) nodes, each with 24 cores running 24 MPI
##### processes, for a total of 48-way parallel. Specifying a memory requirement is unlikely
##### to be necessary, as the compute nodes have 128 GB each.
#PBS -l select=2:ncpus=24:mpiprocs=24:mem=120GB:nodetype=haswell_reg
## For your own benefit, try to estimate a realistic walltime request. Over-estimating the
## wallclock requirement interferes with efficient scheduling, will delay the launch of the
## job, and ties up more of your CPU-time allocation until the job has finished.
#PBS -q normal
#PBS -P myprojectcode
#PBS -l walltime=1:00:00
#PBS -o /mnt/lustre/users/username/MechTesting/mechJob.out
#PBS -e /mnt/lustre/users/username/MechTesting/mechJob.err
#PBS -m abe
#PBS -M username@email.co.za
##### Running commands
### Tell the solver where to find the license server
export LM_LICENSE_FILE=1055@login1
export ANSYSLMD_LICENSE_FILE=1055@login1
### You may need the Intel compiler to be available
module load chpc/parallel_studio_xe/18.0.2/2018.2.046
### Mesa is needed to provide libGLU.so
module load chpc/compmech/mesa/18.1.9
#### There is no -d option available under PBS Pro, therefore explicitly set the
#### working directory and change to it.
export PBS_JOBDIR=/mnt/lustre/users/username/MechTesting
cd $PBS_JOBDIR
### Count the number of lines in the node file to get the total number of MPI processes
nproc=$(wc -l < $PBS_NODEFILE)
### Get a file with only one line per host
sort -u $PBS_NODEFILE > hostlist
### Create a machinefile with the number of MPI processes per node appended to each
### hostname. The :24 must match the mpiprocs value in the select statement above.
sed 's/\.cm\.cluster/:24/g' hostlist > hosts
### Select the solver that you want to run
exe=/apps/chpc/compmech/CFD/ansys_inc/v190/ansys/bin/ansys190
### Command to execute the distributed-memory parallel solver
$exe -b nolist -s noread -i name_of_input_file.dat -o some_output.out -np $nproc -dis -machinefile hosts
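### For reference, the "hosts" machinefile generated above should contain one line per
### node in the form hostname:processes-per-node. The hostnames below are illustrative
### only, assuming node names in $PBS_NODEFILE carry a ".cm.cluster" domain suffix:
###   cnode0123:24
###   cnode0456:24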
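### To run the job, submit this script from the login node and monitor it with the
### standard PBS Pro commands. The script name mechjob.qsub is illustrative:
###   qsub mechjob.qsub
###   qstat -u username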