#!/bin/bash
### Request 10 compute nodes with 6 MPI processes per node
#PBS -l select=10:ncpus=24:mpiprocs=6:nodetype=haswell_reg
#PBS -q normal
#PBS -P ERTH1234
#PBS -l walltime=06:00:00
#PBS -N WRF4-10X6X4
#PBS -o /mnt/lustre/users/jblogs/WRFrun/wrf4.out
#PBS -e /mnt/lustre/users/jblogs/WRFrun/wrf4.err

### These two stack size settings are essential for use with Intel-compiled code
ulimit -s unlimited
export OMP_STACKSIZE=2G

### Source the appropriate environment script
. /apps/chpc/earth/WRF-4.1.1-pnc-impi/setWRF

### Set and change to the working directory (the run directory, not the output file)
export PBSJOBDIR=/mnt/lustre/users/jblogs/WRFrun
cd $PBSJOBDIR

### Get the total number of MPI ranks from the node file
nproc=$(wc -l < $PBS_NODEFILE)

### Clear and set the Lustre stripes for the working directory
lfs setstripe -d $PBSJOBDIR
lfs setstripe -c 12 ./

### Issue the command line, passing the number of OpenMP threads.
### 6 MPI ranks x 4 OpenMP threads = 24 cores per Haswell node.
### These affinity settings work OK, but may be unnecessary. YMMV.
time mpirun -np $nproc -genv OMP_NUM_THREADS 4 \
     -genv KMP_AFFINITY "verbose,granularity=core,compact,0,1" \
     -bind-to socket -map-by socket wrf.exe > runWRF.out
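
### A minimal usage sketch (assumption: the script above is saved as
### runWRF.qsub in the run directory; the file name is illustrative only):
###   qsub runWRF.qsub      # submit the job to the PBS Pro scheduler
###   qstat -u jblogs       # check the queue status for this user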