#!/bin/bash
#### For the distributed memory versions of the code that we use at CHPC, mpiprocs should be equal to ncpus
#### Here we have selected the maximum resources available to a regular CHPC user
#### Obviously provide your own project identifier
#### For your own benefit, try to estimate a realistic walltime request. Over-estimating the
#### wallclock requirement interferes with efficient scheduling, will delay the launch of the job,
#### and ties up more of your CPU-time allocation until the job has finished.
#PBS -l select=10:ncpus=24:mpiprocs=24 -q normal -P TEST1234
#PBS -l walltime=3:00:00
#PBS -o /mnt/lustre/users/jblogs/WRFV3_test/run/stdout
#PBS -e /mnt/lustre/users/jblogs/WRFV3_test/run/stderr
#PBS -m abe
#PBS -M jblogs@unseenuniversity.ac.za
### Source the WRF-4.1.1 environment with parallel NetCDF:
export WRFDIR=/apps/chpc/earth/WRF-4.1.1-pnc-impi
. $WRFDIR/setWRF
# Set the stack size to unlimited for the Intel compiler
ulimit -s unlimited
##### Running commands
# Set PBS_JOBDIR to where YOUR simulation will be run
export PBS_JOBDIR=/mnt/lustre/users/jblogs/WRFV3_test/run
cd $PBS_JOBDIR
exe=$WRFDIR/WRF/run/wrf.exe
# Clear and re-set the Lustre striping for the job directory. For the Lustre configuration
# used by CHPC, a stripe count of 12 should work well.
lfs setstripe -d $PBS_JOBDIR
lfs setstripe -c 12 ./
## For this example, assume that nproc_x=8, nproc_y=28, nio_tasks_per_group=4 and nio_groups=4, for a total
## of 16 I/O processes and 224 solver processes, therefore 240 MPI processes in total.
## A sketch of the matching namelist.input entries is given after the launch command below.
mpirun -np 240 $exe &> wrf.out
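
## A minimal sketch of how the decomposition above is assumed to map to namelist.input.
## nproc_x, nproc_y, nio_tasks_per_group and nio_groups are the standard WRF namelist
## entries (in &domains and &namelist_quilt respectively); the values simply mirror the
## example above and must match what you actually request from mpirun:
##
##   &domains
##     nproc_x             = 8,
##     nproc_y             = 28,
##   /
##   &namelist_quilt
##     nio_tasks_per_group = 4,
##     nio_groups          = 4,
##   /
##
## As an optional, hypothetical sanity check (not part of the original script), the rank
## count could be computed in the script instead of hard-coding -np 240:
##
##   NPX=8; NPY=28; NIO_TASKS=4; NIO_GROUPS=4
##   NP=$(( NPX*NPY + NIO_TASKS*NIO_GROUPS ))   # 224 solver + 16 I/O = 240 ranks
##   mpirun -np $NP $exe &> wrf.out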