Add stampede3 support
HaoCheng Yu committed Aug 9, 2024
1 parent dca137a commit 97954f6
Showing 3 changed files with 82 additions and 0 deletions.
15 changes: 15 additions & 0 deletions cmake/SCHISM.local.stampede3
@@ -0,0 +1,15 @@
###XSEDE Stampede3 (Sapphire Rapids HBM)

set(SCHISM_EXE_BASENAME pschism_STAM3 CACHE STRING "Base name of the executable (module names and file extension will be appended). If you want a machine name, add it here")

###Relative paths won't work
set(CMAKE_Fortran_COMPILER ifx CACHE PATH "Path to serial Fortran compiler")
set(CMAKE_C_COMPILER icx CACHE PATH "Path to serial C compiler")
set(NetCDF_FORTRAN_DIR "$ENV{TACC_NETCDF_LIB}" CACHE PATH "Path to NetCDF Fortran library")
set(NetCDF_C_DIR "$ENV{TACC_NETCDF_LIB}" CACHE PATH "Path to NetCDF C library")
set(NetCDF_INCLUDE_DIR "$ENV{TACC_NETCDF_INC}" CACHE PATH "Path to NetCDF include")

set(CMAKE_Fortran_FLAGS_RELEASE "-xCORE-AVX512 -O3 -no-prec-sqrt -no-prec-div -align all -assume buffered_io -assume byterecl" CACHE STRING "Fortran flags" FORCE)

#Hybrid (remember to add OMP in the name above)
#set(CMAKE_Fortran_FLAGS_RELEASE "-xCORE-AVX512 -O3 -no-prec-sqrt -no-prec-div -align all -assume buffered_io -assume byterecl -qopenmp" CACHE STRING "Fortran flags" FORCE)
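
As context for the NetCDF settings above: on TACC systems the TACC_NETCDF_LIB and TACC_NETCDF_INC environment variables are populated by the netcdf module (see the modules file below), so a quick sanity check before configuring could look like this sketch (the echo lines are only illustrative):

module load netcdf/4.9.2
echo "$TACC_NETCDF_LIB"   # expect the NetCDF library directory
echo "$TACC_NETCDF_INC"   # expect the NetCDF include directory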
2 changes: 2 additions & 0 deletions src/Utility/Cluster_files/modules.stampede3_spr
@@ -0,0 +1,2 @@
module load netcdf/4.9.2
###Use defaults for others
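
With the netcdf module loaded, the cache file above is typically consumed in an out-of-source configure step. The sketch below assumes the usual SCHISM two-cache-file workflow (a SCHISM.local.build file for feature options plus this machine file) and an illustrative checkout location; adjust paths to your setup:

cd $WORK/schism                              # illustrative checkout location
mkdir -p build && cd build
cmake -C ../cmake/SCHISM.local.build \
      -C ../cmake/SCHISM.local.stampede3 \
      ../src
make -j 8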
65 changes: 65 additions & 0 deletions src/Utility/Cluster_files/run_stampede3_spr
@@ -0,0 +1,65 @@
#!/bin/bash
#----------------------------------------------------
# Sample Slurm job script
# for TACC Stampede3 SPR nodes
#
# *** MPI Job in SPR Queue ***
#
# Last revised: 23 April 2024
#
# Notes:
#
# -- Launch this script by executing
# "sbatch run_stampede3_spr" on a Stampede3 login node.
#
# -- Use ibrun to launch MPI codes on TACC systems.
# Do not use mpirun or mpiexec.
#
# -- Max recommended MPI ranks per SPR node: 112
# (start small, increase gradually).
#
# -- If you're running out of memory, try running
# on more nodes using fewer tasks and/or threads
# per node to give each task access to more memory.
#
# -- Don't worry about task layout. By default, ibrun
# will provide proper affinity and pinning.
#
# -- You should always run out of $SCRATCH. Your input
# files, output files, and executable should be
# in the $SCRATCH directory hierarchy.
#
# From Dan:
# SPR nodes have 112 cores; however, try fewer cores per node
# if you see allocation errors in the output log, especially for
# large-domain cases like STOFS-3D.
# STOFS-Atl uses 85 cores per node.
#----------------------------------------------------

#SBATCH -J STOFS-Atl # Job name
#SBATCH -o Atl.o%j # Name of stdout output file
#SBATCH -e Atl.e%j # Name of stderr error file
#SBATCH -p spr # Queue (partition) name
#SBATCH -N 32 # Total # of nodes
#SBATCH -n 2720 # Total # of MPI tasks (85 tasks/node * 32 nodes)
#SBATCH -t 01:00:00 # Run time (hh:mm:ss)
#SBATCH --mail-user=[email protected]
#SBATCH --mail-type=all # Send email at begin and end of job

# Other commands must follow all #SBATCH directives...
module load netcdf/4.9.2
module list
pwd
date

# Always run your jobs out of $SCRATCH. Your input files, output files,
# and executable should be in the $SCRATCH directory hierarchy.
# Change directories to your $SCRATCH directory where your executable is

#cd $SCRATCH

# Launch MPI code...

ibrun ./pschism_STAM3_NO_PARMETIS_PREC_EVAP_BLD_STANDALONE_TVD-VL 6 # Use ibrun instead of mpirun or mpiexec


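For completeness: in the ibrun line above, the long executable name simply reflects the build options chosen at compile time, and the trailing argument (6 here) is understood to be the number of dedicated I/O "scribe" ranks used by SCHISM's scribed output; both depend on your build and run setup. A minimal submit-and-monitor sequence, assuming the script, inputs, and executable sit together in a run directory under $SCRATCH, might be:

cd $SCRATCH/my_run_dir                           # illustrative run directory
jobid=$(sbatch --parsable run_stampede3_spr)     # submit and capture the job ID
squeue -j "$jobid"                               # check queue status
tail -f "Atl.o${jobid}"                          # follow stdout once the job starts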