MOLECULAR DYNAMICS PERFORMANCE GUIDE - Digital Research Alliance of Canada
ID=293
- Dataset: 9naw
- Software: GROMACS.cuda.mpi (gromacs/2024.4-gofbc-2023a-avx512)
- Resource: 2 tasks, 14 cores per task, 1 node, 2 GPUs, with NVLink
- CPU: Xeon Platinum 8570 (Emerald Rapids), 2.1 GHz
- GPU: NVIDIA-H100-HBM3-80GB, 14 cores/GPU
- Simulation speed: 26.63 ns/day
- Efficiency: 73.5 % (see the note following this summary)
- Site: Nibi
- Date: Dec. 24, 2025, 1:42 p.m.
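- Note on efficiency: the figure above is presumably parallel efficiency against a single-GPU baseline (an assumption; this record does not state the reference value). Under that definition, 0.735 = 26.63 / (2 x S1), which would imply a single-GPU speed of S1 ≈ 18.1 ns/day.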
- Submission script:
#!/bin/bash
#SBATCH --mem-per-cpu=4000
#SBATCH --time=1:0:0
#SBATCH --nodes=1
#SBATCH --ntasks=2
#SBATCH --cpus-per-task=14
#SBATCH --gpus-per-task=h100:1
module load StdEnv/2023 gcc/12.3 openmpi/4.1.5 cuda/12.2 gromacs/2024.4
# Remember the submission directory, then stage inputs to node-local scratch
WORKDIR=$(pwd)
cp * "$SLURM_TMPDIR"
cd "$SLURM_TMPDIR"
# Enable direct GPU-to-GPU communication between ranks (uses NVLink here)
export GMX_ENABLE_DIRECT_GPU_COMM=1
# 2 thread-MPI ranks x 14 OpenMP threads each; nonbonded, PME, bonded
# forces and the coordinate update all run on the GPUs. -npme 1 puts PME
# on a dedicated rank (one GPU for PP work, one for PME), and -nstlist 300
# raises the pair-list update interval above the .mdp value of 10, which
# suits GPU runs.
gmx mdrun \
-ntmpi $SLURM_NTASKS \
-ntomp $SLURM_CPUS_PER_TASK \
-nb gpu \
-pme gpu \
-npme 1 \
-update gpu \
-bonded gpu \
-noconfout \
-nstlist 300 \
-s topol.tpr
# Copy outputs back to the submission directory;
# $SLURM_TMPDIR is wiped when the job ends (default output names)
cp md.log ener.edr traj_comp.xtc "$WORKDIR"/
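- Reading the result: mdrun reports the achieved throughput near the end of its log file. Assuming the default log name md.log (not stated in this record), something like the following recovers the ns/day figure together with its column header:
grep -B1 '^Performance' md.log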
- Notes:
- Simulation input file:
title = benchmark
; Run parameters
integrator = md
nsteps = 100000
dt = 0.002
; Output control
nstxout = 0
nstvout = 0
nstfout = 0
nstenergy = 1000
nstlog = 500
nstxout-compressed = 5000
compressed-x-grps = System
; Bond parameters
continuation = yes
constraint_algorithm = Lincs
constraints = h-bonds
; Neighborsearching
cutoff-scheme = Verlet
ns_type = grid
nstlist = 10
rcoulomb = 0.8
rvdw = 0.8
DispCorr = Ener ; analytic VDW correction
; Electrostatics
coulombtype = PME
pme_order = 4
fourier-nx = 324
fourier-ny = 324
fourier-nz = 324
; Temperature coupling is on
tcoupl = V-rescale
tc-grps = system
tau_t = 0.1
ref_t = 300
; Pressure coupling is on
pcoupl = Parrinello-Rahman
pcoupltype = isotropic
tau_p = 2.0
ref_p = 1.0
compressibility = 4.5e-5
; Periodic boundary conditions
pbc = xyz
; Velocity generation
gen_vel = no
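- Rebuilding topol.tpr: the run input consumed by mdrun is produced with gmx grompp from this parameter file plus starting coordinates and a topology. A minimal sketch; the file names md.mdp, conf.gro and topol.top are placeholders (only topol.tpr appears in the script above):
gmx grompp -f md.mdp -c conf.gro -p topol.top -o topol.tpr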