#!/bin/bash
#SBATCH --mem-per-cpu=2000M
#SBATCH --time=1:00:00
#SBATCH --cpus-per-task=12
#SBATCH --gpus-per-node=1
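# One task with 12 CPU cores and one GPU; 12 x 2000M = 24000M of memory in total.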
td="$SLURM_TMPDIR"       # node-local scratch: fast, but wiped when the job ends
wd="$SLURM_SUBMIT_DIR"   # directory the job was submitted from
cp topol.tpr "$td" && cd "$td" || exit 1   # stage the input on local scratch
# Previous toolchain, kept commented out for reference:
#module load StdEnv/2020 gcc/9.3.0 cuda/11.4 openmpi/4.0.3 gromacs/2022.3
module load StdEnv/2020 gcc/9.3.0 cuda/11.4 openmpi/4.0.3 gromacs/2023.2
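# Offload the short-range nonbonded, PME, and coordinate-update work to the
# GPU while keeping the bonded interactions on the CPU; -ntomp matches the
# OpenMP thread count to the allocated cores (falling back to 1 outside SLURM).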
gmx mdrun -ntomp ${SLURM_CPUS_PER_TASK:-1} \
-nb gpu -pme gpu -update gpu -bonded cpu -s topol.tpr
# Print CPU/GPU info, OpenMP thread count, and timing into the SLURM output log
grep Brand md.log
grep -A1 "Number of GPUs detected:" md.log | tail -n1
grep "The number of OpenMP threads" md.log
grep Performance: md.log
cp md.log "$wd"
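# Only md.log is copied back above; the other mdrun outputs (confout.gro,
# state.cpt, ener.edr under their default names) also land in $SLURM_TMPDIR
# and vanish when the job ends, so copy back whatever you need, e.g.:
cp confout.gro state.cpt ener.edr "$wd" 2>/dev/null || true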