#!/bin/bash
#SBATCH --ntasks=2 --gres=gpu:v100:2 --partition=all_gpus
#SBATCH --mem-per-cpu=2000 --time=1:0:0
# Single production MD run with Amber's GPU+MPI engine (pmemd.cuda.MPI):
# reset the module environment, load the 2020 toolchain, and launch one
# simulation across the allocated GPU tasks.
inp_file=pmemd_prod.in
log_file=production.log
module --force purge
ml StdEnv/2020 gcc/9.3.0 cuda/11.0 openmpi/4.0.3 amber/20
srun pmemd.cuda.MPI -O -i "$inp_file" -o "$log_file" -p prmtop.parm7 -c restart.rst7
#!/bin/bash
#SBATCH -c1 --ntasks=2 --gres=gpu:v100:2 --partition=all_gpus
#SBATCH --mem-per-cpu=2000 --time=1:0:0
# Usage: sbatch submit.cuda.MPI.sh
# Benchmark script: run pmemd.cuda.MPI several times, collect the
# "Master Total CPU time" line from each run's log, and print the
# average throughput in ns/day.
INPFILE=pmemd_prod.in
STEPS=10000
NRUNS=3
# End of user input
TMPFILE="tf_${SLURM_NTASKS}"
LOGFILE="production_${SLURM_NTASKS}.log"
module --force purge
ml StdEnv/2020 gcc/9.3.0 cuda/11.0 openmpi/4.0.3 amber/20
# Print resource info
echo "${SLURM_NODELIST} running on ${SLURM_NTASKS} tasks"
grep "model name" /proc/cpuinfo | uniq
# Run the simulation NRUNS times, appending each run's timing line.
: > "$TMPFILE"
for (( run = 1; run <= NRUNS; run++ )); do
  srun pmemd.cuda.MPI -O -i "$INPFILE" -o "$LOGFILE" -p prmtop.parm7 -c restart.rst7
  grep "Master Total CPU time" "$LOGFILE" >> "$TMPFILE"
done
# Print the average over the runs. Field $6 of the timing line is the
# run's total CPU time in seconds; count*X/total == X/(mean seconds).
# The constant 3.6*2.4*0.01 = 0.0864 = 86400*1e-6, i.e. seconds->days
# combined with steps*fs->ns — presumably assumes a 1 fs timestep;
# TODO confirm against the dt in pmemd_prod.in.
printf 'ns/day:'
awk -v steps="$STEPS" '{total += $6; count++ } END { print count*3.6*2.4*steps*0.01/total}' "$TMPFILE"
# -f: some of these per-run output files may not exist; -- guards odd names.
rm -f -- "$TMPFILE" "$LOGFILE" mdinfo mdcrd restrt logfile