#!/bin/bash
# Single-GPU OpenMM job: 1 CPU core, 1 H100 GPU, 4000 MB RAM per CPU, 1 hour.
#SBATCH -c1 --gres=gpu:h100:1
#SBATCH --mem-per-cpu=4000 --time=1:0:0
module purge
module load arch/avx2 StdEnv/2023 gcc/12.3 openmpi/4.1.5 cuda/12.2
module load ambertools/23.5 openmm/8.1.1
# Build a throwaway virtualenv on node-local scratch ($SLURM_TMPDIR) so the
# netCDF4 wheel install does not touch shared storage.  These expansions must
# NOT be escaped here — this script runs directly, not inside a heredoc.
virtualenv "${SLURM_TMPDIR}/env"
source "${SLURM_TMPDIR}/env/bin/activate"
# --no-index: install from the cluster's local wheelhouse only (no network).
pip install --no-index netCDF4
python openmm_input.py
# Usage: bash $0 NUM_GPUS
# Submits a batch job requesting NUM_GPUS H100 GPUs (one CPU core per GPU).
# Fail fast with a usage message if the GPU count argument is missing/empty,
# instead of submitting a malformed "-c --gres=gpu:h100:" request.
gpus=${1:?Usage: bash $0 NUM_GPUS}
sbatch << EOF
#!/bin/bash
#SBATCH -c${gpus} --gres=gpu:h100:${gpus}
#SBATCH --mem-per-cpu=4000 --time=1:0:0 --partition=gpu_general
module purge
module load arch/avx2 StdEnv/2023 gcc/12.3 openmpi/4.1.5 cuda/12.2
module load ambertools/23.5 openmm/8.1.1
# \${SLURM_TMPDIR} is escaped so it expands on the compute node at run time,
# not here at submission time.
virtualenv \${SLURM_TMPDIR}/env
source \${SLURM_TMPDIR}/env/bin/activate
pip install --no-index netCDF4
# Log the compute node's CPU model (grep reads the file directly; no cat needed).
grep "model name" /proc/cpuinfo | uniq
python openmm_input.py
echo -n "Using ${gpus} GPUs, "
hostname -s
EOF