#!/bin/bash
#
# Infers pseudo log likelihood approximations from ESM Transformer models
#
#SBATCH --cluster=                       # set to your cluster
#SBATCH --partition=                     # set to your partition
#SBATCH --account=                       # set to your account
#SBATCH --job-name=esm_inf
#SBATCH --gres=gpu:1                     # number of GPU(s) per node
#SBATCH --cpus-per-task=2                # CPU cores/threads per task
#SBATCH --mem=32000M                     # memory per node
#SBATCH --time=0-24:00                   # max walltime (DD-HH:MM)
#SBATCH --ntasks=1                       # single task; threading is controlled by --cpus-per-task

# Match the OpenMP thread count to the CPUs Slurm actually allocates,
# rather than oversubscribing the 2 requested cores.
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-2}

dataset=$1

# A dataset name is required to build the input/output paths below.
if [ -z "$dataset" ]; then
    echo "Usage: sbatch $0 <dataset>" >&2
    exit 1
fi

model_name=esm1b_t33_650M_UR50S
model_location=/home/gz2294/.cache/torch/hub/checkpoints/${model_name}.pt

python src/esm_inference.py \
    "data/${dataset}/seqs.fasta" \
    "data/${dataset}/wt.fasta" \
    "inference/${dataset}/esm/${model_name}" \
    --model_location "$model_location" \
    --toks_per_batch 4
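
# Example usage, a sketch only: it assumes this script is saved as
# esm_inference.sh and that data/<dataset>/ already contains the mutant
# and wild-type FASTA files; "my_dataset" below is a placeholder name.
#
#   sbatch esm_inference.sh my_dataset
#
# Scores are then written under inference/my_dataset/esm/esm1b_t33_650M_UR50S.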