#!/bin/bash
#SBATCH -A csc605
#SBATCH -J ETTh1_initial
#SBATCH -o logs/etth1/%x-%j.o
#SBATCH -e logs/etth1/%x-%j.e
#SBATCH -t 01:45:00
#SBATCH -p batch
#SBATCH -N 1
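# Note: Slurm opens the -o/-e paths before this script runs, so logs/etth1/
# must already exist at submission time (e.g. run `mkdir -p logs/etth1` first);
# otherwise the job may fail to start or drop its output.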

# Only needed when submitting with: sbatch --export=NONE ... (recommended).
# Do NOT include this line when submitting without --export=NONE.
unset SLURM_EXPORT_ENV

# Load modules
module load PrgEnv-gnu/8.5.0
module load rocm/5.7.1
module load craype-accel-amd-gfx90a
module load miniforge3/23.11.0-0
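# (PrgEnv-gnu, rocm, and craype-accel-amd-gfx90a target Frontier's MI250X GPUs,
# whose architecture is gfx90a; miniforge3 provides conda.)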

# Activate your environment
ENV_NAME=time-llm-env
export PATH="/lustre/orion/csc605/scratch/rolandriachi/$ENV_NAME/bin:$PATH"
source /autofs/nccs-svm1_sw/frontier/miniforge3/23.11.0/etc/profile.d/conda.sh
conda activate "$ENV_NAME"

# MIOpen caches compiled kernels in a SQLite DB; redirecting it to scratch and
# clearing it each run avoids stale-cache and file-locking issues often seen
# on shared parallel filesystems.
export MIOPEN_USER_DB_PATH="$SCRATCH/my-miopen-cache"
export MIOPEN_CUSTOM_CACHE_DIR="${MIOPEN_USER_DB_PATH}"
rm -rf "${MIOPEN_USER_DB_PATH}"
mkdir -p "${MIOPEN_USER_DB_PATH}"

# V --- Time-LLM Config Args --- V

model_name=TimeLLM # Or, DLinear
train_epochs=50
learning_rate=0.001
llama_layers=32

batch_size=16
d_model=32
d_ff=128

comment='TimeLLM-ETTh1' # Or, 'DLinear-ETTh1'
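
# Untested sketch: for a single-node multi-GPU run, raise --num_processes below
# to the number of GPUs visible on the node (a Frontier node exposes 8 MI250X
# GCDs), e.g. --num_processes 8; keep --num_machines 1 on a single node.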

export LAUNCHER="accelerate launch \
    --num_processes 1 \
    --num_machines 1 \
    --mixed_precision bf16 \
    --dynamo_backend no \
    "

# To resume training, include a --resume flag
$LAUNCHER run_main.py \
  --task_name long_term_forecast \
  --is_training 1 \
  --root_path ./dataset/ETT-small/ \
  --data_path ETTh1.csv \
  --model_id ETTh1_512_96 \
  --model $model_name \
  --data ETTh1 \
  --features M \
  --seq_len 96 \
  --label_len 48 \
  --pred_len 96 \
  --factor 3 \
  --enc_in 7 \
  --dec_in 7 \
  --c_out 7 \
  --des 'Exp' \
  --itr 1 \
  --d_model $d_model \
  --d_ff $d_ff \
  --batch_size $batch_size \
  --learning_rate $learning_rate \
  --llm_layers $llama_layers \
  --train_epochs $train_epochs \
  --model_comment "$comment"
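
# Example submission (assuming this script is saved as, e.g., etth1.sbatch):
#   mkdir -p logs/etth1
#   sbatch --export=NONE etth1.sbatch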