#!/bin/sh

# Resolve the script's location and move to the repository root.
BASEDIR=$(dirname "$0")
cd "$BASEDIR/.." || exit 1
echo "Current Directory:"
pwd

BASEDIR=$(pwd)

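# Log GPU, kernel, OS, CPU, and memory details for reproducibility.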
nvidia-smi
uname -a
cat /etc/os-release
lscpu
grep MemTotal /proc/meminfo

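# Dataset path and model-loading options, presumably read by the eval script below.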
export LOGICAL_REASONING_DATA_PATH=datasets/mgtv
export RESIZE_TOKEN_EMBEDDINGS=true
export USING_LLAMA_FACTORY=true
export USING_P1_PROMPT_TEMPLATE=false
export LOAD_IN_4BIT=true

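# Model under evaluation; START_EPOCH=7 appears to begin at the epoch-7 checkpoint.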
export START_EPOCH=7
export MODEL_PREFIX=Qwen2-72B-Instruct_p2
export MODEL_NAME=Qwen/Qwen2-72B-Instruct
export BATCH_SIZE=1

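# Results CSV and the base path of the LLaMA-Factory adapter checkpoints.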
export LOGICAL_REASONING_RESULTS_PATH=results/$MODEL_PREFIX.csv
export ADAPTER_PATH_BASE=llama-factory/saves/Qwen2-72B-Instruct

echo "Eval $MODEL_NAME with $ADAPTER_PATH_BASE"
python llm_toolkit/eval_logical_reasoning_all_epochs.py

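# Alternative run, kept commented out for reference:
# evaluate shenzhi-wang/Llama3.1-70B-Chinese-Chat starting from epoch 0.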
# export START_EPOCH=0
# export MODEL_PREFIX=Llama3.1-70B-Chinese-Chat
# export MODEL_NAME=shenzhi-wang/$MODEL_PREFIX

# export LOGICAL_REASONING_RESULTS_PATH=data/${MODEL_PREFIX}_results.csv
# export ADAPTER_PATH_BASE=llama-factory/saves/$MODEL_PREFIX

# echo "Eval $MODEL_NAME with $ADAPTER_PATH_BASE"
# python llm_toolkit/eval_logical_reasoning_all_epochs.py