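"""Evaluate a causal-LM checkpoint on MBPP using the repository's MBPP evaluator.

Launch sketch (illustrative only; assumes this file is saved as eval_mbpp.py,
that an `accelerate` config exists, and that both paths are placeholders):

    accelerate launch eval_mbpp.py --logdir /path/to/checkpoint --dataroot /path/to/mbpp_data
"""
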
import os
from argparse import ArgumentParser

import torch
from accelerate import Accelerator, DistributedDataParallelKwargs
from mbpp import MBPP as evaltor
from transformers import AutoModelForCausalLM, AutoTokenizer

if __name__ == "__main__":
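    # Accelerate setup: bf16 mixed precision; the DDP kwargs avoid errors from
    # parameters that go unused in the forward pass.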
    kwargs_handlers = [DistributedDataParallelKwargs(find_unused_parameters=True)]
    accelerator = Accelerator(mixed_precision="bf16", kwargs_handlers=kwargs_handlers)

    parser = ArgumentParser()
    parser.add_argument("--logdir", type=str, default="")
    parser.add_argument("--dataroot", type=str, default="")
    args = parser.parse_args()

    logdir = args.logdir

    if logdir == "":
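        # No checkpoint path was given: fall back to a local tmp/ directory.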
        logdir = "tmp/"
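
    # Tokenizer config passed to the evaluator (AutoTokenizer loaded from the
    # same directory as the model weights).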
    tokenizer = dict(
        cls=AutoTokenizer,
        model_path=logdir,
    )

    dataroot = args.dataroot

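    # MBPP evaluator config: max sequence length 4096, one sample per problem,
    # batch size 1, up to 500 generated tokens.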
    evaluator = evaltor(
        data_root=dataroot,
        max_seq_len=4096,
        tokenizer_cfg=tokenizer,
        log_dir=logdir,
        n_sample=1,
        batch_size=1,
        max_gen_len=500,
    )
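    # Load the model weights in bf16 directly onto this process's device.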
    model = AutoModelForCausalLM.from_pretrained(
        logdir,
        device_map=accelerator.device,
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
    )
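    # Disable HF tokenizers parallelism (avoids the fork warning during generation).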
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    evaluator.eval_model(model, accelerator)