import logging
import os
import random
import torch

from transformers import (
    AutoConfig,
    AutoTokenizer,
)

from model.utils import get_model, TaskType
from tasks.superglue.dataset import SuperGlueDataset
from training import BaseTrainer
from training.trainer_exp import ExponentialTrainer
from tasks import utils
from .utils import load_from_cache

logger = logging.getLogger(__name__)

def get_trainer(args):
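    """Build a SuperGLUE trainer from (model_args, data_args, training_args).

    Resolves the checkpoint path, prepares a tokenizer with the special tokens
    the prompt templates expect, loads the SuperGLUE dataset, configures the
    model for sequence classification or multiple choice, and wraps everything
    in a BaseTrainer. Returns (trainer, None); no predict dataset is set up here.
    """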
    model_args, data_args, training_args, _ = args
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    model_args.model_name_or_path = load_from_cache(model_args.model_name_or_path)

    if "llama" in model_args.model_name_or_path:
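        # LLaMA tokenizers ship without pad/mask tokens, so reuse the
        # eos/unk tokens to keep padding and mask positions well defined.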
        from transformers import LlamaTokenizer
        model_path = f'openlm-research/{model_args.model_name_or_path}'
        tokenizer = LlamaTokenizer.from_pretrained(model_path)
        tokenizer.pad_token = tokenizer.eos_token
        tokenizer.mask_token = tokenizer.unk_token
        tokenizer.mask_token_id = tokenizer.unk_token_id
    elif 'gpt' in model_args.model_name_or_path:
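        # GPT-style tokenizers have no pad token; fall back to the
        # end-of-text token so padded batching works.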
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.model_name_or_path,
            use_fast=model_args.use_fast_tokenizer,
            revision=model_args.model_revision,
        )
        tokenizer.pad_token = tokenizer.eos_token
        tokenizer.pad_token_id = tokenizer.eos_token_id
    else:
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.model_name_or_path,
            use_fast=model_args.use_fast_tokenizer,
            revision=model_args.model_revision,
        )
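    # Extend the vocabulary with the task-specific trigger/prompt tokens
    # used by the SuperGLUE templates.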
    tokenizer = utils.add_task_specific_tokens(tokenizer)
    dataset = SuperGlueDataset(tokenizer, data_args, training_args)

    if training_args.do_train:
        for index in random.sample(range(len(dataset.train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {dataset.train_dataset[index]}.")

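    # Classification configs carry the label maps; multiple-choice configs only need num_labels.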
    if not dataset.multiple_choice:
        if "llama" in model_args.model_name_or_path:
            config_path = f'openlm-research/{model_args.model_name_or_path}'
        else:
            config_path = model_args.model_name_or_path
        config = AutoConfig.from_pretrained(
            config_path,
            num_labels=dataset.num_labels,
            label2id=dataset.label2id,
            id2label=dataset.id2label,
            finetuning_task=data_args.dataset_name,
            revision=model_args.model_revision,
            trust_remote_code=True,
        )
    else:
        config = AutoConfig.from_pretrained(
            model_args.model_name_or_path,
            num_labels=dataset.num_labels,
            finetuning_task=data_args.dataset_name,
            revision=model_args.model_revision,
        )

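    # Attach the trigger and label settings from the training arguments to the model config.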
    config.trigger = training_args.trigger
    config.clean_labels = training_args.clean_labels
    config.target_labels = training_args.target_labels

    if not dataset.multiple_choice:
        model = get_model(model_args, TaskType.SEQUENCE_CLASSIFICATION, config)
    else:
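        # fix_bert=True is forwarded to get_model (typically to keep the backbone frozen).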
        model = get_model(model_args, TaskType.MULTIPLE_CHOICE, config, fix_bert=True)

    # Initialize our Trainer
    trainer = BaseTrainer(
        model=model,
        args=training_args,
        train_dataset=dataset.train_dataset if training_args.do_train else None,
        eval_dataset=dataset.eval_dataset if training_args.do_eval else None,
        compute_metrics=dataset.compute_metrics,
        tokenizer=tokenizer,
        data_collator=dataset.data_collator,
        test_key=dataset.test_key
    )

    return trainer, None