# %%
from datasets import Dataset, load_dataset
import pandas as pd
from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForSeq2Seq, TrainingArguments, Trainer
from peft import LoraConfig, TaskType, get_peft_model, AutoPeftModelForCausalLM

# %%
df = pd.read_csv('data/riddles_data.csv')
df = df.sample(frac=1)  # shuffle all rows
#df = df[:1000]         # optionally work with a smaller subset

# %%
df.describe()

# %%
ds = Dataset.from_pandas(df)

# %%
ds[:3]

# %%
llm_model_name = "Qwen/Qwen1.5-0.5B-Chat"
model = AutoModelForCausalLM.from_pretrained(llm_model_name)
# Explicitly set the pad token to <|endoftext|> so padded batches can be built
tokenizer = AutoTokenizer.from_pretrained(llm_model_name, trust_remote_code=True, pad_token='<|endoftext|>')

tokenizer


# %%
def process_func(example):
    MAX_LENGTH = 512
    instruction = tokenizer(
        f"<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
        f"<|im_start|>user\n猜谜语:\n谜面:{example['riddle']}\n\n谜底是什么?<|im_end|>\n"
        f"<|im_start|>assistant\n",
        add_special_tokens=False,  # do not prepend special tokens to the prompt
    )
    response = tokenizer(f"谜底是:{example['label']}", add_special_tokens=False)
    input_ids = instruction["input_ids"] + response["input_ids"] + [tokenizer.pad_token_id]
    attention_mask = instruction["attention_mask"] + response["attention_mask"] + [1]
    # Mask the prompt tokens with -100 so the loss is only computed on the answer
    labels = [-100] * len(instruction["input_ids"]) + response["input_ids"] + [tokenizer.pad_token_id]
    if len(input_ids) > MAX_LENGTH:  # truncate overly long examples
        input_ids = input_ids[:MAX_LENGTH]
        attention_mask = attention_mask[:MAX_LENGTH]
        labels = labels[:MAX_LENGTH]
        print(f"{tokenizer.decode(input_ids)} Too Long")
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "labels": labels
    }
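
# %%
# Optional sanity check (not in the original notebook): print the Qwen chat-template
# rendering of the same system/user messages, to compare by eye against the prompt
# string built by hand in process_func above. The riddle text is taken from the
# inference example further below.
demo_messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "猜谜语:\n谜面:一生受用(猜一字)\n\n谜底是什么?"},
]
print(tokenizer.apply_chat_template(demo_messages, tokenize=False, add_generation_prompt=True))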

# %%
tokenized_id = ds.map(process_func, remove_columns=ds.column_names)
tokenized_id

# %%
tokenizer.decode(tokenized_id[0]['input_ids'])

# %%
tokenizer.decode(list(filter(lambda x: x != -100, tokenized_id[1]["labels"])))
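
# %%
# Optional sanity check (not in the original notebook): inspect tokenized lengths and
# count how many examples sit at the MAX_LENGTH=512 cap, i.e. were likely truncated
# by process_func.
lengths = [len(ex["input_ids"]) for ex in tokenized_id]
print(f"examples: {len(lengths)}, longest: {max(lengths)}, "
      f"at the 512-token cap: {sum(l >= 512 for l in lengths)}")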

# %%
config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    # Apply LoRA adapters to all attention and MLP projection layers
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    #target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # attention-only alternative
    inference_mode=False,
    r=32,             # LoRA rank
    lora_alpha=32,    # scaling factor
    lora_dropout=0.05
)


# %%
model = get_peft_model(model, config)
config

# %%
model.print_trainable_parameters()

# %%
args = TrainingArguments(
    output_dir="./Qwen1.5_0.5B_Chat_sft_full",
    logging_steps=10,
    num_train_epochs=2,
    save_steps=10,
    learning_rate=1e-4,
    save_on_each_node=True,
    fp16=False
)

# %%
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenized_id,
    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),
)

# resume_from_checkpoint=True resumes from the latest checkpoint in output_dir;
# on a fresh run with no checkpoints yet, call trainer.train() instead.
trainer.train(resume_from_checkpoint=True)

# %%
trainer.save_model("./qwen_sft_full")

# %%
llm_model_name="Qwen/Qwen1.5-0.5B-Chat"

#model = AutoModelForCausalLM.from_pretrained(llm_model_name)
# Load the fine-tuned PEFT (LoRA) model on CPU from a saved checkpoint
model = AutoPeftModelForCausalLM.from_pretrained(
    "Qwen1.5_0.5B_Chat_sft_full_ckpt_200_ok/checkpoint-210",
    #low_cpu_mem_usage=True,
)
# Optionally merge the LoRA adapter into the base model and save a standalone copy
#merged_model = model.merge_and_unload()
#merged_model.save_pretrained("./qwen_sft", safe_serialization=False, max_shard_size="2GB")

tokenizer = AutoTokenizer.from_pretrained(llm_model_name, trust_remote_code=True, pad_token='<|endoftext|>')


# %%
prompt = "谜面:一生受用(猜一字)\n谜底是什么?请解释。"
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)

print(text)
model_inputs = tokenizer([text], return_tensors="pt").to("cpu")

generated_ids = model.generate(
    model_inputs.input_ids,
    max_new_tokens=128,
    do_sample=False  # greedy decoding; sampling knobs such as top_p are ignored here
)

generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]

response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

# %%
response 
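
# %%
# Optional helper (a sketch, not part of the original notebook): wrap the generation
# steps above into a single function so more riddles can be tried quickly. It reuses
# the prompt format from process_func and assumes the `model` and `tokenizer` loaded above.
def answer_riddle(riddle, max_new_tokens=128):
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": f"猜谜语:\n谜面:{riddle}\n\n谜底是什么?"},
    ]
    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer([text], return_tensors="pt").to("cpu")
    output_ids = model.generate(inputs.input_ids, max_new_tokens=max_new_tokens, do_sample=False)
    new_tokens = output_ids[0][inputs.input_ids.shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

# answer_riddle("一生受用(猜一字)")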

# %%