|
|
|
from typing import Any, List, Optional |
|
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig |
|
from peft import PeftModel |
|
import torch |
|
import json |
|
import csv |
|
from lmdeploy import pipeline, GenerationConfig, TurbomindEngineConfig,ChatTemplateConfig |
|
|
|
# -- Inference engine setup -------------------------------------------------
# Two-way tensor-parallel TurboMind backend serving chatglm3-6b, with a fixed
# system prompt that frames every request as multiple-choice reading
# comprehension over a given context.
backend_config = TurbomindEngineConfig(tp=2)

templateconfig = ChatTemplateConfig(
    model_name="chatglm3-6b",
    system="As a reading comprehension expert, you will receive context, question and four options. Please understand the context given below first, and then output the label of the correct option as the answer to the question based on the context.",
)

# Sampling parameters shared by every request issued through `pipe`.
gen_config = GenerationConfig(
    top_p=0.8,
    top_k=40,
    temperature=0.8,
    max_new_tokens=1024,
)

pipe = pipeline(
    model_path='/root/lanyun-tmp/ZhipuAI/chatglm3-6b',
    model_name="chatglm3-6b",
    backend_config=backend_config,
    chat_template_config=templateconfig,
)
|
|
|
|
|
|
|
|
|
|
|
|
|
# Test split in JSONL format: one JSON object per line, each with keys
# 'context', 'question' and 'answer0'..'answer3' (consumed by the loop below).
filename = '/root/lanyun-tmp/Dataset/test.jsonl'

# Load every record up front.  Encoding is pinned to UTF-8 so the read does
# not depend on the platform's default locale.
with open(filename, 'r', encoding='utf-8') as f:
    data = [json.loads(line) for line in f]
|
|
|
|
|
|
|
# -- Inference + CSV dump ---------------------------------------------------
# One row per test item containing the model's generated answer.
# newline='' is required by the csv module, which supplies its own line
# termination; encoding is pinned so the output is portable.
files = 'chatglm3_answers.csv'
with open(files, 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)

    for item in data:
        # Pack the question fields into a single user message.  Values are
        # passed as plain strings — the original wrapped each one as `{x}`,
        # which is a one-element SET literal in Python, polluting the prompt
        # with `{'...'}` set-repr noise around every field.
        payload = {
            'context': item['context'],
            'question': item['question'],
            'answer0': item['answer0'],
            'answer1': item['answer1'],
            'answer2': item['answer2'],
            'answer3': item['answer3'],
        }
        prompts = [[{
            'role': 'user',
            'content': str(payload),
        }]]

        # pipe() returns one Response object per conversation; the generated
        # text lives in its .text attribute (lmdeploy pipeline API).  The
        # original wrote the repr of the whole Response list into the CSV.
        response = pipe(prompts, gen_config=gen_config)
        print(response)
        answer = response[0].text

        # Single-column row; no explicit '\n' cell — csv.writer terminates
        # rows itself (the original emitted a bogus second column holding a
        # raw newline).
        writer.writerow([answer])
|
|
|
|