# NOTE(review): removed non-Python scrape artifacts (file-size header, commit
# hash, line-number gutter) that would have been syntax errors.
from typing import Any, List, Optional
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
from peft import PeftModel
import torch
import json
import csv
from lmdeploy import pipeline, GenerationConfig, TurbomindEngineConfig,ChatTemplateConfig
# TurboMind backend with tensor parallelism across 2 GPUs.
backend_config = TurbomindEngineConfig(tp=2)
# Chat template with a fixed English system prompt: the model receives a
# context, a question and four options, and must reply with the label of
# the correct option.
templateconfig = ChatTemplateConfig(model_name = "chatglm3-6b",system = "As a reading comprehension expert, you will receive context, question and four options. Please understand the context given below first, and then output the label of the correct option as the answer to the question based on the context.")
# Sampling parameters for generation.
# NOTE: this GenerationConfig is lmdeploy's — the lmdeploy import above
# shadows the GenerationConfig imported from transformers.
gen_config = GenerationConfig(top_p=0.8,
top_k=40,
temperature=0.8,
max_new_tokens=1024)
# Build the inference pipeline from the local ChatGLM3-6B checkpoint.
pipe = pipeline(model_path='/root/lanyun-tmp/ZhipuAI/chatglm3-6b',
model_name="chatglm3-6b",
backend_config=backend_config,
chat_template_config = templateconfig )
# Load the JSONL test set: one JSON object per line.
filename = '/root/lanyun-tmp/Dataset/test.jsonl'
# Explicit UTF-8: the dataset may contain non-ASCII text and the previous
# code relied on the platform default encoding.
with open(filename, 'r', encoding='utf-8') as f:
    data = [json.loads(line) for line in f]
# Query the model once per example and write each answer as one CSV row.
files = 'chatglm3_answers.csv'
with open(files, 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)
    for item in data:
        # Build the user message from the example's fields. The original
        # code wrapped each value in a one-element set ({context}, ...),
        # which injected spurious braces into the prompt text.
        payload = {
            'context': item['context'],
            'question': item['question'],
            'answer0': item['answer0'],
            'answer1': item['answer1'],
            'answer2': item['answer2'],
            'answer3': item['answer3'],
        }
        prompts = [[{
            'role': 'user',
            'content': str(payload),
        }]]
        response = pipe(prompts,
                        gen_config=gen_config,
                        )
        print(response)
        # pipe() returns a list of Response objects (one per prompt);
        # take the generated text rather than the object's repr.
        answer = response[0].text
        # One field per row — the old code wrote a literal '\n' as a
        # bogus second CSV column.
        writer.writerow([answer])