|
import torch |
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
from transformers.generation.utils import GenerationConfig |
|
from peft import PeftModel, PeftConfig |
|
import json |
|
import csv |
|
|
|
|
|
# Local path to the MiniCPM-2B SFT checkpoint (fp32 weights on disk).
model_path = '/root/lanyun-tmp/OpenBMB/MiniCPM-2B-sft-fp32'

# Load in float16 and let `device_map="auto"` place layers on available devices.
# trust_remote_code is required: MiniCPM ships its own modeling/chat() code.
model = AutoModelForCausalLM.from_pretrained(

    model_path, torch_dtype=torch.float16, device_map="auto", trust_remote_code=True

)

# Use the generation defaults (sampling params etc.) stored with the checkpoint.
model.generation_config = GenerationConfig.from_pretrained(model_path)

# Slow tokenizer: the checkpoint's custom tokenizer needs trust_remote_code too.
tokenizer = AutoTokenizer.from_pretrained(

    model_path, use_fast=False, trust_remote_code=True,

)
|
|
|
|
|
|
|
|
|
|
|
|
|
filename = '/root/lanyun-tmp/Dataset/val_triviaqa.csv'
files = 'TriviaQA_MiniCPM_NLoRA.csv'

# Keep BOTH files open while iterating: the original closed the input CSV
# before the `for row in reader` loop ran, which raises
# "ValueError: I/O operation on closed file" on the first iteration.
with open(filename, newline='', encoding='utf-8') as csvfile, \
        open(files, 'w', newline='', encoding='utf-8') as outfile:
    reader = csv.DictReader(csvfile)
    writer = csv.writer(outfile)

    for row in reader:
        context = row['context']
        question = row['question']

        # One-shot prompt: system instruction + a worked example, then the
        # current row's context/question as the final user turn. (The original
        # hard-coded only the example, so context/question were never used and
        # every row was asked the same question.)
        messages = str([
            {'role': 'system', 'content': 'Don t output "[" !!!, As a reading comprehension expert, you will receive context and question. Please understand the given Context first and then output the answer of the question based on the Context'},
            {'role': 'user', 'content': '{\'context\': \'[DOC] [TLE] richard marx had an 80s No 1 hit with Hold On To The Nights? \', \'question\': \'Who had an 80s No 1 hit with Hold On To The Nights?\'}'},
            {'role': 'assistant', 'content': "richard marx"},
            {'role': 'user', 'content': str({'context': context, 'question': question})},
        ])

        response = model.chat(tokenizer, messages)

        # NOTE(review): MiniCPM's remote-code chat() appears to return
        # (text, history); if so, response[0][0] is only the FIRST CHARACTER
        # of the answer and should probably be response[0]. Kept as-is —
        # confirm against the checkpoint's chat() implementation.
        answer = response[0][0]
        print(answer)

        # writerow expects a sequence of fields; passing the bare string
        # wrote one character per CSV column.
        writer.writerow([answer])