from langchain.llms.base import LLM
from typing import Any, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
import torch
import json
import csv

class MiniCPM_LLM(LLM):
    """LangChain wrapper around a locally stored MiniCPM model with a LoRA adapter."""

    tokenizer: AutoTokenizer = None
    model: AutoModelForCausalLM = None

    def __init__(self, model_path: str):
        super().__init__()
        print("Loading model from local path...")
        self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_path, trust_remote_code=True, torch_dtype=torch.bfloat16, device_map="auto"
        )
        # Attach the fine-tuned LoRA weights on top of the base model.
        self.model = PeftModel.from_pretrained(
            model=self.model, model_id="/root/lanyun-tmp/output/MiniCPM/checkpoint-9000/"
        )
        print("Finished loading the local model.")

    def _call(self, prompt: str, stop: Optional[List[str]] = None,
              run_manager: Optional[CallbackManagerForLLMRun] = None,
              **kwargs: Any) -> str:
        # MiniCPM's chat() returns the reply together with the updated history;
        # only the reply is needed here.
        response, history = self.model.chat(self.tokenizer, prompt, temperature=0,
                                            top_p=0.8, repetition_penalty=1.02)
        return response

    @property
    def _llm_type(self) -> str:
        return "MiniCPM_LLM"

llm = MiniCPM_LLM('/root/lanyun-tmp/OpenBMB/MiniCPM-2B-sft-fp32')

# Read the test set: one JSON object per line.
filename = '/root/lanyun-tmp/Dataset/test.jsonl'
data = []
with open(filename, 'r', encoding='utf-8') as f:
    for line in f:
        data.append(json.loads(line))
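
# Each record is expected to be a multiple-choice reading-comprehension item
# with the fields consumed by the loop below:
# {"context": ..., "question": ..., "answer0": ..., "answer1": ...,
#  "answer2": ..., "answer3": ...}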

# Query the model on each item and write the raw answers to a CSV file.
output_path = 'MiniCPM2B_answers.csv'
with open(output_path, 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)

    for item in data:
        # Pack the item fields into the instruction and wrap it in MiniCPM's
        # "<User>...<AI>" prompt format.
        payload = {
            'context': item['context'],
            'question': item['question'],
            'answer0': item['answer0'],
            'answer1': item['answer1'],
            'answer2': item['answer2'],
            'answer3': item['answer3'],
        }
        message = ("<User>As a reading comprehension expert, you will receive context, "
                   "question and four options. Please understand the context given below "
                   "first, and then output the label of the correct option as the answer "
                   "to the question based on the context" + str(payload) + "<AI>")

        answer = llm._call(message)
        # writerow expects a sequence; wrapping the string keeps the answer in a
        # single column instead of splitting it into one character per column.
        writer.writerow([answer])
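
# Optional scoring pass (a sketch, not part of the original pipeline): it
# assumes each jsonl record also carries a gold "label" field with the index
# of the correct option (an assumption; blind test splits often omit labels),
# and grades by the first option digit found in each generated answer.
if data and 'label' in data[0]:
    import re
    with open(output_path, newline='', encoding='utf-8') as f:
        predictions = [row[0] if row else '' for row in csv.reader(f)]
    hits = 0
    for item, pred in zip(data, predictions):
        m = re.search(r'[0-3]', pred)  # first option index mentioned in the reply
        if m and int(m.group()) == int(item['label']):
            hits += 1
    print(f"accuracy: {hits / len(data):.3f}")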