File size: 3,781 Bytes
68d4493
 
273a3d0
d72c4e8
 
 
 
c1f3388
68d4493
273a3d0
 
 
d72c4e8
273a3d0
 
 
 
 
 
 
 
 
 
68d4493
273a3d0
d72c4e8
 
 
273a3d0
d72c4e8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
273a3d0
d72c4e8
273a3d0
d72c4e8
273a3d0
68d4493
d72c4e8
 
 
 
 
 
 
68d4493
d72c4e8
c1f3388
 
 
 
 
 
273a3d0
c1f3388
 
 
273a3d0
c1f3388
 
 
 
 
273a3d0
c1f3388
 
 
 
273a3d0
68d4493
273a3d0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68d4493
 
273a3d0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from PyPDF2 import PdfReader
import google.generativeai as genai
import os
from langsmith import Client
from ragas.metrics import faithfulness, answer_relevancy, context_relevancy

# 更新的 langchain_community 導入
from langchain_community.llms import OpenAI  # 示例導入

# Load the OpenELM causal-LM used for local generation.
# trust_remote_code=True is required because apple/OpenELM ships custom
# model code on the Hugging Face Hub.
openelm_model = AutoModelForCausalLM.from_pretrained(
    "apple/OpenELM-270M", 
    trust_remote_code=True
)

# Load the matching tokenizer; it also lives in the repo's remote code,
# so trust_remote_code=True is needed here as well.
openelm_tokenizer = AutoTokenizer.from_pretrained(
    "apple/OpenELM-270M", 
    trust_remote_code=True
)

# --- Gemini API configuration ---
# The key may be absent here; process_query() lets the user supply one
# through the UI and re-configures genai at request time.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=GOOGLE_API_KEY)

# --- LangSmith tracing configuration ---
# setdefault keeps a real key exported by the deployment environment;
# the original unconditional assignment clobbered it with this placeholder.
os.environ.setdefault("LANGCHAIN_API_KEY", "your_langchain_api_key")
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
client = Client()

def extract_text_from_pdf(pdf_path):
    """Extract the text of every page in a PDF.

    Args:
        pdf_path: Filesystem path to the PDF file.

    Returns:
        The concatenated page texts, each followed by a newline.
    """
    with open(pdf_path, 'rb') as file:
        reader = PdfReader(file)
        # extract_text() returns None for image-only pages; the original
        # `text += page.extract_text() + "\n"` raised TypeError there.
        # "".join also avoids quadratic string concatenation.
        return "".join(
            (page.extract_text() or "") + "\n" for page in reader.pages
        )

def gemini_generate(prompt, max_tokens):
    """Generate a completion with the Gemini Pro model.

    Args:
        prompt: Full prompt text (context plus question).
        max_tokens: Upper bound on the number of generated tokens.

    Returns:
        The generated text.
    """
    model = genai.GenerativeModel('gemini-pro')
    # generate_content() has no max_output_tokens keyword; token limits
    # must be passed through a GenerationConfig, otherwise the original
    # call raised TypeError on every request.
    response = model.generate_content(
        prompt,
        generation_config=genai.types.GenerationConfig(
            max_output_tokens=max_tokens
        ),
    )
    return response.text

def openelm_generate(prompt, max_tokens):
    """Generate a completion with the local OpenELM model.

    Args:
        prompt: Full prompt text (context plus question).
        max_tokens: Upper bound on the number of NEW tokens to generate.

    Returns:
        The decoded output, including the prompt, with special tokens removed.
    """
    tokenized_prompt = openelm_tokenizer(prompt, return_tensors="pt")
    # max_new_tokens bounds only the generated continuation. The original
    # max_length=max_tokens also counted the prompt, so any prompt longer
    # than max_tokens produced no output at all.
    output_ids = openelm_model.generate(
        tokenized_prompt["input_ids"],
        max_new_tokens=max_tokens,
        # Prefer the tokenizer's own pad id over a hard-coded 0.
        pad_token_id=(
            openelm_tokenizer.pad_token_id
            if openelm_tokenizer.pad_token_id is not None
            else 0
        ),
    )
    return openelm_tokenizer.decode(output_ids[0], skip_special_tokens=True)

def evaluate_response(response, context, query):
    # Score a generated answer against its source context with RAGAS metrics;
    # returns (faithfulness, answer_relevancy, context_relevancy).
    # NOTE(review): ragas metrics are normally run via ragas.evaluate() on a
    # Dataset rather than metric.score([...]) with positional lists — verify
    # this call pattern against the pinned ragas version before relying on it.
    faith_score = faithfulness.score([response], [context], [query])
    ans_rel_score = answer_relevancy.score([response], [query])
    ctx_rel_score = context_relevancy.score([response], [context], [query])
    return faith_score, ans_rel_score, ctx_rel_score

def process_query(pdf_file, llm_choice, query, max_tokens, api_key):
    """Answer a question about an uploaded PDF with the selected LLM.

    Args:
        pdf_file: Uploaded file object (its .name is the path on disk).
        llm_choice: "Gemini" for the hosted model; anything else uses OpenELM.
        query: The user's question.
        max_tokens: Generation length limit passed to the chosen model.
        api_key: Optional Gemini API key overriding the environment one.

    Returns:
        (answer, faithfulness, answer_relevancy, context_relevancy); on any
        failure, the error message with all three scores set to 0.
    """
    global GOOGLE_API_KEY
    try:
        # A key supplied through the UI takes precedence over the env key.
        if api_key:
            GOOGLE_API_KEY = api_key
            genai.configure(api_key=GOOGLE_API_KEY)

        document_text = extract_text_from_pdf(pdf_file.name)
        prompt = f"上下文: {document_text}\n問題: {query}"

        if llm_choice == "Gemini":
            answer = gemini_generate(prompt, max_tokens)
        else:
            # Every non-Gemini choice falls through to the local OpenELM model.
            answer = openelm_generate(prompt, max_tokens)

        scores = evaluate_response(answer, document_text, query)
        return (answer, *scores)
    except Exception as err:
        # Surface the error in the answer slot so the UI still renders.
        return str(err), 0, 0, 0

# Gradio interface: PDF upload + model choice + question in, generated
# answer and the three RAGAS scores out.
iface = gr.Interface(
    fn=process_query,
    inputs=[
        gr.File(label="上傳 PDF"),
        gr.Dropdown(["Gemini", "OpenELM"], label="選擇 LLM"),
        gr.Textbox(label="輸入您的問題"),
        gr.Slider(minimum=50, maximum=1000, step=50, label="最大令牌數"),
        # Optional override for the GOOGLE_API_KEY environment variable.
        gr.Textbox(label="Gemini API 金鑰 (可選)", type="password")
    ],
    outputs=[
        gr.Textbox(label="生成的答案"),
        gr.Number(label="真實性得分"),
        gr.Number(label="答案相關性得分"),
        gr.Number(label="上下文相關性得分")
    ],
    title="多模型 LLM 查詢介面,支持 PDF 上下文",
    description="上傳 PDF,選擇 LLM,並提出問題。回應將使用 RAGAS 指標進行評估。"
)

# Launch the Gradio app only when executed as a script, not on import.
if __name__ == "__main__":
    iface.launch()