# rag/app.py
import os
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from sentence_transformers import SentenceTransformer
from datasets import load_dataset
import faiss
import gradio as gr
from accelerate import Accelerator
# ν™˜κ²½ λ³€μˆ˜μ—μ„œ Hugging Face API ν‚€ λ‘œλ“œ
hf_api_key = os.getenv('HF_API_KEY')
# λͺ¨λΈ ID 및 ν† ν¬λ‚˜μ΄μ € μ„€μ •
model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_api_key)
accelerator = Accelerator()
# μ–‘μžν™” μ„€μ • 없이 λͺ¨λΈ λ‘œλ“œ (문제 해결을 μœ„ν•œ μž„μ‹œ 쑰치)
model = AutoModelForCausalLM.from_pretrained(
model_id,
token=hf_api_key,
    torch_dtype=torch.float32  # use the default dtype
)
model = accelerator.prepare(model)
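# Note: Mixtral-8x7B in float32 needs far more memory than a single consumer GPU
# provides; the quantized load that the comment above alludes to would look roughly
# like the sketch below (assumes bitsandbytes is installed, parameters illustrative):
#
#   from transformers import BitsAndBytesConfig
#   bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
#   model = AutoModelForCausalLM.from_pretrained(
#       model_id, token=hf_api_key, quantization_config=bnb_config, device_map="auto"
#   )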
# Load the data and build the FAISS index
ST = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
dataset = load_dataset("not-lain/wikipedia", revision="embedded")
data = dataset["train"]
data = data.add_faiss_index("embeddings")
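# add_faiss_index builds an in-memory FAISS index over the dataset's precomputed
# "embeddings" column, which get_nearest_examples() below queries for similarity
# search. The query encoder must match the model that produced those stored
# vectors (presumably mxbai-embed-large-v1 for this "embedded" revision).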
# Functions for search, prompt formatting, and generation
def search(query: str, k: int = 3):
embedded_query = ST.encode(query)
scores, retrieved_examples = data.get_nearest_examples("embeddings", embedded_query, k=k)
return scores, retrieved_examples
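# Hypothetical usage: scores, docs = search("Who wrote Hamlet?", k=3)
# 'docs' is a dict of dataset columns (including "text") for the k nearest rows.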
def format_prompt(prompt, retrieved_documents, k):
PROMPT = f"Question:{prompt}\nContext:"
for idx in range(k):
PROMPT += f"{retrieved_documents['text'][idx]}\n"
return PROMPT
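# The assembled prompt therefore looks like:
#   Question:<user question>
#   Context:<retrieved passage 1>
#   <retrieved passage 2>
#   ...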
def generate(formatted_prompt):
formatted_prompt = formatted_prompt[:2000] # Limit due to GPU memory constraints
    # Mixtral's chat template only accepts alternating user/assistant turns, so the
    # system prompt is folded into the user message instead of a separate system turn.
    messages = [{"role": "user", "content": SYS_PROMPT + "\n\n" + formatted_prompt}]
    input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to(accelerator.device)
outputs = model.generate(
input_ids,
max_new_tokens=1024,
eos_token_id=tokenizer.eos_token_id,
do_sample=True,
temperature=0.6,
top_p=0.9
)
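    # outputs[0] starts with the echoed prompt tokens; slicing off the first
    # input_ids.shape[-1] positions keeps only the newly generated answer.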
return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
def rag_chatbot_interface(prompt: str, k: int = 2):
scores, retrieved_documents = search(prompt, k)
formatted_prompt = format_prompt(prompt, retrieved_documents, k)
return generate(formatted_prompt)
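# Hypothetical call: rag_chatbot_interface("What is the Eiffel Tower?", k=2)
# retrieves the top-k Wikipedia passages and generates an answer grounded in them.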
# System prompt used inside generate(); it is looked up at call time, so defining it here (after the function) still works
SYS_PROMPT = "You are an assistant for answering questions. You are given the extracted parts of a long document and a question. Provide a conversational answer. If you don't know the answer, just say 'I do not know.' Don't make up an answer."
# Set up Gradio interface
iface = gr.Interface(
fn=rag_chatbot_interface,
    inputs=gr.Textbox(label="Enter your question"),
    outputs=gr.Textbox(label="Answer"),
title="Retrieval-Augmented Generation Chatbot",
description="This chatbot uses a retrieval-augmented generation approach to provide more accurate answers. It first searches for relevant documents and then generates a response based on the prompt and the retrieved documents."
)
iface.launch()