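# app.py for a Hugging Face Space: a RAG chatbot that retrieves arXiv paper
# chunks via a FAISS index over mxbai-embed-large-v1 embeddings and answers
# with a 4-bit-quantized Qwen2.5-3B-Instruct model behind a Gradio UI.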
import os

import gradio as gr
import torch
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Read the token if one is configured; only needed for gated models or datasets.
token = os.environ.get("HF_TOKEN")

# Query encoder; must be the same model that produced the dataset's embeddings.
ST = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")

# Load the pre-embedded arXiv dataset and index its embedding column with FAISS.
art_dataset = load_dataset("hichri-mo/arxiver-1000", revision="embedded")
data = art_dataset["train"]
data = data.add_faiss_index("embeddings")

model_id= "Qwen/Qwen2.5-3B-Instruct" | |
# use quantization to lower GPU usage | |
bnb_config = BitsAndBytesConfig( | |
load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16 | |
) | |
tokenizer = AutoTokenizer.from_pretrained(model_id) | |
model = AutoModelForCausalLM.from_pretrained( | |
model_id, | |
torch_dtype=torch.bfloat16, | |
device_map="auto", | |
quantization_config=bnb_config | |
) | |
terminators = [ | |
tokenizer.eos_token_id, | |
tokenizer.convert_tokens_to_ids("<|eot_id|>") | |
] | |
SYS_PROMPT = """You are an assistant for answering questions. | |
You are given the extracted parts of a long document and a question. Provide a conversational answer. | |
If you don't know the answer, just say "I do not know." Don't make up an answer.""" | |
def format_prompt(prompt,retrieved_documents,k): | |
"""using the retrieved documents we will prompt the model to generate our responses""" | |
PROMPT = f"Question: {prompt}\nContext: \n" | |
for idx in range(k) : | |
PROMPT+= f"{retrieved_documents['markdown'][idx]}\n" | |
return PROMPT | |
def generate(formatted_prompt):
    formatted_prompt = formatted_prompt[:2000]  # crude character-level truncation to avoid GPU OOM
    messages = [
        {"role": "system", "content": SYS_PROMPT},
        {"role": "user", "content": formatted_prompt},
    ]
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)
    outputs = model.generate(
        input_ids,
        max_new_tokens=1024,
        eos_token_id=terminators,  # generate() accepts a list of stop-token ids
        do_sample=True,
        temperature=0.6,
        top_p=0.9,
    )
    # Decode only the newly generated tokens, skipping the prompt.
    response = outputs[0][input_ids.shape[-1]:]
    return tokenizer.decode(response, skip_special_tokens=True)
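
# The script calls search() below but never defines it, which is a NameError at
# query time. A minimal sketch, assuming the standard datasets FAISS API
# (Dataset.get_nearest_examples) and the same query encoder used above:
def search(query: str, k: int = 3):
    """Embed the query and return the k nearest documents from the FAISS index."""
    embedded_query = ST.encode(query)
    scores, retrieved_examples = data.get_nearest_examples(
        "embeddings", embedded_query, k=k
    )
    return scores, retrieved_examples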

def rag_chatbot(prompt: str, k: int = 2):
    scores, retrieved_documents = search(prompt, k)
    formatted_prompt = format_prompt(prompt, retrieved_documents, k)
    return generate(formatted_prompt)

def rag_chatbot_interface(prompt, k):
    # Gradio sliders can deliver floats; range() in format_prompt needs an int.
    return rag_chatbot(prompt, int(k))
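
# Quick local sanity check before wiring up the UI (the question is illustrative):
# print(rag_chatbot("What are diffusion models used for?", k=2))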

iface = gr.Interface(
    fn=rag_chatbot_interface,
    inputs=[
        gr.Textbox(label="Enter your question"),
        gr.Slider(minimum=1, maximum=10, step=1, value=2, label="Number of documents to retrieve"),
    ],
    outputs=gr.Textbox(label="Response"),
    title="Chatbot with RAG",
    description="Ask questions and get answers based on retrieved documents.",
)
iface.launch()
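
# Likely Space requirements, inferred from the imports above (an assumption, not
# taken from the original file): gradio, torch, transformers, datasets,
# sentence-transformers, bitsandbytes (4-bit loading), accelerate (device_map="auto"),
# and faiss-cpu or faiss-gpu (add_faiss_index / get_nearest_examples).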