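"""Gradio Space: retrieval-augmented QA over TI E2E forum content.

Loads and chunks local text files, embeds the chunks with a MiniLM
sentence-transformer, indexes them with FAISS on GPU, and answers queries
with a 4-bit-quantized Mistral-7B-Instruct model behind a two-tab Gradio UI.
"""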
import concurrent.futures
import json
import re
import threading
from datetime import datetime

import faiss
import gradio as gr
import numpy as np
import torch
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import DirectoryLoader, TextLoader
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig
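
# Assumed environment: faiss-gpu, bitsandbytes, accelerate, langchain-community,
# sentence-transformers, and gradio installed (e.g. via requirements.txt), with
# a CUDA GPU available for both the FAISS index and the 4-bit model.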


class DocumentRetrievalAndGeneration:
    def __init__(self, embedding_model_name, lm_model_id, data_folder):
        self.all_splits = self.load_documents(data_folder)
        self.embeddings = SentenceTransformer(embedding_model_name)
        self.gpu_index = self.create_faiss_index()
        self.llm = self.initialize_llm(lm_model_id)
        self.cancel_flag = threading.Event()
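    # Ingestion: read every text file in data_folder and split it into
    # 5000-character chunks with 250 characters of overlap so context is
    # preserved across chunk boundaries.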
    def load_documents(self, folder_path):
        loader = DirectoryLoader(folder_path, loader_cls=TextLoader)
        documents = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=5000, chunk_overlap=250)
        all_splits = text_splitter.split_documents(documents)
        print('Length of documents:', len(documents))
        print('Length of all_splits:', len(all_splits))
        # Preview the first few chunks for sanity checking.
        for split in all_splits[:5]:
            print(split.page_content)
        return all_splits
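    # Embed every chunk once, store the vectors in a flat (exact) L2 index,
    # then move the index to GPU 0 for fast nearest-neighbour search.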
    def create_faiss_index(self):
        all_texts = [split.page_content for split in self.all_splits]
        embeddings = self.embeddings.encode(all_texts, convert_to_tensor=True).cpu().numpy()
        index = faiss.IndexFlatL2(embeddings.shape[1])
        index.add(embeddings)
        gpu_resource = faiss.StandardGpuResources()
        gpu_index = faiss.index_cpu_to_gpu(gpu_resource, 0, index)
        return gpu_index
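    # NF4 4-bit quantization with double quantization and bfloat16 compute
    # (via bitsandbytes) shrinks Mistral-7B-Instruct enough to fit on a
    # single modest GPU.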
    def initialize_llm(self, model_id):
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16,
        )
        model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        generate_text = pipeline(
            task='text-generation',
            model=model,
            tokenizer=tokenizer,
            return_full_text=True,
            temperature=0.6,
            max_new_tokens=256,
        )
        return generate_text
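    # Generation runs on a daemon worker thread so the caller can enforce a
    # 60-second timeout; the shared cancel flag tells a stale worker to
    # discard its result instead of racing a newer query.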
    def generate_response_with_timeout(self, model_inputs):
        def target(future):
            if self.cancel_flag.is_set():
                return
            generated_ids = self.llm.model.generate(model_inputs, max_new_tokens=1000, do_sample=True)
            if not self.cancel_flag.is_set():
                future.set_result(generated_ids)
            else:
                future.set_exception(TimeoutError("Text generation process was canceled"))

        future = concurrent.futures.Future()
        thread = threading.Thread(target=target, args=(future,), daemon=True)
        thread.start()
        try:
            return future.result(timeout=60)  # Timeout set to 60 seconds
        except concurrent.futures.TimeoutError:
            self.cancel_flag.set()
            raise TimeoutError("Text generation process timed out")
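    # End-to-end inference for the Gradio UI: embed the query, retrieve the
    # top-5 chunks, prompt the LLM with them, and return (answer, retrieved
    # context) for the two output textboxes.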
    def qa_infer_gradio(self, query):
        # Reset the cancel flag for the new query.
        self.cancel_flag.clear()
        content = ""
        try:
            query_embedding = self.embeddings.encode(query, convert_to_tensor=True).cpu().numpy()
            distances, indices = self.gpu_index.search(np.array([query_embedding]), k=5)
            for idx in indices[0]:
                content += "-" * 50 + "\n"
                content += self.all_splits[idx].page_content + "\n"

            # Include the retrieved chunks in the prompt, since the instruction
            # tells the model to answer from them.
            prompt = f"""Here are the retrieved document chunks:
{content}
Here's my question:
Query: {query}
Solution:
RETURN ONLY SOLUTION. IF THERE IS NO ANSWER RELATABLE IN RETRIEVED CHUNKS, RETURN "NO SOLUTION AVAILABLE"
"""
            messages = [{"role": "user", "content": prompt}]
            encodeds = self.llm.tokenizer.apply_chat_template(messages, return_tensors="pt")
            model_inputs = encodeds.to(self.llm.device)

            start_time = datetime.now()
            generated_ids = self.generate_response_with_timeout(model_inputs)
            elapsed_time = datetime.now() - start_time

            # Decode only the newly generated tokens, skipping the echoed prompt.
            new_tokens = generated_ids[:, model_inputs.shape[1]:]
            solution_text = self.llm.tokenizer.batch_decode(new_tokens, skip_special_tokens=True)[0].strip()
            if not solution_text:
                solution_text = "NO SOLUTION AVAILABLE"

            print("Generated response:", solution_text)
            print("Time elapsed:", elapsed_time)
            print("Device in use:", self.llm.device)
            return solution_text, content
        except TimeoutError:
            return "timeout", content


if __name__ == "__main__":
    # Example usage
    embedding_model_name = 'flax-sentence-embeddings/all_datasets_v3_MiniLM-L12'
    lm_model_id = "mistralai/Mistral-7B-Instruct-v0.2"
    data_folder = 'sample_embedding_folder2'

    doc_retrieval_gen = DocumentRetrievalAndGeneration(embedding_model_name, lm_model_id, data_folder)
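    # Two-tab Gradio UI: free-text queries plus a dropdown of canned queries
    # loaded from ticketNames.txt (expected to contain a JSON list of strings).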
    def launch_interface():
        css_code = """
        .gradio-container {
            background-color: #daccdb;
        }
        /* Button styling for all buttons */
        button {
            background-color: #927fc7; /* Default color for all buttons */
            color: black;
            border: 1px solid black;
            padding: 10px;
            margin-right: 10px;
            font-size: 16px; /* Increase font size */
            font-weight: bold; /* Make text bold */
        }
        """
        EXAMPLES = [
            "On which devices can the VIP and CSI2 modules operate simultaneously?",
            "I'm using Code Composer Studio 5.4.0.00091 and enabled FPv4SPD16 floating point support for CortexM4 in TDA2. However, after building the project, the .asm file shows --float_support=vfplib instead of FPv4SPD16. Why is this happening?",
            "Could you clarify the maximum number of cameras that can be connected simultaneously to the video input ports on the TDA2x SoC, considering it supports up to 10 multiplexed input ports and includes 3 dedicated video input modules?",
        ]

        file_path = "ticketNames.txt"
        with open(file_path, "r") as file:
            ticket_names = json.loads(file.read())
        dropdown = gr.Dropdown(label="Sample queries", choices=ticket_names)

        tab1 = gr.Interface(
            fn=doc_retrieval_gen.qa_infer_gradio,
            inputs=[gr.Textbox(label="QUERY", placeholder="Enter your query here")],
            outputs=[gr.Textbox(label="SOLUTION"), gr.Textbox(label="RELATED QUERIES")],
            examples=EXAMPLES,
            cache_examples=False,
            allow_flagging='never',
            css=css_code,
        )
        tab2 = gr.Interface(
            fn=doc_retrieval_gen.qa_infer_gradio,
            inputs=[dropdown],
            outputs=[gr.Textbox(label="SOLUTION"), gr.Textbox(label="RELATED QUERIES")],
            allow_flagging='never',
            css=css_code,
        )
        gr.TabbedInterface(
            [tab1, tab2],
            ["Textbox Input", "FAQs"],
            title="TI E2E FORUM",
            css=css_code,
        ).launch(debug=True)

    launch_interface()