"""Single Thread"""
import os
import multiprocessing
import concurrent.futures
from langchain.document_loaders import TextLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from sentence_transformers import SentenceTransformer
import faiss
import torch
import numpy as np
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig
from datetime import datetime
import json
import gradio as gr
import re
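
# Retrieval-augmented generation pipeline: documents are chunked, embedded with a
# SentenceTransformer model, indexed with FAISS (moved to GPU), and the top-k matches
# are passed to a 4-bit quantized Mistral-7B-Instruct model behind a Gradio front end.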

class DocumentRetrievalAndGeneration:
    def __init__(self, embedding_model_name, lm_model_id, data_folder):
        self.all_splits = self.load_documents(data_folder)
        self.embeddings = SentenceTransformer(embedding_model_name)
        self.gpu_index = self.create_faiss_index()
        self.llm = self.initialize_llm(lm_model_id)

    def load_documents(self, folder_path):
        loader = DirectoryLoader(folder_path, loader_cls=TextLoader)
        documents = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=5000, chunk_overlap=250)
        all_splits = text_splitter.split_documents(documents)
        print('Length of documents:', len(documents))
        print('Length of all_splits:', len(all_splits))
        # Preview the first few chunks (guard against folders with fewer than five splits)
        for i in range(min(5, len(all_splits))):
            print(all_splits[i].page_content)
        return all_splits

    def create_faiss_index(self):
        all_texts = [split.page_content for split in self.all_splits]
        embeddings = self.embeddings.encode(all_texts, convert_to_tensor=True).cpu().numpy()
        index = faiss.IndexFlatL2(embeddings.shape[1])
        index.add(embeddings)
        gpu_resource = faiss.StandardGpuResources()
        gpu_index = faiss.index_cpu_to_gpu(gpu_resource, 0, index)
        return gpu_index
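
    # faiss.StandardGpuResources() requires the faiss-gpu build and a visible CUDA device.
    # A minimal CPU-only fallback sketch (assumption: the rest of the class works
    # unchanged with a plain CPU index, since search() has the same signature):
    #
    # def create_faiss_index(self):
    #     all_texts = [split.page_content for split in self.all_splits]
    #     embeddings = self.embeddings.encode(all_texts, convert_to_tensor=True).cpu().numpy()
    #     index = faiss.IndexFlatL2(embeddings.shape[1])
    #     index.add(embeddings)
    #     return index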

    def initialize_llm(self, model_id):
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16
        )
        model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        generate_text = pipeline(
            model=model,
            tokenizer=tokenizer,
            return_full_text=True,
            task='text-generation',
            do_sample=True,  # sampling must be enabled for temperature to take effect
            temperature=0.6,
            max_new_tokens=256,
        )
        return generate_text
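
    # The 4-bit BitsAndBytesConfig path assumes a CUDA GPU plus the bitsandbytes and
    # accelerate packages. A sketch of a full-precision fallback for machines without
    # CUDA (assumption: enough RAM to hold the unquantized weights):
    #
    # model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)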

    def generate_response_with_timeout(self, model_inputs):
        try:
            with concurrent.futures.ThreadPoolExecutor() as executor:
                future = executor.submit(self.llm.model.generate, model_inputs, max_new_tokens=1000, do_sample=True)
                generated_ids = future.result(timeout=60)  # timeout set to 60 seconds
            return generated_ids
        except concurrent.futures.TimeoutError:
            # Signal the timeout to the caller; returning a bare string here would
            # break the token decoding in query_and_generate_response.
            return None
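
    # Note: ThreadPoolExecutor cannot cancel a generate() call that is already running.
    # On timeout, future.result() raises, but the executor's shutdown (triggered when the
    # `with` block exits) still waits for the worker thread, so generation continues to
    # completion in the background before the except branch returns.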

    def query_and_generate_response(self, query):
        query_embedding = self.embeddings.encode(query, convert_to_tensor=True).cpu().numpy()
        distances, indices = self.gpu_index.search(np.array([query_embedding]), k=5)
        content = ""
        for idx, distance in zip(indices[0], distances[0]):
            content += "-" * 50 + "\n"
            content += self.all_splits[idx].page_content + "\n"
            print("CHUNK", idx)
            print("Distance:", distance)
            print(self.all_splits[idx].page_content)
            print("############################")
        prompt = f"""<s>
You are a knowledgeable assistant with access to a comprehensive database.
I need you to answer my question and provide related information in a specific format.
I have provided five retrieved document chunks below; choose the most suitable chunks for answering the query.
{content}
Here's what I need:
Include a final answer without additional comments, sign-offs, or extra phrases. Be direct and to the point.
Here's my question:
Query: {query}
Solution==>
RETURN ONLY THE SOLUTION. IF THERE IS NO RELEVANT ANSWER IN THE RETRIEVED CHUNKS, RETURN "NO SOLUTION AVAILABLE".
IF THE QUERY AND THE RETRIEVED CHUNKS DO NOT CORRELATE MEANINGFULLY, OR IF THE QUERY IS NOT RELEVANT TO TDA2 OR RELATED TOPICS, RETURN "NO SOLUTION AVAILABLE".
Example1
Query: "How to use IPU1_0 instead of A15_0 to process NDK in TDA2x-EVM"
Solution: "To use IPU1_0 instead of A15_0 to process NDK in TDA2x-EVM, you need to modify the configuration file of the NDK application. Specifically, change the processor reference from 'A15_0' to 'IPU1_0'."
Example2
Query: "Can BQ25896 support I2C interface?"
Solution: "Yes, the BQ25896 charger supports the I2C interface for communication."
Example3
Query: "Who is the fastest runner in the world"
Solution: "NO SOLUTION AVAILABLE"
Example4
Query: "What is the price of the latest Apple MacBook?"
Solution: "NO SOLUTION AVAILABLE"
</s>
"""
        # prompt = f"Query: {query}\nSolution: {content}\n"
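
        # apply_chat_template wraps the user prompt in Mistral's [INST] ... [/INST]
        # markers, which is why the answer is extracted with the [/INST] / </s>
        # regexes further below.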
        # Encode and prepare inputs
        messages = [{"role": "user", "content": prompt}]
        encodeds = self.llm.tokenizer.apply_chat_template(messages, return_tensors="pt")
        model_inputs = encodeds.to(self.llm.device)
        # Perform inference and measure time
        start_time = datetime.now()
        generated_ids = self.generate_response_with_timeout(model_inputs)
        # generated_ids = self.llm.model.generate(model_inputs, max_new_tokens=1000, do_sample=True)
        elapsed_time = datetime.now() - start_time
        if generated_ids is None:
            return "Text generation process timed out", content
        # Decode and return output
        decoded = self.llm.tokenizer.batch_decode(generated_ids)
        generated_response = decoded[0]
        match1 = re.search(r'\[/INST\](.*?)</s>', generated_response, re.DOTALL)
        match2 = re.search(r'Solution:(.*?)</s>', generated_response, re.DOTALL | re.IGNORECASE)
        if match1:
            solution_text = match1.group(1).strip()
            print(solution_text)
            if "Solution:" in solution_text:
                solution_text = solution_text.split("Solution:", 1)[1].strip()
        elif match2:
            solution_text = match2.group(1).strip()
            print(solution_text)
        else:
            solution_text = generated_response
        print("Generated response:", generated_response)
        print("Time elapsed:", elapsed_time)
        print("Device in use:", self.llm.device)
        return solution_text, content

    def qa_infer_gradio(self, query):
        response = self.query_and_generate_response(query)
        return response
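
# A minimal usage sketch without the Gradio UI (assumes the data folder and models
# referenced below are available locally or downloadable):
#
# rag = DocumentRetrievalAndGeneration(
#     'flax-sentence-embeddings/all_datasets_v3_MiniLM-L12',
#     "mistralai/Mistral-7B-Instruct-v0.2",
#     'sample_embedding_folder2',
# )
# solution, retrieved_chunks = rag.query_and_generate_response("Can BQ25896 support I2C interface?")
# print(solution)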

if __name__ == "__main__":
    # Example usage
    embedding_model_name = 'flax-sentence-embeddings/all_datasets_v3_MiniLM-L12'
    lm_model_id = "mistralai/Mistral-7B-Instruct-v0.2"
    data_folder = 'sample_embedding_folder2'
    doc_retrieval_gen = DocumentRetrievalAndGeneration(embedding_model_name, lm_model_id, data_folder)

    """Dual Interface"""
    def launch_interface():
        css_code = """
            .gradio-container {
                background-color: #daccdb;
            }
            /* Button styling for all buttons */
            button {
                background-color: #927fc7; /* Default color for all other buttons */
                color: black;
                border: 1px solid black;
                padding: 10px;
                margin-right: 10px;
                font-size: 16px; /* Increase font size */
                font-weight: bold; /* Make text bold */
            }
        """
        EXAMPLES = [
            "On which devices can the VIP and CSI2 modules operate simultaneously?",
            "I'm using Code Composer Studio 5.4.0.00091 and enabled FPv4SPD16 floating point support for CortexM4 in TDA2. However, after building the project, the .asm file shows --float_support=vfplib instead of FPv4SPD16. Why is this happening?",
            "Could you clarify the maximum number of cameras that can be connected simultaneously to the video input ports on the TDA2x SoC, considering it supports up to 10 multiplexed input ports and includes 3 dedicated video input modules?"
        ]
        file_path = "ticketNames.txt"
        # Read the file content
        with open(file_path, "r") as file:
            content = file.read()
        ticket_names = json.loads(content)
        dropdown = gr.Dropdown(label="Sample queries", choices=ticket_names)
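        # ticketNames.txt is expected to hold a JSON array of query strings; each entry
        # becomes a dropdown choice for the FAQ tab defined below.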
        # Define Gradio interfaces
        tab1 = gr.Interface(
            fn=doc_retrieval_gen.qa_infer_gradio,
            inputs=[gr.Textbox(label="QUERY", placeholder="Enter your query here")],
            allow_flagging='never',
            examples=EXAMPLES,
            cache_examples=False,
            outputs=[gr.Textbox(label="SOLUTION"), gr.Textbox(label="RELATED QUERIES")],
            css=css_code
        )
        tab2 = gr.Interface(
            fn=doc_retrieval_gen.qa_infer_gradio,
            inputs=[dropdown],
            allow_flagging='never',
            outputs=[gr.Textbox(label="SOLUTION"), gr.Textbox(label="RELATED QUERIES")],
            css=css_code
        )
        # Combine interfaces into a tabbed interface
        gr.TabbedInterface(
            [tab1, tab2],
            ["Textbox Input", "FAQs"],
            title="TI E2E FORUM",
            css=css_code
        ).launch(debug=True)

    # Launch the interface
    launch_interface()

    """Single Interface"""
    # def launch_interface():
    #     css_code = """
    #         .gradio-container {
    #             background-color: #daccdb;
    #         }
    #         /* Button styling for all buttons */
    #         button {
    #             background-color: #927fc7; /* Default color for all other buttons */
    #             color: black;
    #             border: 1px solid black;
    #             padding: 10px;
    #             margin-right: 10px;
    #             font-size: 16px; /* Increase font size */
    #             font-weight: bold; /* Make text bold */
    #         }
    #     """
    #     EXAMPLES = [
    #         "On which devices can the VIP and CSI2 modules operate simultaneously?",
    #         "I'm using Code Composer Studio 5.4.0.00091 and enabled FPv4SPD16 floating point support for CortexM4 in TDA2. However, after building the project, the .asm file shows --float_support=vfplib instead of FPv4SPD16. Why is this happening?",
    #         "Could you clarify the maximum number of cameras that can be connected simultaneously to the video input ports on the TDA2x SoC, considering it supports up to 10 multiplexed input ports and includes 3 dedicated video input modules?"
    #     ]
    #     file_path = "ticketNames.txt"
    #     # Read the file content
    #     with open(file_path, "r") as file:
    #         content = file.read()
    #     ticket_names = json.loads(content)
    #     dropdown = gr.Dropdown(label="Sample queries", choices=ticket_names)
    #     # Define Gradio interface
    #     interface = gr.Interface(
    #         fn=doc_retrieval_gen.qa_infer_gradio,
    #         inputs=[gr.Textbox(label="QUERY", placeholder="Enter your query here")],
    #         allow_flagging='never',
    #         examples=EXAMPLES,
    #         cache_examples=False,
    #         outputs=[gr.Textbox(label="SOLUTION"), gr.Textbox(label="RELATED QUERIES")],
    #         css=css_code
    #     )
    #     # Launch Gradio interface
    #     interface.launch(debug=True)
    # # Launch the interface
    # launch_interface()