import re
from datetime import datetime
from threading import Thread

import faiss
import gradio as gr
import numpy as np
import torch
from langchain.document_loaders import TextLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig
from transformers.agents import Tool, HfEngine, ReactJsonAgent


class DocumentRetrievalAndGeneration:
    def __init__(self, embedding_model_name, lm_model_id, data_folder):
        self.all_splits = self.load_documents(data_folder)
        self.embeddings = SentenceTransformer(embedding_model_name)
        self.gpu_index = self.create_faiss_index()
        self.tokenizer, self.model = self.initialize_llm(lm_model_id)
        self.retriever_tool = self.create_retriever_tool()
        self.agent = self.create_agent()

    def load_documents(self, folder_path):
        """Load every text file in folder_path and split it into overlapping chunks."""
        loader = DirectoryLoader(folder_path, loader_cls=TextLoader)
        documents = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=5000, chunk_overlap=250)
        all_splits = text_splitter.split_documents(documents)
        print("Length of documents:", len(documents))
        print("Length of all_splits:", len(all_splits))
        for i in range(3):
            print(all_splits[i].page_content)
        return all_splits

    def create_faiss_index(self):
        """Embed every chunk and build an exact L2 index, moved to GPU 0."""
        all_texts = [split.page_content for split in self.all_splits]
        embeddings = self.embeddings.encode(all_texts, convert_to_tensor=True).cpu().numpy()
        index = faiss.IndexFlatL2(embeddings.shape[1])
        index.add(embeddings)
        gpu_resource = faiss.StandardGpuResources()
        gpu_index = faiss.index_cpu_to_gpu(gpu_resource, 0, index)
        return gpu_index

    def initialize_llm(self, model_id):
        """Load the causal LM with 4-bit NF4 quantization so it fits on a single GPU."""
        quantization_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16,
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            quantization_config=quantization_config,
        )
        return tokenizer, model

    def generate_response_with_timeout(self, input_ids, max_new_tokens=1000):
        """Generate on a background thread, streaming tokens with a 60 s stall timeout."""
        try:
            streamer = TextIteratorStreamer(self.tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
            generate_kwargs = dict(
                input_ids=input_ids,
                max_new_tokens=max_new_tokens,
                do_sample=True,
                top_p=1.0,
                top_k=20,
                temperature=0.8,
                repetition_penalty=1.2,
                eos_token_id=[128001, 128008, 128009],  # Llama 3.1 terminators: <|end_of_text|>, <|eom_id|>, <|eot_id|>
                streamer=streamer,
            )
            thread = Thread(target=self.model.generate, kwargs=generate_kwargs)
            thread.start()
            generated_text = ""
            for new_text in streamer:
                generated_text += new_text
            thread.join()
            return generated_text
        except Exception as e:
            print(f"Error in generate_response_with_timeout: {str(e)}")
            return "Text generation process encountered an error"

    def create_retriever_tool(self):
        class RetrieverTool(Tool):
            name = "retriever"
            description = "Retrieves documents from the knowledge base that are semantically similar to the input query."
            inputs = {
                "query": {
                    "type": "text",
                    "description": "The query to perform. Use affirmative form rather than a question.",
                }
            }
            output_type = "text"

            def __init__(self, parent, **kwargs):
                super().__init__(**kwargs)
                self.parent = parent

            def forward(self, query: str) -> str:
                # Threshold on the FAISS (squared) L2 distance; see squared_l2_to_cosine() below.
                similarity_threshold = 1
                query_embedding = self.parent.embeddings.encode(query, convert_to_tensor=True).cpu().numpy()
                distances, indices = self.parent.gpu_index.search(np.array([query_embedding]), k=3)
                content = ""
                for idx, distance in zip(indices[0], distances[0]):
                    if distance <= similarity_threshold:
                        content += "-" * 50 + "\n"
                        content += self.parent.all_splits[idx].page_content + "\n"
                return content

        return RetrieverTool(self)
    def create_agent(self):
        llm_engine = HfEngine("meta-llama/Meta-Llama-3.1-8B-Instruct")
        return ReactJsonAgent(tools=[self.retriever_tool], llm_engine=llm_engine, max_iterations=4, verbose=2)

    def run_agentic_rag(self, question: str) -> str:
        enhanced_question = f"""Using the information in your knowledge base, accessible with the 'retriever' tool, give a comprehensive answer to the question below.
Respond only to the question asked; be concise and relevant. If you can't find information, call your retriever again with different arguments.
Make sure to cover the question completely by calling the retriever tool several times with semantically different queries.
Your queries should be in affirmative form, not questions.

Question:
{question}"""
        return self.agent.run(enhanced_question)

    def query_and_generate_response(self, query):
        # Standard RAG: embed the query, retrieve the top-3 chunks, and prompt the LLM with them.
        similarity_threshold = 1
        query_embedding = self.embeddings.encode(query, convert_to_tensor=True).cpu().numpy()
        distances, indices = self.gpu_index.search(np.array([query_embedding]), k=3)
        print("Distances:", distances, "indices:", indices)

        filtered_results = [idx for idx, distance in zip(indices[0], distances[0]) if distance <= similarity_threshold]
        content = ""
        for i in filtered_results:
            # Index with the loop variable i (the original mistakenly reused the stale idx here).
            content += "-" * 50 + "\n"
            content += self.all_splits[i].page_content + "\n"
            print("CHUNK", i)
            print(self.all_splits[i].page_content)
            print("############################")

        conversation = [
            {"role": "system", "content": "You are a knowledgeable assistant with access to a comprehensive database."},
            {"role": "user", "content": f"""
I need you to answer my question and provide related information in a specific format.
I have provided the retrieved document chunks below:
{content}
Choose the most suitable chunks for answering the query.
RETURN ONLY THE SOLUTION, without additional comments, sign-offs, retrieved chunks, references to any ticket, or extra phrases. Be direct and to the point.
IF THERE IS NO RELEVANT ANSWER IN THE RETRIEVED CHUNKS, RETURN "NO SOLUTION AVAILABLE".
DO NOT GIVE A REFERENCE TO ANY CHUNKS OR TICKETS; BE ON POINT.

Here's my question:
Query: {query}
Solution==>
"""}
        ]
        input_ids = self.tokenizer.apply_chat_template(conversation, return_tensors="pt").to(self.model.device)

        start_time = datetime.now()
        standard_response = self.generate_response_with_timeout(input_ids)
        elapsed_time = datetime.now() - start_time
        print("Generated standard response:", standard_response)
        print("Time elapsed:", elapsed_time)
        print("Device in use:", self.model.device)

        standard_solution_text = standard_response.strip()
        if "Solution:" in standard_solution_text:
            standard_solution_text = standard_solution_text.split("Solution:", 1)[1].strip()

        # Post-processing to remove a leading "assistant" prefix the model sometimes emits.
        standard_solution_text = re.sub(r'^assistant\s*', '', standard_solution_text, flags=re.IGNORECASE)
        standard_solution_text = standard_solution_text.strip()

        # Agentic RAG: let the ReAct agent decide how to call the retriever tool itself.
        agentic_solution_text = self.run_agentic_rag(query)

        combined_solution = f"Standard RAG Solution:\n{standard_solution_text}\n\nAgentic RAG Solution:\n{agentic_solution_text}"
        return combined_solution, content

    def qa_infer_gradio(self, query):
        # Returns (combined solution, retrieved context) for the two Gradio output boxes.
        return self.query_and_generate_response(query)
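
# A minimal standalone sketch (an addition for clarity, not part of the pipeline
# above) making the retrieval threshold explicit. faiss.IndexFlatL2 returns
# *squared* L2 distances; assuming the embeddings are unit-normalized (note that
# SentenceTransformer.encode does not normalize unless normalize_embeddings=True
# is passed), squared L2 distance d2 and cosine similarity relate as
# d2 = 2 - 2 * cos. Under that assumption, the threshold of 1 used above keeps
# only chunks with cosine similarity >= 0.5.
def squared_l2_to_cosine(d2: float) -> float:
    """Convert a squared L2 distance between unit vectors to cosine similarity."""
    return 1.0 - d2 / 2.0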
if __name__ == "__main__":
    embedding_model_name = 'flax-sentence-embeddings/all_datasets_v3_MiniLM-L12'
    lm_model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
    data_folder = 'sample_embedding_folder2'

    doc_retrieval_gen = DocumentRetrievalAndGeneration(embedding_model_name, lm_model_id, data_folder)

    def launch_interface():
        css_code = """
            .gradio-container {
                background-color: #daccdb;
            }
            button {
                background-color: #927fc7;
                color: black;
                border: 1px solid black;
                padding: 10px;
                margin-right: 10px;
                font-size: 16px;
                font-weight: bold;
            }
        """
        EXAMPLES = [
            "On which devices can the VIP and CSI2 modules operate simultaneously?",
            "I'm using Code Composer Studio 5.4.0.00091 and enabled FPv4SPD16 floating point support for Cortex-M4 in TDA2. However, after building the project, the .asm file shows --float_support=vfplib instead of FPv4SPD16. Why is this happening?",
            "Could you clarify the maximum number of cameras that can be connected simultaneously to the video input ports on the TDA2x SoC, considering it supports up to 10 multiplexed input ports and includes 3 dedicated video input modules?",
        ]

        interface = gr.Interface(
            fn=doc_retrieval_gen.qa_infer_gradio,
            inputs=[gr.Textbox(label="QUERY", placeholder="Enter your query here")],
            allow_flagging='never',
            examples=EXAMPLES,
            cache_examples=False,
            # The second output box shows the retrieved context, so label it accordingly
            # (it was previously mislabeled "RELATED QUERIES").
            outputs=[gr.Textbox(label="RESPONSE"), gr.Textbox(label="RETRIEVED CONTEXT")],
            css=css_code,
            title="TI E2E FORUM",
        )
        interface.launch(debug=True)

    launch_interface()
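
    # Hypothetical programmatic usage (an illustrative addition, not in the
    # original script): bypass the UI for a one-off smoke test by calling the
    # pipeline directly. Uncomment to run it instead of the interface above.
    # combined_solution, retrieved_context = doc_retrieval_gen.query_and_generate_response(
    #     "On which devices can the VIP and CSI2 modules operate simultaneously?"
    # )
    # print(combined_solution)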