syedmudassir16 committed on
Commit 0217d37
1 Parent(s): 7a8787b

Update app.py

Files changed (1)
  1. app.py +11 -156
app.py CHANGED
@@ -18,54 +18,8 @@ from threading import Thread
 from llama_index.core import VectorStoreIndex, Document
 from llama_index.core.tools import QueryEngineTool, ToolMetadata
 from llama_index.agent.openai import OpenAIAgent
+from llama_index.llms.openai import OpenAI
+from llama_index.embeddings.openai import OpenAIEmbedding
-
-
-class Agent:
-    def __init__(self, name, role, doc_retrieval_gen, tokenizer):
-        self.name = name
-        self.role = role
-        self.doc_retrieval_gen = doc_retrieval_gen
-        self.tokenizer = tokenizer
-
-    def generate_response(self, query, context):
-        if self.role == "Information Retrieval":
-            return self.retriever_logic(query, context)
-        elif self.role == "Content Analysis":
-            return self.analyzer_logic(query, context)
-        elif self.role == "Response Generation":
-            return self.generator_logic(query, context)
-        elif self.role == "Task Coordination":
-            return self.coordinator_logic(query, context)
-
-    def retriever_logic(self, query, all_splits):
-        query_embedding = self.doc_retrieval_gen.embeddings.encode(query, convert_to_tensor=True).cpu().numpy()
-        distances, indices = self.doc_retrieval_gen.gpu_index.search(np.array([query_embedding]), k=3)
-        relevant_docs = [all_splits[i] for i in indices[0] if distances[0][i] <= 1]
-        return relevant_docs
-
-    def analyzer_logic(self, query, relevant_docs):
-        analysis_prompt = f"Analyze the following documents in relation to the query: '{query}'\n\nDocuments:\n"
-        for doc in relevant_docs:
-            analysis_prompt += f"- {doc.page_content}\n"
-        analysis_prompt += "\nProvide a concise analysis of the key points relevant to the query."
-
-        input_ids = self.tokenizer.encode(analysis_prompt, return_tensors="pt").to(self.doc_retrieval_gen.model.device)
-        analysis = self.doc_retrieval_gen.model.generate(input_ids, max_length=200, num_return_sequences=1)
-        return self.tokenizer.decode(analysis[0], skip_special_tokens=True)
-
-    def generator_logic(self, query, analyzed_content):
-        generation_prompt = f"Based on the following analysis, generate a comprehensive answer to the query: '{query}'\n\nAnalysis:\n{analyzed_content}\n\nGenerate a detailed response:"
-
-        input_ids = self.tokenizer.encode(generation_prompt, return_tensors="pt").to(self.doc_retrieval_gen.model.device)
-        response = self.doc_retrieval_gen.model.generate(input_ids, max_length=300, num_return_sequences=1)
-        return self.tokenizer.decode(response[0], skip_special_tokens=True)
-
-    def coordinator_logic(self, query, final_response):
-        coordination_prompt = f"As a coordinator, review and refine the following response to the query: '{query}'\n\nResponse:\n{final_response}\n\nProvide a final, polished answer:"
-
-        input_ids = self.tokenizer.encode(coordination_prompt, return_tensors="pt").to(self.doc_retrieval_gen.model.device)
-        coordinated_response = self.doc_retrieval_gen.model.generate(input_ids, max_length=350, num_return_sequences=1)
-        return self.tokenizer.decode(coordinated_response[0], skip_special_tokens=True)
 
 class MultiDocumentAgentSystem:
     def __init__(self, documents_dict, llm, embed_model):
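For context, the two imports added above come from the llama-index OpenAI integration packages (llama-index-llms-openai and llama-index-embeddings-openai). A minimal sketch of how they typically combine with the already-imported QueryEngineTool and OpenAIAgent, assuming OPENAI_API_KEY is set; the tool name and description below are illustrative, not from this commit:

from llama_index.core import VectorStoreIndex, Document
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI

llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
index = VectorStoreIndex.from_documents([Document(text="some document text")])
tool = QueryEngineTool(
    query_engine=index.as_query_engine(similarity_top_k=2),
    metadata=ToolMetadata(name="notes", description="Answers questions about the notes."),  # illustrative
)
agent = OpenAIAgent.from_tools([tool], llm=llm, verbose=True)
print(agent.chat("What do the notes say?"))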
@@ -77,8 +31,8 @@ class MultiDocumentAgentSystem:
 
     def create_document_agents(self, documents_dict):
        for doc_name, doc_content in documents_dict.items():
-            vector_index = VectorStoreIndex.from_documents([Document(doc_content)])
-            summary_index = VectorStoreIndex.from_documents([Document(doc_content)])
+            vector_index = VectorStoreIndex.from_documents([Document(text=doc_content)])
+            summary_index = VectorStoreIndex.from_documents([Document(text=doc_content)])
 
             vector_query_engine = vector_index.as_query_engine(similarity_top_k=2)
             summary_query_engine = summary_index.as_query_engine()
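The change above adds the text= keyword because Document in recent llama_index releases is a Pydantic model, so its constructor rejects positional arguments: Document(doc_content) raises a TypeError. A minimal sketch of the corrected construction (the metadata field is optional and shown only for illustration):

from llama_index.core import Document

# text= is required; Pydantic models do not accept positional arguments.
doc = Document(text="contents of one .txt file", metadata={"file_name": "notes.txt"})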
@@ -131,27 +85,15 @@ class MultiDocumentAgentSystem:
 
     def query(self, user_input):
         return self.top_agent.chat(user_input)
-
+
 class DocumentRetrievalAndGeneration:
     def __init__(self, embedding_model_name, lm_model_id, data_folder):
-        self.all_splits = self.load_documents(data_folder)
+        self.documents_dict = self.load_documents(data_folder)
         self.embeddings = SentenceTransformer(embedding_model_name)
-        self.gpu_index = self.create_faiss_index()
         self.tokenizer, self.model = self.initialize_llm(lm_model_id)
-        self.agents = self.initialize_agents()
-        documents_dict = self.load_documents(data_folder)
-        self.multi_doc_system = MultiDocumentAgentSystem(documents_dict, self.model, self.embeddings)
-
-
-
-    def initialize_agents(self):
-        agents = [
-            Agent("Retriever", "Information Retrieval", self, self.tokenizer),
-            Agent("Analyzer", "Content Analysis", self, self.tokenizer),
-            Agent("Generator", "Response Generation", self, self.tokenizer),
-            Agent("Coordinator", "Task Coordination", self, self.tokenizer)
-        ]
-        return agents
+        self.llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
+        self.embed_model = OpenAIEmbedding()
+        self.multi_doc_system = MultiDocumentAgentSystem(self.documents_dict, self.llm, self.embed_model)
 
     def load_documents(self, folder_path):
         documents_dict = {}
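After this hunk, the constructor wires the loaded document dictionary into MultiDocumentAgentSystem with an OpenAI LLM and embedding model. A hedged usage sketch; the model IDs, folder path, and query below are placeholders, and both OpenAI classes read OPENAI_API_KEY from the environment:

import os
os.environ.setdefault("OPENAI_API_KEY", "sk-...")  # placeholder key

doc_retrieval_gen = DocumentRetrievalAndGeneration(
    embedding_model_name="sentence-transformers/all-mpnet-base-v2",  # placeholder
    lm_model_id="meta-llama/Meta-Llama-3-8B-Instruct",               # placeholder
    data_folder="data/",                                             # placeholder
)
answer, _ = doc_retrieval_gen.query_and_generate_response("How do I reset the device?")
print(answer)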
@@ -163,15 +105,6 @@ class DocumentRetrievalAndGeneration:
             documents_dict[file_name[:-4]] = content  # Use filename without .txt as key
         return documents_dict
 
-    def create_faiss_index(self):
-        all_texts = [split.page_content for split in self.all_splits]
-        embeddings = self.embeddings.encode(all_texts, convert_to_tensor=True).cpu().numpy()
-        index = faiss.IndexFlatL2(embeddings.shape[1])
-        index.add(embeddings)
-        gpu_resource = faiss.StandardGpuResources()
-        gpu_index = faiss.index_cpu_to_gpu(gpu_resource, 0, index)
-        return gpu_index
-
     def initialize_llm(self, model_id):
         quantization_config = BitsAndBytesConfig(
             load_in_4bit=True,
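The deleted create_faiss_index built an exact L2 index and moved it to the GPU via faiss.index_cpu_to_gpu, which requires the faiss-gpu build; retrieval is now delegated to VectorStoreIndex instead. For reference, a CPU-only sketch of the removed pattern (the random embeddings are placeholder data):

import faiss
import numpy as np

embeddings = np.random.rand(100, 384).astype("float32")  # placeholder (n, d) chunk embeddings
index = faiss.IndexFlatL2(embeddings.shape[1])            # exact L2 search, no training step
index.add(embeddings)
distances, indices = index.search(embeddings[:1], k=3)    # 3 nearest chunks for one query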
@@ -184,36 +117,10 @@ class DocumentRetrievalAndGeneration:
             model_id,
             torch_dtype=torch.bfloat16,
             device_map="auto",
-
             quantization_config=quantization_config
         )
         return tokenizer, model
 
-    def coordinate_agents(self, query):
-        coordinator = next(agent for agent in self.agents if agent.name == "Coordinator")
-
-        # Step 1: Information Retrieval
-        retriever = next(agent for agent in self.agents if agent.name == "Retriever")
-        relevant_docs = retriever.generate_response(query, self.all_splits)
-
-        # Step 2: Content Analysis
-        analyzer = next(agent for agent in self.agents if agent.name == "Analyzer")
-        analyzed_content = analyzer.generate_response(query, relevant_docs)
-
-        # Step 3: Response Generation
-        generator = next(agent for agent in self.agents if agent.name == "Generator")
-        final_response = generator.generate_response(query, analyzed_content)
-
-        # Step 4: Coordination and Refinement
-        coordinated_response = coordinator.generate_response(query, final_response)
-
-        return coordinated_response, "\n".join([doc.page_content for doc in relevant_docs])
-
-    def query_and_generate_response(self, query):
-        response = self.multi_doc_system.query(query)
-        return str(response), ""
-
-
     def generate_response_with_timeout(self, input_ids, max_new_tokens=1000):
         try:
             streamer = TextIteratorStreamer(self.tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
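The hunk above shows only the first field of the BitsAndBytesConfig. A typical complete 4-bit setup for transformers looks like the following sketch; the fields beyond load_in_4bit and the model ID are common choices, not confirmed by this diff:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "meta-llama/Meta-Llama-3-8B-Instruct"  # placeholder
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",             # assumed; NF4 is the usual 4-bit type
    bnb_4bit_use_double_quant=True,        # assumed
    bnb_4bit_compute_dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    quantization_config=quantization_config,
)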
@@ -241,61 +148,9 @@ class DocumentRetrievalAndGeneration:
             print(f"Error in generate_response_with_timeout: {str(e)}")
             return "Text generation process encountered an error"
 
-
     def query_and_generate_response(self, query):
-        similarityThreshold = 1
-        query_embedding = self.embeddings.encode(query, convert_to_tensor=True).cpu().numpy()
-        distances, indices = self.gpu_index.search(np.array([query_embedding]), k=3)
-        print("Distance", distances, "indices", indices)
-        content = ""
-        filtered_results = []
-        for idx, distance in zip(indices[0], distances[0]):
-            if distance <= similarityThreshold:
-                filtered_results.append(idx)
-        for i in filtered_results:
-            print(self.all_splits[i].page_content)
-            content += "-" * 50 + "\n"
-            content += self.all_splits[idx].page_content + "\n"
-            print("CHUNK", idx)
-            print("Distance:", distance)
-            print("indices:", indices)
-            print(self.all_splits[idx].page_content)
-            print("############################")
-
-        conversation = [
-            {"role": "system", "content": "You are a knowledgeable assistant with access to a comprehensive database."},
-            {"role": "user", "content": f"""
-            I need you to answer my question and provide related information in a specific format.
-            I have provided five relatable json files {content}, choose the most suitable chunks for answering the query.
-            RETURN ONLY SOLUTION without additional comments, sign-offs, retrived chunks, refrence to any Ticket or extra phrases. Be direct and to the point.
-            IF THERE IS NO ANSWER RELATABLE IN RETRIEVED CHUNKS, RETURN "NO SOLUTION AVAILABLE".
-            DO NOT GIVE REFRENCE TO ANY CHUNKS OR TICKETS,BE ON POINT.
-
-            Here's my question:
-            Query: {query}
-            Solution==>
-            """}
-        ]
-        #Include a final answer without additional comments, sign-offs, or extra phrases. Be direct and to the point.
-        input_ids = self.tokenizer.apply_chat_template(conversation, return_tensors="pt").to(self.model.device)
-
-        start_time = datetime.now()
-        generated_response = self.generate_response_with_timeout(input_ids)
-        elapsed_time = datetime.now() - start_time
-
-        print("Generated response:", generated_response)
-        print("Time elapsed:", elapsed_time)
-        print("Device in use:", self.model.device)
-
-        solution_text = generated_response.strip()
-        if "Solution:" in solution_text:
-            solution_text = solution_text.split("Solution:", 1)[1].strip()
-
-        # Post-processing to remove "assistant" prefix
-        solution_text = re.sub(r'^assistant\s*', '', solution_text, flags=re.IGNORECASE)
-        solution_text = solution_text.strip()
-
-        return solution_text, content
+        response = self.multi_doc_system.query(query)
+        return str(response), ""
 
     def qa_infer_gradio(self, query):
         response, related_queries = self.query_and_generate_response(query)
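generate_response_with_timeout survives the commit unchanged; its visible first line constructs a TextIteratorStreamer, which implies the standard generate-on-a-worker-thread pattern sketched below (a reconstruction, not the file's exact body):

from threading import Thread
from transformers import TextIteratorStreamer

def stream_generate(model, tokenizer, input_ids, max_new_tokens=1000):
    # Drain tokens on the caller's thread while model.generate runs on a worker;
    # the streamer raises queue.Empty if no token arrives within 60 seconds.
    streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
    thread = Thread(target=model.generate,
                    kwargs=dict(input_ids=input_ids, max_new_tokens=max_new_tokens, streamer=streamer))
    thread.start()
    text = "".join(chunk for chunk in streamer)
    thread.join()
    return text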
 
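qa_infer_gradio suggests the Space exposes the pipeline through Gradio, although the interface definition sits outside this diff. A hypothetical wiring sketch (the component labels and single-output signature are assumptions):

import gradio as gr

interface = gr.Interface(
    fn=doc_retrieval_gen.qa_infer_gradio,  # instance from the usage sketch above
    inputs=gr.Textbox(label="Query"),
    outputs=gr.Textbox(label="Response"),
    title="Multi-Document QA",
)
interface.launch()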