Shreyas094 committed
Commit 63d903a
Parent: a2c0e0e

Update app.py

Files changed (1):
  1. app.py +176 -64
app.py CHANGED
@@ -1,16 +1,103 @@
+import os
+import json
+import re
 import gradio as gr
-from huggingface_hub import InferenceApi
-from duckduckgo_search import DDGS
 import requests
-import json
+from duckduckgo_search import DDGS
 from typing import List
 from pydantic import BaseModel, Field
-import os
+from tempfile import NamedTemporaryFile
+from langchain_community.vectorstores import FAISS
+from langchain_community.document_loaders import PyPDFLoader
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from llama_parse import LlamaParse
 
 # Environment variables and configurations
 huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")
+llama_cloud_api_key = os.environ.get("LLAMA_CLOUD_API_KEY")
+
+# Initialize LlamaParse
+llama_parser = LlamaParse(
+    api_key=llama_cloud_api_key,
+    result_type="markdown",
+    num_workers=4,
+    verbose=True,
+    language="en",
+)
+
+def load_document(file: NamedTemporaryFile, parser: str = "pypdf") -> List[dict]:
+    """Loads and splits the document into pages."""
+    if parser == "pypdf":
+        loader = PyPDFLoader(file.name)
+        return loader.load_and_split()
+    elif parser == "llamaparse":
+        try:
+            documents = llama_parser.load_data(file.name)
+            return [{"page_content": doc.text, "metadata": {"source": file.name}} for doc in documents]
+        except Exception as e:
+            print(f"Error using Llama Parse: {str(e)}")
+            print("Falling back to PyPDF parser")
+            loader = PyPDFLoader(file.name)
+            return loader.load_and_split()
+    else:
+        raise ValueError("Invalid parser specified. Use 'pypdf' or 'llamaparse'.")
+
+def get_embeddings():
+    return HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
+
+def update_vectors(files, parser):
+    if not files:
+        return "Please upload at least one PDF file."
+
+    embed = get_embeddings()
+    total_chunks = 0
+
+    all_data = []
+    for file in files:
+        data = load_document(file, parser)
+        all_data.extend(data)
+        total_chunks += len(data)
+
+    if os.path.exists("faiss_database"):
+        database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
+        database.add_documents(all_data)
+    else:
+        database = FAISS.from_documents(all_data, embed)
+
+    database.save_local("faiss_database")
+
+    return f"Vector store updated successfully. Processed {total_chunks} chunks from {len(files)} files using {parser}."
+
+def generate_chunked_response(prompt, max_tokens=1000, max_chunks=5):
+    API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
+    headers = {"Authorization": f"Bearer {huggingface_token}"}
+    payload = {
+        "inputs": prompt,
+        "parameters": {
+            "max_new_tokens": max_tokens,
+            "temperature": 0.7,
+            "top_p": 0.95,
+            "top_k": 40,
+            "repetition_penalty": 1.1
+        }
+    }
+
+    full_response = ""
+    for _ in range(max_chunks):
+        response = requests.post(API_URL, headers=headers, json=payload)
+        if response.status_code == 200:
+            result = response.json()
+            if isinstance(result, list) and len(result) > 0:
+                chunk = result[0].get('generated_text', '')
+                full_response += chunk
+                if chunk.endswith((".", "!", "?")):
+                    break
+            else:
+                break
+        else:
+            break
+    return full_response.strip()
 
-# Function to perform a DuckDuckGo search
 def duckduckgo_search(query):
     with DDGS() as ddgs:
         results = ddgs.text(query, max_results=5)
@@ -23,81 +110,106 @@ class CitingSources(BaseModel):
     )
 
 def get_response_with_search(query):
-    # Perform the web search
     search_results = duckduckgo_search(query)
-
-    # Use the search results as context for the model
     context = "\n".join(f"{result['title']}\n{result['body']}\nSource: {result['href']}\n"
                         for result in search_results if 'body' in result)
 
-    # Prompt formatted for Mistral-7B-Instruct
     prompt = f"""<s>[INST] Using the following context:
 {context}
 Write a detailed and complete research document that fulfills the following user request: '{query}'
 After writing the document, please provide a list of sources used in your response. [/INST]"""
 
-    # API endpoint for Mistral-7B-Instruct-v0.3
-    API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3"
+    generated_text = generate_chunked_response(prompt)
 
-    # Headers
-    headers = {"Authorization": f"Bearer {huggingface_token}"}
+    content_start = generated_text.find("[/INST]")
+    if content_start != -1:
+        generated_text = generated_text[content_start + 7:].strip()
 
-    # Payload
-    payload = {
-        "inputs": prompt,
-        "parameters": {
-            "max_new_tokens": 1000,
-            "temperature": 0.7,
-            "top_p": 0.95,
-            "top_k": 40,
-            "repetition_penalty": 1.1
-        }
-    }
+    parts = generated_text.split("Sources:", 1)
+    main_content = parts[0].strip()
+    sources = parts[1].strip() if len(parts) > 1 else ""
 
-    # Make the API call
-    response = requests.post(API_URL, headers=headers, json=payload)
-
-    if response.status_code == 200:
-        result = response.json()
-        if isinstance(result, list) and len(result) > 0:
-            generated_text = result[0].get('generated_text', 'No text generated')
-
-            # Remove the instruction part
-            content_start = generated_text.find("[/INST]")
-            if content_start != -1:
-                generated_text = generated_text[content_start + 7:].strip()
-
-            # Split the response into main content and sources
-            parts = generated_text.split("Sources:", 1)
-            main_content = parts[0].strip()
-            sources = parts[1].strip() if len(parts) > 1 else ""
-
-            return main_content, sources
-        else:
-            return f"Unexpected response format: {result}", ""
+    return main_content, sources
+
+def get_response_from_pdf(query):
+    embed = get_embeddings()
+    if os.path.exists("faiss_database"):
+        database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
     else:
-        return f"Error: API returned status code {response.status_code}", ""
+        return "No documents available. Please upload PDF documents to answer questions.", ""
+
+    retriever = database.as_retriever()
+    relevant_docs = retriever.get_relevant_documents(query)
+    context_str = "\n".join([doc.page_content for doc in relevant_docs])
+
+    prompt = f"""<s>[INST] Using the following context from the PDF documents:
+{context_str}
+Write a detailed and complete response that answers the following user question: '{query}'
+After writing the response, please provide a list of sources used (document names) in your answer. [/INST]"""
 
-def chatbot_interface(message, history):
-    main_content, sources = get_response_with_search(message)
+    generated_text = generate_chunked_response(prompt)
+
+    content_start = generated_text.find("[/INST]")
+    if content_start != -1:
+        generated_text = generated_text[content_start + 7:].strip()
+
+    parts = generated_text.split("Sources:", 1)
+    main_content = parts[0].strip()
+    sources = parts[1].strip() if len(parts) > 1 else ""
+
+    return main_content, sources
+
+def chatbot_interface(message, history, use_web_search):
+    if use_web_search:
+        main_content, sources = get_response_with_search(message)
+    else:
+        main_content, sources = get_response_from_pdf(message)
+
     formatted_response = f"{main_content}\n\nSources:\n{sources}"
     return formatted_response
 
-# Gradio chatbot interface
-iface = gr.ChatInterface(
-    fn=chatbot_interface,
-    title="AI-powered Web Search Assistant",
-    description="Ask questions, and I'll search the web and provide answers using the Mistral-7B-Instruct model.",
-    examples=[
-        ["What are the latest developments in AI?"],
-        ["Tell me about recent updates on GitHub"],
-        ["What are the best hotels in Galapagos, Ecuador?"],
-        ["Summarize recent advancements in Python programming"],
-    ],
-    retry_btn="Retry",
-    undo_btn="Undo",
-    clear_btn="Clear",
-)
+# Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("# AI-powered Web Search and PDF Chat Assistant")
+
+    with gr.Row():
+        file_input = gr.Files(label="Upload your PDF documents", file_types=[".pdf"])
+        parser_dropdown = gr.Dropdown(choices=["pypdf", "llamaparse"], label="Select PDF Parser", value="pypdf")
+        update_button = gr.Button("Upload Document")
+
+    update_output = gr.Textbox(label="Update Status")
+    update_button.click(update_vectors, inputs=[file_input, parser_dropdown], outputs=update_output)
+
+    with gr.Row():
+        chatbot = gr.Chatbot(label="Conversation")
+        with gr.Column():
+            msg = gr.Textbox(label="Ask a question")
+            use_web_search = gr.Checkbox(label="Use Web Search", value=False)
+            submit = gr.Button("Submit")
+
+    gr.Examples(
+        examples=[
+            ["What are the latest developments in AI?"],
+            ["Tell me about recent updates on GitHub"],
+            ["What are the best hotels in Galapagos, Ecuador?"],
+            ["Summarize recent advancements in Python programming"],
+        ],
+        inputs=msg,
+    )
+
+    submit.click(chatbot_interface, inputs=[msg, chatbot, use_web_search], outputs=[chatbot])
+    msg.submit(chatbot_interface, inputs=[msg, chatbot, use_web_search], outputs=[chatbot])
+
+    gr.Markdown(
+        """
+        ## How to use
+        1. Upload PDF documents using the file input at the top.
+        2. Select the PDF parser (pypdf or llamaparse) and click "Upload Document" to update the vector store.
+        3. Ask questions in the textbox.
+        4. Toggle "Use Web Search" to switch between PDF chat and web search.
+        5. Click "Submit" or press Enter to get a response.
+        """
+    )
 
 if __name__ == "__main__":
-    iface.launch(share=True)
+    demo.launch(share=True)
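
A note for anyone trying this commit locally: the new chatbot_interface returns a plain string, while both event handlers wire its output to the gr.Chatbot component, which in the Gradio versions this code targets renders a list of (user, bot) message pairs. A small adapter along the following lines — a hypothetical sketch, not part of the commit — keeps the committed functions unchanged while returning the history format the Chatbot expects:

# Hypothetical adapter, not part of commit 63d903a. Assumes app.py is
# importable and that HUGGINGFACE_TOKEN / LLAMA_CLOUD_API_KEY are set.
from app import chatbot_interface

def chat_with_history(message, history, use_web_search):
    history = history or []
    formatted_response = chatbot_interface(message, history, use_web_search)
    # Append the new exchange as a (user, bot) pair and return the full
    # history, which is the value format gr.Chatbot renders.
    history.append((message, formatted_response))
    return history

# Wired in place of the committed handler:
# submit.click(chat_with_history, inputs=[msg, chatbot, use_web_search], outputs=[chatbot])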