Shreyas094 committed
Commit 97c6d6a · verified · 1 Parent(s): 6e829a6

Update app.py

Files changed (1):
  app.py +252 -47
app.py CHANGED
@@ -1,53 +1,258 @@
- import requests
  import os
- from dotenv import load_dotenv
  import gradio as gr

- # Load environment variables from .env file
- load_dotenv()
-
- # Replace with your actual Cloudflare API token
- API_TOKEN = os.environ.get("CLOUDFLARE_AUTH_TOKEN")
-
- def get_cloudflare_accounts():
-     if not API_TOKEN:
-         return "Please set the CLOUDFLARE_AUTH_TOKEN environment variable"
-
-     # Cloudflare API endpoint for getting account details
-     url = "https://api.cloudflare.com/client/v4/accounts"
-
-     # Headers for the API request
-     headers = {
-         "Authorization": f"Bearer {API_TOKEN}",
-         "Content-Type": "application/json"
-     }
-
-     # Making the API request
-     response = requests.get(url, headers=headers)
-
-     # Checking if the request was successful
-     if response.status_code == 200:
-         # Parsing the JSON response
-         data = response.json()
-         if data['success']:
-             accounts = data['result']
-             result = ""
-             for account in accounts:
-                 account_id = account['id']
-                 account_name = account['name']
-                 result += f"Account Name: {account_name}, Account ID: {account_id}\n"
-             return result
-         else:
-             return f"Error fetching account details: {data['errors']}"
      else:
-         return f"Failed to fetch account details. HTTP Status Code: {response.status_code}"

- # Creating a Gradio interface
- iface = gr.Interface(fn=get_cloudflare_accounts,
-                      inputs=None,
-                      outputs="text",
-                      title="Cloudflare Account Details",
-                      description="Fetch and display Cloudflare account details using the API.")

- # Launch the interface
- iface.launch()
  import os
+ import json
+ import re
  import gradio as gr
+ import requests
+ from duckduckgo_search import DDGS
+ from typing import List
+ from pydantic import BaseModel, Field
+ from tempfile import NamedTemporaryFile
+ from langchain_community.vectorstores import FAISS
+ from langchain_community.document_loaders import PyPDFLoader
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+ from llama_parse import LlamaParse
+ from langchain_core.documents import Document
+ from huggingface_hub import InferenceClient
+ import inspect
+
+ # Environment variables and configurations
+ huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")
+ llama_cloud_api_key = os.environ.get("LLAMA_CLOUD_API_KEY")
+
+ MODELS = [
+     "google/gemma-2-9b",
+     "mistralai/Mixtral-8x7B-Instruct-v0.1",
+     "mistralai/Mistral-7B-Instruct-v0.3",
+     "microsoft/Phi-3-mini-4k-instruct"
+ ]
+
+ # Initialize LlamaParse
+ llama_parser = LlamaParse(
+     api_key=llama_cloud_api_key,
+     result_type="markdown",
+     num_workers=4,
+     verbose=True,
+     language="en",
+ )
+
+ def load_document(file: NamedTemporaryFile, parser: str = "llamaparse") -> List[Document]:
+     """Loads and splits the document into pages."""
+     if parser == "pypdf":
+         loader = PyPDFLoader(file.name)
+         return loader.load_and_split()
+     elif parser == "llamaparse":
+         try:
+             documents = llama_parser.load_data(file.name)
+             return [Document(page_content=doc.text, metadata={"source": file.name}) for doc in documents]
+         except Exception as e:
+             print(f"Error using Llama Parse: {str(e)}")
+             print("Falling back to PyPDF parser")
+             loader = PyPDFLoader(file.name)
+             return loader.load_and_split()
+     else:
+         raise ValueError("Invalid parser specified. Use 'pypdf' or 'llamaparse'.")
+
+ def get_embeddings():
+     return HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")

+ def update_vectors(files, parser):
+     if not files:
+         return "Please upload at least one PDF file."
+
+     embed = get_embeddings()
+     total_chunks = 0
+
+     all_data = []
+     for file in files:
+         data = load_document(file, parser)
+         all_data.extend(data)
+         total_chunks += len(data)
+
+     if os.path.exists("faiss_database"):
+         database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
+         database.add_documents(all_data)
      else:
+         database = FAISS.from_documents(all_data, embed)
+
+     database.save_local("faiss_database")
+
+     return f"Vector store updated successfully. Processed {total_chunks} chunks from {len(files)} files using {parser}."
+
+ def generate_chunked_response(prompt, model, max_tokens=1000, max_chunks=5, temperature=0.7):
+     client = InferenceClient(
+         model,
+         token=huggingface_token,
+     )
+
+     full_response = ""
+     messages = [{"role": "user", "content": prompt}]
+
+     try:
+         for message in client.chat_completion(
+             messages=messages,
+             max_tokens=max_tokens,
+             temperature=temperature,
+             stream=True,
+         ):
+             chunk = message.choices[0].delta.content
+             if chunk:
+                 full_response += chunk
+
+     except Exception as e:
+         print(f"Error in generating response: {str(e)}")
+
+     # Clean up the response
+     clean_response = re.sub(r'<s>\[INST\].*?\[/INST\]\s*', '', full_response, flags=re.DOTALL)
+     clean_response = clean_response.replace("Using the following context:", "").strip()
+     clean_response = clean_response.replace("Using the following context from the PDF documents:", "").strip()
+
+     return clean_response
+
+ def duckduckgo_search(query):
+     with DDGS() as ddgs:
+         results = ddgs.text(query, max_results=5)
+     return results
+
+ class CitingSources(BaseModel):
+     sources: List[str] = Field(
+         ...,
+         description="List of sources to cite. Should be a URL of the source."
+     )
+
+ def get_response_from_pdf(query, model, temperature=0.7):
+     embed = get_embeddings()
+     if os.path.exists("faiss_database"):
+         database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
+     else:
+         return "No documents available. Please upload PDF documents to answer questions."
+
+     retriever = database.as_retriever()
+     relevant_docs = retriever.get_relevant_documents(query)
+     context_str = "\n".join([doc.page_content for doc in relevant_docs])
+
+     prompt = f"""<s>[INST] Using the following context from the PDF documents:
+ {context_str}
+ Write a detailed and complete response that answers the following user question: '{query}'
+ Do not include a list of sources in your response. [/INST]"""
+
+     generated_text = generate_chunked_response(prompt, model, temperature=temperature)
+
+     # Clean the response
+     clean_text = re.sub(r'<s>\[INST\].*?\[/INST\]\s*', '', generated_text, flags=re.DOTALL)
+     clean_text = clean_text.replace("Using the following context from the PDF documents:", "").strip()
+
+     return clean_text
+
+ def get_response_with_search(query, model, temperature=0.7):
+     search_results = duckduckgo_search(query)
+     context = "\n".join(f"{result['title']}\n{result['body']}\nSource: {result['href']}\n"
+                         for result in search_results if 'body' in result)
+
+     prompt = f"""<s>[INST] Using the following context:
+ {context}
+ Write a detailed and complete research document that fulfills the following user request: '{query}'
+ After writing the document, please provide a list of sources used in your response. [/INST]"""
+
+     generated_text = generate_chunked_response(prompt, model, temperature=temperature)
+
+     # Clean the response
+     clean_text = re.sub(r'<s>\[INST\].*?\[/INST\]\s*', '', generated_text, flags=re.DOTALL)
+     clean_text = clean_text.replace("Using the following context:", "").strip()
+
+     # Split the content and sources
+     parts = clean_text.split("Sources:", 1)
+     main_content = parts[0].strip()
+     sources = parts[1].strip() if len(parts) > 1 else ""
+
+     return main_content, sources
+
+ def chatbot_interface(message, history, use_web_search, model, temperature):
+     if not message.strip():  # Check if the message is empty or just whitespace
+         return history
+
+     if use_web_search:
+         main_content, sources = get_response_with_search(message, model, temperature)
+         formatted_response = f"{main_content}\n\nSources:\n{sources}"
+     else:
+         response = get_response_from_pdf(message, model, temperature)
+         formatted_response = response
+
+     # Check if the last message in history is the same as the current message
+     if history and history[-1][0] == message:
+         # Replace the last response instead of adding a new one
+         history[-1] = (message, formatted_response)
+     else:
+         # Add the new message-response pair
+         history.append((message, formatted_response))
+
+     return history
+
+
+ def clear_and_update_chat(message, history, use_web_search, model, temperature):
+     updated_history = chatbot_interface(message, history, use_web_search, model, temperature)
+     return "", updated_history  # Return empty string to clear the input
+
+ # Gradio interface
+ with gr.Blocks() as demo:
+
+     is_generating = gr.State(False)
+
+     def protected_clear_and_update_chat(message, history, use_web_search, model, temperature, is_generating):
+         if is_generating:
+             return message, history, is_generating
+         is_generating = True
+         updated_message, updated_history = clear_and_update_chat(message, history, use_web_search, model, temperature)
+         is_generating = False
+         return updated_message, updated_history, is_generating
+
+     gr.Markdown("# AI-powered Web Search and PDF Chat Assistant")
+
+     with gr.Row():
+         file_input = gr.Files(label="Upload your PDF documents", file_types=[".pdf"])
+         parser_dropdown = gr.Dropdown(choices=["pypdf", "llamaparse"], label="Select PDF Parser", value="llamaparse")
+         update_button = gr.Button("Upload Document")
+
+     update_output = gr.Textbox(label="Update Status")
+     update_button.click(update_vectors, inputs=[file_input, parser_dropdown], outputs=update_output)
+
+     chatbot = gr.Chatbot(label="Conversation")
+     msg = gr.Textbox(label="Ask a question")
+     use_web_search = gr.Checkbox(label="Use Web Search", value=False)
+
+     with gr.Row():
+         model_dropdown = gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[2])
+         temperature_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature")
+
+     submit = gr.Button("Submit")
+
+     gr.Examples(
+         examples=[
+             ["What are the latest developments in AI?"],
+             ["Tell me about recent updates on GitHub"],
+             ["What are the best hotels in Galapagos, Ecuador?"],
+             ["Summarize recent advancements in Python programming"],
+         ],
+         inputs=msg,
+     )
+
+     submit.click(protected_clear_and_update_chat,
+                  inputs=[msg, chatbot, use_web_search, model_dropdown, temperature_slider, is_generating],
+                  outputs=[msg, chatbot, is_generating])
+     msg.submit(protected_clear_and_update_chat,
+                inputs=[msg, chatbot, use_web_search, model_dropdown, temperature_slider, is_generating],
+                outputs=[msg, chatbot, is_generating])

+     gr.Markdown(
+         """
+         ## How to use
+         1. Upload PDF documents using the file input at the top.
+         2. Select the PDF parser (pypdf or llamaparse) and click "Upload Document" to update the vector store.
+         3. Ask questions in the textbox.
+         4. Toggle "Use Web Search" to switch between PDF chat and web search.
+         5. Adjust the Temperature slider to fine-tune the response generation.
+         6. Click "Submit" or press Enter to get a response.
+         """
+     )

+ if __name__ == "__main__":
+     demo.launch(share=True)