shukdevdattaEX committed
Commit 6897c6e · verified · 1 Parent(s): 010e6f4

Update app.py

Files changed (1)
  1. app.py +272 -272
app.py CHANGED
@@ -1,273 +1,273 @@
-import gradio as gr
-import PyPDF2
-import io
-import time
-import os
-from together import Together
-import textwrap
-import tempfile
-
-def extract_text_from_pdf(pdf_file):
-    """Extract text from a PDF file"""
-    text = ""
-    try:
-        # Check if the pdf_file is already in bytes format or needs conversion
-        if hasattr(pdf_file, 'read'):
-            # If it's a file-like object (from gradio upload)
-            pdf_content = pdf_file.read()
-            # Reset the file pointer for potential future reads
-            if hasattr(pdf_file, 'seek'):
-                pdf_file.seek(0)
-        else:
-            # If it's already bytes
-            pdf_content = pdf_file
-
-        # Read the PDF file
-        pdf_reader = PyPDF2.PdfReader(io.BytesIO(pdf_content))
-
-        # Extract text from each page
-        for page_num in range(len(pdf_reader.pages)):
-            page_text = pdf_reader.pages[page_num].extract_text()
-            if page_text: # Check if text extraction worked
-                text += page_text + "\n\n"
-            else:
-                text += f"[Page {page_num+1} - No extractable text found]\n\n"
-
-        if not text.strip():
-            return "No text could be extracted from the PDF. The document may be scanned or image-based."
-
-        return text
-    except Exception as e:
-        return f"Error extracting text from PDF: {str(e)}"
-
-def format_chat_history(history):
-    """Format the chat history for display"""
-    formatted_history = []
-    for user_msg, bot_msg in history:
-        formatted_history.append((user_msg, bot_msg))
-    return formatted_history
-
-def chat_with_pdf(api_key, pdf_text, user_question, history):
-    """Chat with the PDF using Together API"""
-    if not api_key.strip():
-        return history + [(user_question, "Error: Please enter your Together API key.")], history
-
-    if not pdf_text.strip() or pdf_text.startswith("Error") or pdf_text.startswith("No text"):
-        return history + [(user_question, "Error: Please upload a valid PDF file with extractable text first.")], history
-
-    if not user_question.strip():
-        return history + [(user_question, "Error: Please enter a question.")], history
-
-    try:
-        # Initialize Together client with the API key
-        client = Together(api_key=api_key)
-
-        # Create the system message with PDF context
-        # Truncate the PDF text if it's too long (model context limit handling)
-        max_context_length = 10000 #10000
-
-        if len(pdf_text) > max_context_length:
-            # More sophisticated truncation that preserves beginning and end
-            half_length = max_context_length // 2
-            pdf_context = pdf_text[:half_length] + "\n\n[...Content truncated due to length...]\n\n" + pdf_text[-half_length:]
-        else:
-            pdf_context = pdf_text
-
-        system_message = f"""You are an intelligent assistant designed to read, understand, and extract information from PDF documents.
-Based on any question or query the user asks—whether it's about content, summaries, data extraction, definitions, insights, or interpretation—you will
-analyze the following PDF content and provide an accurate, helpful response grounded in the document. Always respond with clear, concise, and context-aware information.
-
-PDF CONTENT:
-{pdf_context}
-
-Answer the user's questions only based on the PDF content above. If the answer cannot be found in the PDF, politely state that the information is not available in the provided document."""
-
-        # Prepare message history for Together API
-        messages = [
-            {"role": "system", "content": system_message},
-        ]
-
-        # Add chat history
-        for h_user, h_bot in history:
-            messages.append({"role": "user", "content": h_user})
-            messages.append({"role": "assistant", "content": h_bot})
-
-        # Add the current user question
-        messages.append({"role": "user", "content": user_question})
-
-        # Call the Together API
-        response = client.chat.completions.create(
-            model="meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
-            messages=messages,
-            max_tokens=5000, #5000
-            temperature=0.7,
-        )
-
-        # Extract the assistant's response
-        assistant_response = response.choices[0].message.content
-
-        # Update the chat history
-        new_history = history + [(user_question, assistant_response)]
-
-        return new_history, new_history
-
-    except Exception as e:
-        error_message = f"Error: {str(e)}"
-        return history + [(user_question, error_message)], history
-
-def process_pdf(pdf_file, api_key_input):
-    """Process the uploaded PDF file"""
-    if pdf_file is None:
-        return "Please upload a PDF file.", "", []
-
-    try:
-        # Get the file name
-        file_name = os.path.basename(pdf_file.name) if hasattr(pdf_file, 'name') else "Uploaded PDF"
-
-        # Extract text from the PDF
-        pdf_text = extract_text_from_pdf(pdf_file)
-
-        # Check if there was an error in extraction
-        if pdf_text.startswith("Error extracting text from PDF"):
-            return f"❌ {pdf_text}", "", []
-
-        if not pdf_text.strip() or pdf_text.startswith("No text could be extracted"):
-            return f"⚠️ {pdf_text}", "", []
-
-        # Count words for information
-        word_count = len(pdf_text.split())
-
-        # Return a message with the file name and text content
-        status_message = f"✅ Successfully processed PDF: {file_name} ({word_count} words extracted)"
-
-        # Also return an empty history
-        return status_message, pdf_text, []
-    except Exception as e:
-        return f"❌ Error processing PDF: {str(e)}", "", []
-
-def validate_api_key(api_key):
-    """Simple validation for API key format"""
-    if not api_key or not api_key.strip():
-        return "❌ API Key is required"
-
-    if len(api_key.strip()) < 10:
-        return "❌ API Key appears to be too short"
-
-    return "✓ API Key format looks valid (not verified with server)"
-
-# Create the Gradio interface
-with gr.Blocks(title="ChatPDF with Together AI") as app:
-    gr.Markdown("# 📄 ChatPDF with Together AI")
-    gr.Markdown("Upload a PDF and chat with it using the Llama-3.3-70B model.")
-
-    with gr.Row():
-        with gr.Column(scale=1):
-            # API Key input
-            api_key_input = gr.Textbox(
-                label="Together API Key",
-                placeholder="Enter your Together API key here...",
-                type="password"
-            )
-
-            # API key validation
-            api_key_status = gr.Textbox(
-                label="API Key Status",
-                interactive=False
-            )
-
-            # PDF upload
-            pdf_file = gr.File(
-                label="Upload PDF",
-                file_types=[".pdf"],
-                type="binary" # Ensure we get binary data
-            )
-
-            # Process PDF button
-            process_button = gr.Button("Process PDF")
-
-            # Status message
-            status_message = gr.Textbox(
-                label="Status",
-                interactive=False
-            )
-
-            # Hidden field to store the PDF text
-            pdf_text = gr.Textbox(visible=False)
-
-            # Optional: Show PDF preview
-            with gr.Accordion("PDF Content Preview", open=False):
-                pdf_preview = gr.Textbox(
-                    label="Extracted Text Preview",
-                    interactive=False,
-                    max_lines=10,
-                    show_copy_button=True
-                )
-
-        with gr.Column(scale=2):
-            # Chat interface
-            chatbot = gr.Chatbot(
-                label="Chat with PDF",
-                height=500,
-                show_copy_button=True
-            )
-
-            # Question input
-            question = gr.Textbox(
-                label="Ask a question about the PDF",
-                placeholder="What is the main topic of this document?",
-                lines=2
-            )
-
-            # Submit button
-            submit_button = gr.Button("Submit Question")
-
-    # Event handlers
-    def update_preview(text):
-        """Update the preview with the first few lines of the PDF text"""
-        if not text or text.startswith("Error") or text.startswith("No text"):
-            return text
-
-        # Get the first ~500 characters for preview
-        preview = text[:500]
-        if len(text) > 500:
-            preview += "...\n[Text truncated for preview. Full text will be used for chat.]"
-        return preview
-
-    # API key validation event
-    api_key_input.change(
-        fn=validate_api_key,
-        inputs=[api_key_input],
-        outputs=[api_key_status]
-    )
-
-    process_button.click(
-        fn=process_pdf,
-        inputs=[pdf_file, api_key_input],
-        outputs=[status_message, pdf_text, chatbot]
-    ).then(
-        fn=update_preview,
-        inputs=[pdf_text],
-        outputs=[pdf_preview]
-    )
-
-    submit_button.click(
-        fn=chat_with_pdf,
-        inputs=[api_key_input, pdf_text, question, chatbot],
-        outputs=[chatbot, chatbot]
-    ).then(
-        fn=lambda: "",
-        outputs=question
-    )
-
-    question.submit(
-        fn=chat_with_pdf,
-        inputs=[api_key_input, pdf_text, question, chatbot],
-        outputs=[chatbot, chatbot]
-    ).then(
-        fn=lambda: "",
-        outputs=question
-    )
-
-# Launch the app
-if __name__ == "__main__":
+import gradio as gr
+import PyPDF2
+import io
+import time
+import os
+from together import Together
+import textwrap
+import tempfile
+
+def extract_text_from_pdf(pdf_file):
+    """Extract text from a PDF file"""
+    text = ""
+    try:
+        # Check if the pdf_file is already in bytes format or needs conversion
+        if hasattr(pdf_file, 'read'):
+            # If it's a file-like object (from gradio upload)
+            pdf_content = pdf_file.read()
+            # Reset the file pointer for potential future reads
+            if hasattr(pdf_file, 'seek'):
+                pdf_file.seek(0)
+        else:
+            # If it's already bytes
+            pdf_content = pdf_file
+
+        # Read the PDF file
+        pdf_reader = PyPDF2.PdfReader(io.BytesIO(pdf_content))
+
+        # Extract text from each page
+        for page_num in range(len(pdf_reader.pages)):
+            page_text = pdf_reader.pages[page_num].extract_text()
+            if page_text: # Check if text extraction worked
+                text += page_text + "\n\n"
+            else:
+                text += f"[Page {page_num+1} - No extractable text found]\n\n"
+
+        if not text.strip():
+            return "No text could be extracted from the PDF. The document may be scanned or image-based."
+
+        return text
+    except Exception as e:
+        return f"Error extracting text from PDF: {str(e)}"
+
+def format_chat_history(history):
+    """Format the chat history for display"""
+    formatted_history = []
+    for user_msg, bot_msg in history:
+        formatted_history.append((user_msg, bot_msg))
+    return formatted_history
+
+def chat_with_pdf(api_key, pdf_text, user_question, history):
+    """Chat with the PDF using Together API"""
+    if not api_key.strip():
+        return history + [(user_question, "Error: Please enter your Together API key.")], history
+
+    if not pdf_text.strip() or pdf_text.startswith("Error") or pdf_text.startswith("No text"):
+        return history + [(user_question, "Error: Please upload a valid PDF file with extractable text first.")], history
+
+    if not user_question.strip():
+        return history + [(user_question, "Error: Please enter a question.")], history
+
+    try:
+        # Initialize Together client with the API key
+        client = Together(api_key=api_key)
+
+        # Create the system message with PDF context
+        # Truncate the PDF text if it's too long (model context limit handling)
+        max_context_length = 10000 #10000
+
+        if len(pdf_text) > max_context_length:
+            # More sophisticated truncation that preserves beginning and end
+            half_length = max_context_length // 2
+            pdf_context = pdf_text[:half_length] + "\n\n[...Content truncated due to length...]\n\n" + pdf_text[-half_length:]
+        else:
+            pdf_context = pdf_text
+
+        system_message = f"""You are an intelligent assistant designed to read, understand, and extract information from PDF documents.
+Based on any question or query the user asks—whether it's about content, summaries, data extraction, definitions, insights, or interpretation—you will
+analyze the following PDF content and provide an accurate, helpful response grounded in the document. Always respond with clear, concise, and context-aware information.
+
+PDF CONTENT:
+{pdf_context}
+
+Answer the user's questions only based on the PDF content above. If the answer cannot be found in the PDF, politely state that the information is not available in the provided document."""
+
+        # Prepare message history for Together API
+        messages = [
+            {"role": "system", "content": system_message},
+        ]
+
+        # Add chat history
+        for h_user, h_bot in history:
+            messages.append({"role": "user", "content": h_user})
+            messages.append({"role": "assistant", "content": h_bot})
+
+        # Add the current user question
+        messages.append({"role": "user", "content": user_question})
+
+        # Call the Together API
+        response = client.chat.completions.create(
+            model="meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
+            messages=messages,
+            max_tokens=5000, #5000
+            temperature=0.7,
+        )
+
+        # Extract the assistant's response
+        assistant_response = response.choices[0].message.content
+
+        # Update the chat history
+        new_history = history + [(user_question, assistant_response)]
+
+        return new_history, new_history
+
+    except Exception as e:
+        error_message = f"Error: {str(e)}"
+        return history + [(user_question, error_message)], history
+
+def process_pdf(pdf_file, api_key_input):
+    """Process the uploaded PDF file"""
+    if pdf_file is None:
+        return "Please upload a PDF file.", "", []
+
+    try:
+        # Get the file name
+        file_name = os.path.basename(pdf_file.name) if hasattr(pdf_file, 'name') else "Uploaded PDF"
+
+        # Extract text from the PDF
+        pdf_text = extract_text_from_pdf(pdf_file)
+
+        # Check if there was an error in extraction
+        if pdf_text.startswith("Error extracting text from PDF"):
+            return f"❌ {pdf_text}", "", []
+
+        if not pdf_text.strip() or pdf_text.startswith("No text could be extracted"):
+            return f"⚠️ {pdf_text}", "", []
+
+        # Count words for information
+        word_count = len(pdf_text.split())
+
+        # Return a message with the file name and text content
+        status_message = f"✅ Successfully processed PDF: {file_name} ({word_count} words extracted)"
+
+        # Also return an empty history
+        return status_message, pdf_text, []
+    except Exception as e:
+        return f"❌ Error processing PDF: {str(e)}", "", []
+
+def validate_api_key(api_key):
+    """Simple validation for API key format"""
+    if not api_key or not api_key.strip():
+        return "❌ API Key is required"
+
+    if len(api_key.strip()) < 10:
+        return "❌ API Key appears to be too short"
+
+    return "✓ API Key format looks valid (not verified with server)"
+
+# Create the Gradio interface
+with gr.Blocks(title="ChatPDF with Together AI", theme=gr.themes.Ocean()) as app:
+    gr.Markdown("# 📄 ChatPDF with Together AI")
+    gr.Markdown("Upload a PDF and chat with it using the Llama-3.3-70B model.")
+
+    with gr.Row():
+        with gr.Column(scale=1):
+            # API Key input
+            api_key_input = gr.Textbox(
+                label="Together API Key",
+                placeholder="Enter your Together API key here...",
+                type="password"
+            )
+
+            # API key validation
+            api_key_status = gr.Textbox(
+                label="API Key Status",
+                interactive=False
+            )
+
+            # PDF upload
+            pdf_file = gr.File(
+                label="Upload PDF",
+                file_types=[".pdf"],
+                type="binary" # Ensure we get binary data
+            )
+
+            # Process PDF button
+            process_button = gr.Button("Process PDF")
+
+            # Status message
+            status_message = gr.Textbox(
+                label="Status",
+                interactive=False
+            )
+
+            # Hidden field to store the PDF text
+            pdf_text = gr.Textbox(visible=False)
+
+            # Optional: Show PDF preview
+            with gr.Accordion("PDF Content Preview", open=False):
+                pdf_preview = gr.Textbox(
+                    label="Extracted Text Preview",
+                    interactive=False,
+                    max_lines=10,
+                    show_copy_button=True
+                )
+
+        with gr.Column(scale=2):
+            # Chat interface
+            chatbot = gr.Chatbot(
+                label="Chat with PDF",
+                height=500,
+                show_copy_button=True
+            )
+
+            # Question input
+            question = gr.Textbox(
+                label="Ask a question about the PDF",
+                placeholder="What is the main topic of this document?",
+                lines=2
+            )
+
+            # Submit button
+            submit_button = gr.Button("Submit Question")
+
+    # Event handlers
+    def update_preview(text):
+        """Update the preview with the first few lines of the PDF text"""
+        if not text or text.startswith("Error") or text.startswith("No text"):
+            return text
+
+        # Get the first ~500 characters for preview
+        preview = text[:500]
+        if len(text) > 500:
+            preview += "...\n[Text truncated for preview. Full text will be used for chat.]"
+        return preview
+
+    # API key validation event
+    api_key_input.change(
+        fn=validate_api_key,
+        inputs=[api_key_input],
+        outputs=[api_key_status]
+    )
+
+    process_button.click(
+        fn=process_pdf,
+        inputs=[pdf_file, api_key_input],
+        outputs=[status_message, pdf_text, chatbot]
+    ).then(
+        fn=update_preview,
+        inputs=[pdf_text],
+        outputs=[pdf_preview]
+    )
+
+    submit_button.click(
+        fn=chat_with_pdf,
+        inputs=[api_key_input, pdf_text, question, chatbot],
+        outputs=[chatbot, chatbot]
+    ).then(
+        fn=lambda: "",
+        outputs=question
+    )
+
+    question.submit(
+        fn=chat_with_pdf,
+        inputs=[api_key_input, pdf_text, question, chatbot],
+        outputs=[chatbot, chatbot]
+    ).then(
+        fn=lambda: "",
+        outputs=question
+    )
+
+# Launch the app
+if __name__ == "__main__":
     app.launch(share=True)