arjunanand13 committed
Commit bb5f164 · verified · 1 Parent(s): 0319e0b

Update app.py

Files changed (1):
  1. app.py +160 -47
app.py CHANGED
@@ -5,53 +5,166 @@ from config import OPENAI_API_KEY
 import os
 openai.api_key = os.getenv("OPENAI_API_KEY")

-def extract_text_from_pdf(pdf_file):
-    text = ""
-    with open(pdf_file.name, "rb") as file:
-        reader = PyPDF2.PdfReader(file)
-        for page in reader.pages:
-            text += page.extract_text() + "\n"
-    return text
-
-def answer_question(pdf_file, question):
-    # Extract text from the PDF
-    text = extract_text_from_pdf(pdf_file)
-
-    # Define the assistant's behavior
-    assistant_prompt = f"""
-    You are a helpful assistant that answers questions based on the content of the PDF provided.
-    Here is the content of the PDF:
-    {text}
-
-    User question: {question}
-    """
-
-    # Call OpenAI API to get the answer using GPT-4 Turbo
-    response = openai.ChatCompletion.create(
-        model="gpt-4-turbo",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": assistant_prompt}
         ]
     )

-    answer = response.choices[0].message['content']
-    return answer
-
-# Create Gradio interface using the updated input/output classes
-iface = gr.Interface(
-    fn=answer_question,
-    inputs=[
-        gr.File(label="Upload PDF"),
-        gr.Textbox(label="Ask a question about the PDF", placeholder="What do you want to know?")
-    ],
-    outputs="text",
-    title="PDF Q&A with OpenAI Assistant",
-    description="Upload a PDF document and ask questions about its content. The assistant will provide answers based on the PDF.",
-    examples=[
-        ["renesas-ra6m1-group-datasheet.pdf", "Which Renesas products are mentioned in this PDF?"]
-    ]
-)
-
-# Launch the interface
-iface.launch()
+import gradio as gr
+import PyPDF2
+import openai
+from config import OPENAI_API_KEY
+import os
+
+openai.api_key = OPENAI_API_KEY
+
+class PDFChat:
+    def __init__(self):
+        self.pdf_text = ""
+
+    def extract_text_from_pdf(self, pdf_file):
+        """Extract text from PDF file and store it"""
+        if not pdf_file:
+            return "Please upload a PDF file first."
+
+        try:
+            self.pdf_text = ""  # Clear previous content
+            with open(pdf_file.name, "rb") as file:
+                reader = PyPDF2.PdfReader(file)
+                for page in reader.pages:
+                    self.pdf_text += page.extract_text() + "\n"
+            return "PDF loaded successfully! You can now ask questions."
+        except Exception as e:
+            return f"Error loading PDF: {str(e)}"
+
+    def answer_question(self, question, chat_history):
+        """Generate answer based on PDF content and conversation history"""
+        if not self.pdf_text:
+            return [[question, "Please upload and load a PDF file first."]]
+
+        if not question:
+            return chat_history
+
+        # Construct the conversation context
+        messages = [
+            {"role": "system", "content": "You are a helpful assistant that answers questions based on the PDF content."},
+            {"role": "system", "content": f"PDF Content: {self.pdf_text}"}
         ]
+
+        # Add conversation history
+        for human, assistant in chat_history:
+            messages.append({"role": "user", "content": human})
+            messages.append({"role": "assistant", "content": assistant})
+
+        # Add current question
+        messages.append({"role": "user", "content": question})
+
+        try:
+            response = openai.ChatCompletion.create(
+                model="gpt-4-turbo",
+                messages=messages
+            )
+            answer = response.choices[0].message['content']
+
+            # Update chat history with new question and answer
+            chat_history.append((question, answer))
+            return chat_history
+        except Exception as e:
+            error_message = f"Error generating response: {str(e)}"
+            chat_history.append((question, error_message))
+            return chat_history
+
+    def clear_history(self):
+        """Clear conversation history"""
+        return []
+
+css = """
+.container {
+    max-width: 800px;
+    margin: auto;
+}
+.chat-window {
+    height: 600px;
+    overflow-y: auto;
+}
+"""
+
+# Create PDF Chat instance
+pdf_chat = PDFChat()
+
+# Create the Gradio interface
+with gr.Blocks(css=css, theme='Taithrah/Minimal') as demo:
+    gr.Markdown("# Renesas PDF Chatbot")
+
+    with gr.Row():
+        with gr.Column(scale=2):
+            pdf_input = gr.File(
+                label="Upload PDF",
+                file_types=[".pdf"]
+            )
+            load_button = gr.Button("Load PDF")
+            status_text = gr.Textbox(
+                label="Status",
+                interactive=False
+            )
+
+    with gr.Row():
+        chatbot = gr.Chatbot(
+            [],
+            elem_id="chatbot",
+            label="Chat History",
+            height=400
+        )
+
+    with gr.Row():
+        question_input = gr.Textbox(
+            label="Ask a question",
+            placeholder="What would you like to know about the PDF?",
+            scale=4
+        )
+        submit_button = gr.Button("Send", scale=1)
+        clear_button = gr.Button("Clear History", scale=1)
+
+    # Example queries
+    gr.Examples(
+        examples=[
+            ["renesas-ra6m1-group-datasheet.pdf", "Which Renesas products are mentioned in this PDF?"],
+            ["renesas-ra6m1-group-datasheet.pdf", "What are the key features of the microcontroller?"],
+            ["renesas-ra6m1-group-datasheet.pdf", "Explain the power consumption specifications."]
+        ],
+        inputs=[pdf_input, question_input],
+        label="Example Queries"
+    )
+
+    # Event handlers
+    load_button.click(
+        pdf_chat.extract_text_from_pdf,
+        inputs=[pdf_input],
+        outputs=[status_text]
     )

+    # Function to clear input after sending
+    def clear_input():
+        return ""
+
+    question_input.submit(
+        pdf_chat.answer_question,
+        inputs=[question_input, chatbot],
+        outputs=[chatbot]
+    ).then(
+        clear_input,
+        outputs=[question_input]
+    )
+
+    submit_button.click(
+        pdf_chat.answer_question,
+        inputs=[question_input, chatbot],
+        outputs=[chatbot]
+    ).then(
+        clear_input,
+        outputs=[question_input]
+    )
+
+    clear_button.click(
+        pdf_chat.clear_history,
+        outputs=[chatbot]
+    )
+
+# Launch the interface with sharing enabled
+if __name__ == "__main__":
+    demo.launch(debug=True)
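
For quick local verification, the snippet below is a minimal sketch (not part of the commit) of how the new PDFChat class could be driven without the Gradio UI. It assumes the legacy OpenAI SDK (openai<1.0, which still exposes openai.ChatCompletion, as the committed code does) and a config.py providing OPENAI_API_KEY; the FakeUpload helper and the example.pdf file name are hypothetical stand-ins for the object Gradio passes to the event handlers.

# Hypothetical smoke test for PDFChat; not part of app.py.
from app import PDFChat  # importing app.py also builds (but does not launch) the Blocks UI

class FakeUpload:
    # Gradio hands the handler an object whose .name holds the uploaded file's path;
    # this stand-in points at a local placeholder PDF.
    name = "example.pdf"

chat = PDFChat()
print(chat.extract_text_from_pdf(FakeUpload()))   # expect "PDF loaded successfully! ..."

history = chat.answer_question("Which Renesas products are mentioned in this PDF?", [])
print(history[-1][1])                             # the model's answer, or an error message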