aiqcamp committed · verified
Commit 9809596 · Parent(s): c8b48ee

Update app.py

Files changed (1): app.py (+3, -234)
app.py CHANGED
@@ -1,239 +1,8 @@
-import os
-import gradio as gr
-from gradio import ChatMessage
-from typing import Iterator
-import google.generativeai as genai
-
-# get Gemini API Key from the environ variable
-GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
-genai.configure(api_key=GEMINI_API_KEY)
-
-# we will be using the Gemini 2.0 Flash model with Thinking capabilities
-model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-1219") # Consider Gemini Pro Vision for Image input
-
-def format_chat_history(messages: list) -> list:
-    """
-    Formats the chat history into a structure Gemini can understand
-    """
-    formatted_history = []
-    for message in messages:
-        # Skip thinking messages (messages with metadata)
-        if not (message.get("role") == "assistant" and "metadata" in message):
-            formatted_history.append({
-                "role": "user" if message.get("role") == "user" else "assistant",
-                "parts": [message.get("content", "")]
-            })
-    return formatted_history
-
-def stream_gemini_response(message_input: str|gr.File, messages: list) -> Iterator[list]:
-    """
-    Streams thoughts and response with conversation history support, handling text or file input.
-    """
-    user_message = ""
-    input_file = None
-
     if isinstance(message_input, str):
         user_message = message_input
         print(f"\n=== New Request (Text) ===")
         print(f"User message: {user_message}")
+        if not user_message.strip(): # Check if text message is empty or whitespace
+            messages.append(ChatMessage(role="assistant", content="Please input a text message or upload a file. Empty input is not allowed."))
-    elif isinstance(message_input, gr.File): #gr.File directly should be used with newer gradio versions (v4+)
-        input_file = message_input.name # Access the temporary file path
-        file_type = message_input.original_name.split('.')[-1].lower() #Get original filename's extension
-        print(f"\n=== New Request (File) ===")
-        print(f"File uploaded: {input_file}, type: {file_type}")
-
-        try:
-            with open(input_file, "rb") as f: #Open file in binary mode for universal handling
-                file_data = f.read()
-
-            if file_type in ['png', 'jpg', 'jpeg', 'gif']: #Example Image Types - expand as needed
-                user_message = {"inline_data": {"mime_type": f"image/{file_type}", "data": file_data}} #Prepare image part for Gemini
-            elif file_type == 'csv':
-                user_message = {"inline_data": {"mime_type": "text/csv", "data": file_data}} #Prepare csv part
-
-        except Exception as e:
-            print(f"Error reading file: {e}")
-            messages.append(ChatMessage(role="assistant", content=f"Error reading file: {e}"))
             yield messages
-            return
+            return
-    else:
-        messages.append(ChatMessage(role="assistant", content="Sorry, I cannot understand this input format."))
-        yield messages
-        return
-
-
-    try:
-        # Format chat history for Gemini
-        chat_history = format_chat_history(messages)
-
-        # Initialize Gemini chat
-        chat = model.start_chat(history=chat_history)
-        response = chat.send_message(user_message, stream=True) #Send the message part as is
-
-        # Initialize buffers and flags - same as before
-        thought_buffer = ""
-        response_buffer = ""
-        thinking_complete = False
-
-
-        # Add initial thinking message - same as before
-        messages.append(
-            ChatMessage(
-                role="assistant",
-                content="",
-                metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-            )
-        )
-
-        for chunk in response: #streaming logic - same as before
-            parts = chunk.candidates[0].content.parts
-            current_chunk = parts[0].text
-
-            if len(parts) == 2 and not thinking_complete:
-                # Complete thought and start response
-                thought_buffer += current_chunk
-                print(f"\n=== Complete Thought ===\n{thought_buffer}")
-
-                messages[-1] = ChatMessage(
-                    role="assistant",
-                    content=thought_buffer,
-                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-                )
-                yield messages
-
-                # Start response
-                response_buffer = parts[1].text
-                print(f"\n=== Starting Response ===\n{response_buffer}")
-
-                messages.append(
-                    ChatMessage(
-                        role="assistant",
-                        content=response_buffer
-                    )
-                )
-                thinking_complete = True
-
-            elif thinking_complete:
-                # Stream response
-                response_buffer += current_chunk
-                print(f"\n=== Response Chunk ===\n{current_chunk}")
-
-                messages[-1] = ChatMessage(
-                    role="assistant",
-                    content=response_buffer
-                )
-
-            else:
-                # Stream thinking
-                thought_buffer += current_chunk
-                print(f"\n=== Thinking Chunk ===\n{thought_buffer}")
-
-                messages[-1] = ChatMessage(
-                    role="assistant",
-                    content=thought_buffer,
-                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-                )
-
-            yield messages
-
-        print(f"\n=== Final Response ===\n{response_buffer}")
-
-
-    except Exception as e:
-        print(f"\n=== Error ===\n{str(e)}")
-        messages.append(
-            ChatMessage(
-                role="assistant",
-                content=f"I apologize, but I encountered an error: {str(e)}"
-            )
-        )
-        yield messages
-
-def user_message(message_text, file_upload, history: list) -> tuple[str, None, list]:
-    """Adds user message to chat history"""
-    msg = message_text if message_text else file_upload
-    history.append(ChatMessage(role="user", content=msg if isinstance(msg, str) else msg.name)) #Store message or filename in history.
-    return "", None, history #clear both input fields
-
-# Create the Gradio interface
-with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:
-    gr.Markdown("# Gemini 2.0 Flash 'Thinking' Chatbot 💭")
-
-    chatbot = gr.Chatbot(
-        type="messages",
-        label="Gemini2.0 'Thinking' Chatbot",
-        render_markdown=True,
-        scale=1,
-        avatar_images=(None,"https://lh3.googleusercontent.com/oxz0sUBF0iYoN4VvhqWTmux-cxfD1rxuYkuFEfm1SFaseXEsjjE4Je_C_V3UQPuJ87sImQK3HfQ3RXiaRnQetjaZbjJJUkiPL5jFJ1WRl5FKJZYibUA=w214-h214-n-nu")
-    )
-
-    with gr.Row(equal_height=True):
-        input_box = gr.Textbox(
-            lines=1,
-            label="Chat Message",
-            placeholder="Type your message here...",
-            scale=3
-        )
-        file_upload = gr.File(label="Upload File", file_types=["image", ".csv"], scale=2) # Allow image and CSV files
-
-    clear_button = gr.Button("Clear Chat", scale=1)
-
-    # Set up event handlers
-    msg_store = gr.State("") # Store for preserving user message
-
-
-    input_box.submit(
-        user_message,
-        inputs=[input_box, file_upload, chatbot],
-        outputs=[input_box, file_upload, chatbot],
-        queue=False
-    ).then(
-        stream_gemini_response,
-        inputs=[input_box, chatbot], # Input either from text box or file, logic inside stream_gemini_response
-        outputs=chatbot
-    )
-
-    file_upload.upload(
-        user_message,
-        inputs=[input_box, file_upload, chatbot], # even textbox is input here so clearing both will work
-        outputs=[input_box, file_upload, chatbot],
-        queue=False
-    ).then(
-        stream_gemini_response,
-        inputs=[file_upload, chatbot], # Input is now the uploaded file.
-        outputs=chatbot
-    )
-
-
-    clear_button.click(
-        lambda: ([], "", ""),
-        outputs=[chatbot, input_box, msg_store],
-        queue=False
-    )
-
-    gr.Markdown( # Description moved to the bottom
-        """
-        <br><br><br> <!-- Add some vertical space -->
-        ---
-        ### About this Chatbot
-        This chatbot demonstrates the experimental 'thinking' capability of the **Gemini 2.0 Flash** model.
-        You can observe the model's thought process as it generates responses, displayed with the "⚙️ Thinking" prefix.
-        **Key Features:**
-        * Powered by Google's **Gemini 2.0 Flash** model.
-        * Shows the model's **thoughts** before the final answer (experimental feature).
-        * Supports **conversation history** for multi-turn chats.
-        * Supports **Image and CSV file uploads** for analysis.
-        * Uses **streaming** for a more interactive experience.
-        **Instructions:**
-        1. Type your message in the input box or Upload a file below.
-        2. Press Enter/Submit or Upload to send.
-        3. Observe the chatbot's "Thinking" process followed by the final response.
-        4. Use the "Clear Chat" button to start a new conversation.
-        *Please note*: The 'thinking' feature is experimental and the quality of thoughts may vary. File analysis capabilities may be limited depending on the model's experimental features.
-        """
-    )
-
-
-# Launch the interface
-if __name__ == "__main__":
-    demo.launch(debug=True)
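
After this commit, app.py retains only the eight lines shown as context and additions in the hunk above: the text-input branch of stream_gemini_response plus a new guard that rejects empty or whitespace-only messages before anything is sent to Gemini. The following is a minimal sketch of that guard in context; the function signature and the ChatMessage import are carried over from the removed revision for illustration only, and are not part of this commit's three added lines.

from gradio import ChatMessage

def stream_gemini_response(message_input, messages: list):
    # Sketch only: the guard added in this commit refuses empty or
    # whitespace-only text before any request is made to the Gemini API.
    if isinstance(message_input, str):
        user_message = message_input
        if not user_message.strip():
            messages.append(ChatMessage(
                role="assistant",
                content="Please input a text message or upload a file. Empty input is not allowed."
            ))
            yield messages
            return
    # ...file handling and Gemini streaming from the previous revision
    # would continue here.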