import os
import gradio as gr
from gradio import ChatMessage
from typing import Iterator
import google.generativeai as genai
import time  # For the optional streaming delay used while debugging
# Get the Gemini API key from the GEMINI_API_KEY environment variable
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
genai.configure(api_key=GEMINI_API_KEY)
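# (Assumption: on Hugging Face Spaces this is typically configured as a
#  Space secret; when running locally, export GEMINI_API_KEY in your shell.)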
# We use the experimental Gemini 2.0 Flash model with "thinking" capabilities
model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-1219")
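# When streamed, this experimental model emits its intermediate "thoughts"
# before the final answer; stream_gemini_response() below relies on that
# ordering to split the output into a "Thinking" bubble and a reply bubble.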
def format_chat_history(messages: list) -> list:
    """
    Formats the chat history into a structure Gemini can understand
    """
    formatted_history = []
    for message in messages:
        # Skip thinking messages (messages with metadata)
        if not (message.get("role") == "assistant" and "metadata" in message):
            formatted_history.append({
                # Gemini expects the roles "user" and "model", not "assistant"
                "role": "user" if message.get("role") == "user" else "model",
                "parts": [message.get("content", "")]
            })
    return formatted_history
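# The structure produced above looks like this (hypothetical two-turn chat):
#   [{"role": "user", "parts": ["Write a haiku about a cat."]},
#    {"role": "model", "parts": ["Soft paws on the sill..."]}]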
def stream_gemini_response(message_input: str | gr.File, messages: list) -> Iterator[list]:
    """
    Streams thoughts and response with conversation history support, handling text or file input.
    """
    user_message = ""
    input_file = None

    if isinstance(message_input, str):
        user_message = message_input
        print(f"\n=== New Request (Text) ===")
        print(f"User message: {user_message}")
        if not user_message.strip():  # Reject empty or whitespace-only text
            messages.append(ChatMessage(role="assistant", content="Please provide a non-empty text message or upload a file."))
            yield messages
            return
    elif hasattr(message_input, "name"):  # Uploaded file: depending on the Gradio version, gr.File delivers a tempfile-like object whose .name is the path on disk
        input_file = message_input.name  # Temporary file path
        file_type = os.path.splitext(input_file)[1].lstrip(".").lower()  # Extension from the uploaded filename
        print(f"\n=== New Request (File) ===")
        print(f"File uploaded: {input_file}, type: {file_type}")

        try:
            with open(input_file, "rb") as f:  # Binary mode works for both images and CSVs
                file_data = f.read()

            if file_type in ['png', 'jpg', 'jpeg', 'gif']:  # Example image types; expand as needed
                mime_type = "image/jpeg" if file_type == "jpg" else f"image/{file_type}"  # "jpg" is not a valid MIME subtype
                user_message = {"inline_data": {"mime_type": mime_type, "data": file_data}}  # Image part for Gemini
            elif file_type == 'csv':
                user_message = {"inline_data": {"mime_type": "text/csv", "data": file_data}}  # CSV part for Gemini
            else:
                messages.append(ChatMessage(role="assistant", content=f"Unsupported file type: .{file_type}. Please upload an image (png/jpg/jpeg/gif) or a CSV file."))
                yield messages
                return
        except Exception as e:
            print(f"Error reading file: {e}")
            messages.append(ChatMessage(role="assistant", content=f"Error reading file: {e}"))
            yield messages
            return
    else:
        messages.append(ChatMessage(role="assistant", content="Sorry, I cannot understand this input format. Please use text or upload a valid file."))
        yield messages
        return
    try:
        # Format chat history for Gemini
        chat_history = format_chat_history(messages)

        # Initialize Gemini chat
        chat = model.start_chat(history=chat_history)
        response = chat.send_message(user_message, stream=True)  # Send the text or file part as-is

        # Initialize buffers and flags
        thought_buffer = ""
        response_buffer = ""
        thinking_complete = False

        # Add an initial, empty "thinking" message that streaming will fill in
        messages.append(
            ChatMessage(
                role="assistant",
                content="",
                metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
            )
        )
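        # Streaming contract observed from this model: while it is thinking,
        # each chunk carries a single "thought" part; the transition chunk
        # carries two parts (end of thought, start of answer); after that,
        # chunks carry answer text only.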
        for chunk in response:
            parts = chunk.candidates[0].content.parts
            current_chunk = parts[0].text

            if len(parts) == 2 and not thinking_complete:
                # Complete the thought and start the response
                thought_buffer += current_chunk
                print(f"\n=== Complete Thought ===\n{thought_buffer}")

                messages[-1] = ChatMessage(
                    role="assistant",
                    content=thought_buffer,
                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
                )
                yield messages

                # Start the response
                response_buffer = parts[1].text
                print(f"\n=== Starting Response ===\n{response_buffer}")

                messages.append(
                    ChatMessage(
                        role="assistant",
                        content=response_buffer
                    )
                )
                thinking_complete = True
            elif thinking_complete:
                # Stream the response
                response_buffer += current_chunk
                print(f"\n=== Response Chunk ===\n{current_chunk}")

                messages[-1] = ChatMessage(
                    role="assistant",
                    content=response_buffer
                )
            else:
                # Stream the thinking
                thought_buffer += current_chunk
                print(f"\n=== Thinking Chunk ===\n{current_chunk}")

                messages[-1] = ChatMessage(
                    role="assistant",
                    content=thought_buffer,
                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
                )
            # time.sleep(0.05)  # Optional: uncomment to slow streaming down while debugging

            yield messages

        print(f"\n=== Final Response ===\n{response_buffer}")
    except Exception as e:
        print(f"\n=== Error ===\n{str(e)}")
        messages.append(
            ChatMessage(
                role="assistant",
                content=f"I apologize, but I encountered an error: {str(e)}"
            )
        )
        yield messages
def user_message(message_text, file_upload, history: list) -> tuple[str, None, list]:
    """Adds the user's text or uploaded filename to the chat history"""
    msg = message_text if message_text else file_upload
    if msg is not None:  # Guard against a submit with neither text nor file
        history.append(ChatMessage(role="user", content=msg if isinstance(msg, str) else msg.name))
    return "", None, history  # Clear both input fields
# Create the Gradio interface
with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:
    gr.Markdown("# Gemini 2.0 Flash 'Thinking' Chatbot 💭")

    chatbot = gr.Chatbot(
        type="messages",
        label="Gemini 2.0 'Thinking' Chatbot (Streaming Output)",
        render_markdown=True,
        scale=1,
        avatar_images=(None, "https://lh3.googleusercontent.com/oxz0sUBF0iYoN4VvhqWTmux-cxfD1rxuYkuFEfm1SFaseXEsjjE4Je_C_V3UQPuJ87sImQK3HfQ3RXiaRnQetjaZbjJJUkiPL5jFJ1WRl5FKJZYibUA=w214-h214-n-nu")
    )
    with gr.Row(equal_height=True):
        input_box = gr.Textbox(
            lines=1,
            label="Chat Message",
            placeholder="Type your message here...",
            scale=3
        )
        file_upload = gr.File(label="Upload File", file_types=["image", ".csv"], scale=2)  # Allow image and CSV uploads
        clear_button = gr.Button("Clear Chat", scale=1)
    # Add example prompts
    example_prompts = [
        ["Write a short poem about the sunset."],
        ["Explain the theory of relativity in simple terms."],
        ["If a train leaves Chicago at 6am traveling at 60mph, and another train leaves New York at 8am traveling at 80mph, at what time will they meet?"],
        ["Summarize the plot of Hamlet."],
        ["Write a haiku about a cat."]
    ]

    gr.Examples(
        examples=example_prompts,
        inputs=[input_box],
        label="Examples: Get Gemini to show its thinking process with these prompts!",
        examples_per_page=5  # Adjust as needed
    )
    # Set up event handlers
    msg_store = gr.State("")     # Store for preserving the user's text message
    file_store = gr.State(None)  # Store for preserving the uploaded file

    input_box.submit(
        lambda msg: msg,  # Stash the message in msg_store before the inputs are cleared
        inputs=[input_box],
        outputs=[msg_store],
        queue=False
    ).then(
        user_message,  # Append the user message to the chat and clear both input fields
        inputs=[msg_store, file_upload, chatbot],
        outputs=[input_box, file_upload, chatbot],
        queue=False
    ).then(
        stream_gemini_response,  # Stream thoughts and answer from the stored message
        inputs=[msg_store, chatbot],
        outputs=chatbot
    )
    file_upload.upload(
        lambda f: f,  # Stash the upload in file_store before the inputs are cleared
        inputs=[file_upload],
        outputs=[file_store],
        queue=False
    ).then(
        user_message,  # The textbox is also an input here so both fields get cleared
        inputs=[input_box, file_upload, chatbot],
        outputs=[input_box, file_upload, chatbot],
        queue=False
    ).then(
        stream_gemini_response,  # Input is now the stored uploaded file
        inputs=[file_store, chatbot],
        outputs=chatbot
    )
    clear_button.click(
        lambda: ([], "", "", None, None),  # Also reset the file input and both stores
        outputs=[chatbot, input_box, msg_store, file_upload, file_store],
        queue=False
    )
    gr.Markdown(  # Description placed at the bottom of the page
        """
        <br><br><br>  <!-- Add some vertical space -->
        ---
        ### About this Chatbot
        This chatbot demonstrates the experimental 'thinking' capability of the **Gemini 2.0 Flash** model.
        You can observe the model's thought process as it generates responses, displayed with the "⚙️ Thinking" prefix.

        **Try out the example prompts above to see Gemini in action!**

        **Key Features:**

        *   Powered by Google's **Gemini 2.0 Flash** model.
        *   Shows the model's **thoughts** before the final answer (experimental feature).
        *   Supports **conversation history** for multi-turn chats.
        *   Supports **image and CSV file uploads** for analysis.
        *   Uses **streaming** for a more interactive experience.

        **Instructions:**

        1.  Type your message in the input box, or upload an image/CSV file.
        2.  Press Enter (or upload the file) to send.
        3.  Observe the chatbot's "Thinking" process followed by the final response.
        4.  Use the "Clear Chat" button to start a new conversation.

        *Please note*: The 'thinking' feature is experimental and the quality of thoughts may vary. File analysis capabilities may be limited depending on the model's experimental features.
        """
    )
# Launch the interface
if __name__ == "__main__":
    demo.launch(debug=True)