JeCabrera committed on
Commit
a6341df
·
verified ·
1 Parent(s): ccf1751

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -169
app.py CHANGED
@@ -3,175 +3,32 @@ import gradio as gr
3
  from gradio import ChatMessage
4
  from typing import Iterator
5
  import google.generativeai as genai
6
-
7
# Read the Gemini API key from the environment and configure the SDK.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
genai.configure(api_key=GEMINI_API_KEY)

# Experimental Gemini 2.0 Flash model that exposes its "thinking" parts.
model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-1219")
13
-
14
-
15
def format_chat_history(messages: list) -> list:
    """Convert Gradio-style chat messages into Gemini's history format.

    Assistant entries that carry ``metadata`` are "thinking" bubbles and are
    skipped; every other entry becomes a ``{"role", "parts"}`` dict, where any
    non-user role is normalized to ``"assistant"``.
    """
    history = []
    for entry in messages:
        # Drop thought messages — Gemini should not see its own scratchpad.
        if entry.get("role") == "assistant" and "metadata" in entry:
            continue
        role = "user" if entry.get("role") == "user" else "assistant"
        history.append({"role": role, "parts": [entry.get("content", "")]})
    return history
28
-
29
def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
    """Stream the model's thoughts and answer, yielding updated message lists.

    The prior conversation is replayed to Gemini; a "thinking" bubble is
    filled in chunk by chunk until the response part appears, after which the
    answer bubble is streamed. On any failure, an apology message is appended.
    """
    thinking_title = "⚙️ Thinking: *The thoughts produced by the model are experimental"
    try:
        print(f"\n=== New Request ===")
        print(f"User message: {user_message}")

        # Replay earlier turns so the model has conversation context.
        session = model.start_chat(history=format_chat_history(messages))
        stream = session.send_message(user_message, stream=True)

        thoughts = ""
        answer = ""
        answering = False

        # Placeholder bubble the thinking chunks will progressively fill.
        messages.append(
            ChatMessage(
                role="assistant",
                content="",
                metadata={"title": thinking_title},
            )
        )

        for chunk in stream:
            parts = chunk.candidates[0].content.parts
            text = parts[0].text

            if not answering and len(parts) == 2:
                # The thought is complete; the second part opens the answer.
                thoughts += text
                print(f"\n=== Complete Thought ===\n{thoughts}")
                messages[-1] = ChatMessage(
                    role="assistant",
                    content=thoughts,
                    metadata={"title": thinking_title},
                )
                yield messages

                answer = parts[1].text
                print(f"\n=== Starting Response ===\n{answer}")
                messages.append(ChatMessage(role="assistant", content=answer))
                answering = True
            elif answering:
                # Extend the visible answer bubble.
                answer += text
                print(f"\n=== Response Chunk ===\n{text}")
                messages[-1] = ChatMessage(role="assistant", content=answer)
            else:
                # Still thinking: grow the thought bubble.
                thoughts += text
                print(f"\n=== Thinking Chunk ===\n{text}")
                messages[-1] = ChatMessage(
                    role="assistant",
                    content=thoughts,
                    metadata={"title": thinking_title},
                )

            yield messages

        print(f"\n=== Final Response ===\n{answer}")

    except Exception as e:
        print(f"\n=== Error ===\n{str(e)}")
        messages.append(
            ChatMessage(
                role="assistant",
                content=f"I apologize, but I encountered an error: {str(e)}",
            )
        )
        yield messages
120
-
121
def user_message(msg: str, history: list) -> tuple[str, list]:
    """Append *msg* as a user turn; return "" so the input box is cleared."""
    entry = ChatMessage(role="user", content=msg)
    history.append(entry)
    return "", history
125
-
126
-
127
# Create the Gradio interface
with gr.Blocks(theme=gr.themes.Citrus(), fill_height=True) as demo:
    gr.Markdown("# Chat with Gemini 2.0 Flash and See its Thoughts 💭")

    chatbot = gr.Chatbot(
        type="messages",
        label="Gemini2.0 'Thinking' Chatbot",
        render_markdown=True,
        scale=1,
        avatar_images=(None, "https://lh3.googleusercontent.com/oxz0sUBF0iYoN4VvhqWTmux-cxfD1rxuYkuFEfm1SFaseXEsjjE4Je_C_V3UQPuJ87sImQK3HfQ3RXiaRnQetjaZbjJJUkiPL5jFJ1WRl5FKJZYibUA=w214-h214-n-nu"),
    )

    with gr.Row(equal_height=True):
        input_box = gr.Textbox(
            lines=1,
            label="Chat Message",
            placeholder="Type your message here...",
            scale=4,
        )
        clear_button = gr.Button("Clear Chat", scale=1)

    # Preserves the submitted text after the visible textbox is cleared.
    msg_store = gr.State("")

    # Fix: the original returned (msg, msg, "") into
    # outputs=[msg_store, input_box, input_box] — input_box was listed twice
    # and written twice per submit. Two outputs store and clear correctly.
    input_box.submit(
        lambda msg: (msg, ""),  # store the message and clear the input
        inputs=[input_box],
        outputs=[msg_store, input_box],
        queue=False,
    ).then(
        user_message,  # add user message to chat
        inputs=[msg_store, chatbot],
        outputs=[input_box, chatbot],
        queue=False,
    ).then(
        stream_gemini_response,  # generate and stream response
        inputs=[msg_store, chatbot],
        outputs=chatbot,
    )

    clear_button.click(
        lambda: ([], "", ""),
        outputs=[chatbot, input_box, msg_store],
        queue=False,
    )

# Launch the interface
if __name__ == "__main__":
    demo.launch(debug=True)
 
3
  from gradio import ChatMessage
4
  from typing import Iterator
5
  import google.generativeai as genai
6
+ import random
7
+
8
+ example_code = """
9
+ Here's an example Python lambda function:
10
+
11
+ lambda x: x + {}
12
+
13
+ Is this correct?
14
+ """
15
+
16
def chat(message, history):
    """Reply "Great!" to a confirmation; otherwise offer a fresh lambda
    example with clickable Yes/No reply options."""
    if message == "Yes, that's correct.":
        return "Great!"
    # Clicking "Yes" submits the exact confirmation string matched above;
    # "No" submits its value and triggers another example.
    reply_options = [
        {"value": "Yes, that's correct.", "label": "Yes"},
        {"value": "No"},
    ]
    return gr.ChatMessage(
        content=example_code.format(random.randint(1, 100)),
        options=reply_options,
    )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
# Wire the chat handler into Gradio's ready-made chat UI.
demo = gr.ChatInterface(
    chat,
    type="messages",
    examples=["Write an example Python lambda function."],
)

# Guard the launch so importing this module doesn't start a server;
# running `python app.py` (as Spaces does) behaves exactly as before.
if __name__ == "__main__":
    demo.launch()