freddyaboulton committed
Commit 9b35006 · Parent(s): 29d30f2
Files changed (2)
  1. app.py +112 -110
  2. requirements.txt +10 -99
app.py CHANGED
@@ -4,36 +4,15 @@ import json
 import os
 from dataclasses import dataclass
 from typing import Any
+
 import gradio as gr
+from dotenv import load_dotenv
 from httpx import AsyncClient
-from groq import Groq
-import numpy as np
-from gradio_webrtc import WebRTC, AdditionalOutputs, ReplyOnPause, audio_to_bytes
-
 from pydantic_ai import Agent, ModelRetry, RunContext
-from pydantic_ai.messages import ModelStructuredResponse, ToolReturn, ModelTextResponse
-
-from dotenv import load_dotenv
+from pydantic_ai.messages import ModelStructuredResponse, ModelTextResponse, ToolReturn

 load_dotenv()

-import logging
-
-# Configure the root logger to WARNING to suppress debug messages from other libraries
-logging.basicConfig(level=logging.WARNING)
-
-# Create a console handler
-console_handler = logging.StreamHandler()
-console_handler.setLevel(logging.DEBUG)
-
-# Create a formatter
-formatter = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
-console_handler.setFormatter(formatter)
-
-# Configure the logger for your specific library
-logger = logging.getLogger("gradio_webrtc")
-logger.setLevel(logging.DEBUG)
-logger.addHandler(console_handler)

 @dataclass
 class Deps:
@@ -43,8 +22,8 @@ class Deps:


 weather_agent = Agent(
-    'openai:gpt-4o',
-    system_prompt='You are an expert packer. A user will ask you for help packing for a trip given a destination. Use your weather tools to provide a concise and effective packing list. Also ask follow up questions if neccessary.',
+    "openai:gpt-4o",
+    system_prompt="You are an expert packer. A user will ask you for help packing for a trip given a destination. Use your weather tools to provide a concise and effective packing list. Also ask follow up questions if neccessary.",
     deps_type=Deps,
     retries=2,
 )
@@ -62,20 +41,20 @@ async def get_lat_lng(
     """
     if ctx.deps.geo_api_key is None:
         # if no API key is provided, return a dummy response (London)
-        return {'lat': 51.1, 'lng': -0.1}
+        return {"lat": 51.1, "lng": -0.1}

     params = {
-        'q': location_description,
-        'api_key': ctx.deps.geo_api_key,
+        "q": location_description,
+        "api_key": ctx.deps.geo_api_key,
     }
-    r = await ctx.deps.client.get('https://geocode.maps.co/search', params=params)
+    r = await ctx.deps.client.get("https://geocode.maps.co/search", params=params)
     r.raise_for_status()
     data = r.json()

     if data:
-        return {'lat': data[0]['lat'], 'lng': data[0]['lon']}
+        return {"lat": data[0]["lat"], "lng": data[0]["lon"]}
     else:
-        raise ModelRetry('Could not find the location')
+        raise ModelRetry("Could not find the location")


 @weather_agent.tool
@@ -89,107 +68,115 @@ async def get_weather(ctx: RunContext[Deps], lat: float, lng: float) -> dict[str
     """
     if ctx.deps.weather_api_key is None:
         # if no API key is provided, return a dummy response
-        return {'temperature': '21 °C', 'description': 'Sunny'}
+        return {"temperature": "21 °C", "description": "Sunny"}

     params = {
-        'apikey': ctx.deps.weather_api_key,
-        'location': f'{lat},{lng}',
-        'units': 'metric',
+        "apikey": ctx.deps.weather_api_key,
+        "location": f"{lat},{lng}",
+        "units": "metric",
     }
     r = await ctx.deps.client.get(
-        'https://api.tomorrow.io/v4/weather/realtime', params=params
+        "https://api.tomorrow.io/v4/weather/realtime", params=params
     )
     r.raise_for_status()
     data = r.json()

-    values = data['data']['values']
+    values = data["data"]["values"]
     # https://docs.tomorrow.io/reference/data-layers-weather-codes
     code_lookup = {
-        1000: 'Clear, Sunny',
-        1100: 'Mostly Clear',
-        1101: 'Partly Cloudy',
-        1102: 'Mostly Cloudy',
-        1001: 'Cloudy',
-        2000: 'Fog',
-        2100: 'Light Fog',
-        4000: 'Drizzle',
-        4001: 'Rain',
-        4200: 'Light Rain',
-        4201: 'Heavy Rain',
-        5000: 'Snow',
-        5001: 'Flurries',
-        5100: 'Light Snow',
-        5101: 'Heavy Snow',
-        6000: 'Freezing Drizzle',
-        6001: 'Freezing Rain',
-        6200: 'Light Freezing Rain',
-        6201: 'Heavy Freezing Rain',
-        7000: 'Ice Pellets',
-        7101: 'Heavy Ice Pellets',
-        7102: 'Light Ice Pellets',
-        8000: 'Thunderstorm',
+        1000: "Clear, Sunny",
+        1100: "Mostly Clear",
+        1101: "Partly Cloudy",
+        1102: "Mostly Cloudy",
+        1001: "Cloudy",
+        2000: "Fog",
+        2100: "Light Fog",
+        4000: "Drizzle",
+        4001: "Rain",
+        4200: "Light Rain",
+        4201: "Heavy Rain",
+        5000: "Snow",
+        5001: "Flurries",
+        5100: "Light Snow",
+        5101: "Heavy Snow",
+        6000: "Freezing Drizzle",
+        6001: "Freezing Rain",
+        6200: "Light Freezing Rain",
+        6201: "Heavy Freezing Rain",
+        7000: "Ice Pellets",
+        7101: "Heavy Ice Pellets",
+        7102: "Light Ice Pellets",
+        8000: "Thunderstorm",
     }
     return {
-        'temperature': f'{values["temperatureApparent"]:0.0f}°C',
-        'description': code_lookup.get(values['weatherCode'], 'Unknown'),
+        "temperature": f'{values["temperatureApparent"]:0.0f}°C',
+        "description": code_lookup.get(values["weatherCode"], "Unknown"),
     }


-TOOL_TO_DISPLAY_NAME = {
-    'get_lat_lng': 'Geocoding API',
-    "get_weather": "Weather API"
-}
-
-
-groq_client = Groq()
+TOOL_TO_DISPLAY_NAME = {"get_lat_lng": "Geocoding API", "get_weather": "Weather API"}

 client = AsyncClient()
-weather_api_key = os.getenv('WEATHER_API_KEY')
+weather_api_key = os.getenv("WEATHER_API_KEY")
 # create a free API key at https://geocode.maps.co/
-geo_api_key = os.getenv('GEO_API_KEY')
-deps = Deps(
-    client=client, weather_api_key=weather_api_key, geo_api_key=geo_api_key
-)
-
-
-async def stream_from_agent(audio: tuple[int, np.ndarray], chatbot: list[dict], past_messages: list):
+geo_api_key = os.getenv("GEO_API_KEY")
+deps = Deps(client=client, weather_api_key=weather_api_key, geo_api_key=geo_api_key)

-    prompt = groq_client.audio.transcriptions.create(
-        file=("audio-file.mp3", audio_to_bytes(audio)),
-        model="whisper-large-v3-turbo",
-        response_format="verbose_json",
-    ).text

-    chatbot.append({'role': 'user', 'content': prompt})
-    yield AdditionalOutputs(chatbot, gr.skip())
-    async with weather_agent.run_stream(prompt, deps=deps, message_history=past_messages) as result:
+async def stream_from_agent(prompt: str, chatbot: list[dict], past_messages: list):
+    chatbot.append({"role": "user", "content": prompt})
+    yield gr.Textbox(interactive=False, value=""), chatbot, gr.skip()
+    async with weather_agent.run_stream(
+        prompt, deps=deps, message_history=past_messages
+    ) as result:
         for message in result.new_messages():
             past_messages.append(message)
             if isinstance(message, ModelStructuredResponse):
                 for call in message.calls:
-                    gr_message = {"role": "assistant",
-                                  "content": "",
-                                  "metadata": {"title": f"### 🛠️ Using {TOOL_TO_DISPLAY_NAME[call.tool_name]}",
-                                               "id": call.tool_id}
+                    gr_message = {
+                        "role": "assistant",
+                        "content": "",
+                        "metadata": {
+                            "title": f"### 🛠️ Using {TOOL_TO_DISPLAY_NAME[call.tool_name]}",
+                            "id": call.tool_id,
+                        },
                     }
                     chatbot.append(gr_message)
             if isinstance(message, ToolReturn):
                 for gr_message in chatbot:
-                    if gr_message.get('metadata', {}).get('id', "") == message.tool_id:
-                        gr_message['content'] = f"Output: {json.dumps(message.content)}"
-            yield AdditionalOutputs(chatbot, gr.skip())
-        chatbot.append({'role': 'assistant', 'content': ""})
+                    if gr_message.get("metadata", {}).get("id", "") == message.tool_id:
+                        gr_message["content"] = f"Output: {json.dumps(message.content)}"
+            yield gr.skip(), chatbot, gr.skip()
+        chatbot.append({"role": "assistant", "content": ""})
         async for message in result.stream_text():
             chatbot[-1]["content"] = message
-            yield AdditionalOutputs(chatbot, gr.skip())
+            yield gr.skip(), chatbot, gr.skip()
         data = await result.get_data()
         past_messages.append(ModelTextResponse(content=data))
-    yield AdditionalOutputs(gr.skip(), past_messages)
+    yield gr.Textbox(interactive=True), gr.skip(), past_messages
+
+
+async def handle_retry(chatbot, past_messages: list, retry_data: gr.RetryData):
+    new_history = chatbot[: retry_data.index]
+    previous_prompt = chatbot[retry_data.index]["content"]
+    past_messages = past_messages[: retry_data.index]
+    async for update in stream_from_agent(previous_prompt, new_history, past_messages):
+        yield update
+
+
+def undo(chatbot, past_messages: list, undo_data: gr.UndoData):
+    new_history = chatbot[: undo_data.index]
+    past_messages = past_messages[: undo_data.index]
+    return chatbot[undo_data.index]["content"], new_history, past_messages
+
+
+def select_data(message: gr.SelectData) -> str:
+    return message.value["text"]


 with gr.Blocks() as demo:
     gr.HTML(
-    """
+        """
     <div style="display: flex; justify-content: center; align-items: center; gap: 2rem; padding: 1rem; width: 100%">
         <img src="https://ai.pydantic.dev/img/logo-white.svg" style="max-width: 200px; height: auto">
         <div>
@@ -203,19 +190,34 @@ with gr.Blocks() as demo:
         </div>
     </div>
     """
-)
+    )
     past_messages = gr.State([])
-    chatbot = gr.Chatbot(label="Packing Assistant", type="messages",
-                         avatar_images=(None, "https://ai.pydantic.dev/img/logo-white.svg"))
-    audio= WebRTC(label="Talk with the Agent",
-                  modality="audio",
-                  mode="send")
-    audio.stream(ReplyOnPause(stream_from_agent), inputs=[audio, chatbot, past_messages],
-                 outputs=[audio])
-    audio.on_additional_outputs(lambda c,s: (c, s), outputs=[chatbot, past_messages],
-                                queue=False, show_progress="hidden")
-
+    chatbot = gr.Chatbot(
+        label="Packing Assistant",
+        type="messages",
+        avatar_images=(None, "https://ai.pydantic.dev/img/logo-white.svg"),
+        examples=[
+            {"text": "I am going to Paris for the holidays, what should I pack?"},
+            {"text": "I am going to Tokyo this week."},
+        ],
+    )
+    with gr.Row():
+        prompt = gr.Textbox(
+            lines=1,
+            show_label=False,
+            placeholder="I am planning a trip to Miami, what should I pack?",
+        )
+    generation = prompt.submit(
+        stream_from_agent,
+        inputs=[prompt, chatbot, past_messages],
+        outputs=[prompt, chatbot, past_messages],
+    )
+    chatbot.example_select(select_data, None, [prompt])
+    chatbot.retry(
+        handle_retry, [chatbot, past_messages], [prompt, chatbot, past_messages]
+    )
+    chatbot.undo(undo, [chatbot, past_messages], [prompt, chatbot, past_messages])


-if __name__ == '__main__':
-    demo.launch()
+if __name__ == "__main__":
+    demo.launch()
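Below is a minimal sketch, not part of this commit, of exercising the updated weather_agent outside the Gradio UI. It assumes the new app.py is importable as app, that OPENAI_API_KEY is set for the openai:gpt-4o model, and that leaving the weather and geocoding keys as None triggers the dummy responses built into the tools; the Agent.run / result.data calls follow the pydantic-ai 0.0.9 API pinned in requirements.txt.

import asyncio

from httpx import AsyncClient

from app import Deps, weather_agent  # the module defined in this commit


async def main() -> None:
    # None API keys make get_lat_lng / get_weather fall back to their
    # dummy London / "21 °C, Sunny" responses.
    async with AsyncClient() as http_client:
        deps = Deps(client=http_client, weather_api_key=None, geo_api_key=None)
        result = await weather_agent.run(
            "I am going to Paris for the holidays, what should I pack?", deps=deps
        )
        print(result.data)


asyncio.run(main())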
requirements.txt CHANGED
@@ -2,10 +2,6 @@
 # uv pip compile requirements.in -o requirements.txt
 aiofiles==23.2.1
     # via gradio
-aioice==0.9.0
-    # via aiortc
-aiortc==1.9.0
-    # via gradio-webrtc
 annotated-types==0.7.0
     # via pydantic
 anyio==4.6.2.post1
@@ -15,10 +11,6 @@ anyio==4.6.2.post1
     #   httpx
     #   openai
     #   starlette
-audioread==3.0.1
-    # via librosa
-av==12.3.0
-    # via aiortc
 cachetools==5.5.0
     # via google-auth
 certifi==2024.8.30
@@ -26,12 +18,6 @@ certifi==2024.8.30
     #   httpcore
     #   httpx
     #   requests
-cffi==1.17.1
-    # via
-    #   aiortc
-    #   cryptography
-    #   pylibsrtp
-    #   soundfile
 charset-normalizer==3.4.0
     # via requests
 click==8.1.7
@@ -40,50 +26,32 @@ click==8.1.7
     #   uvicorn
 colorama==0.4.6
     # via griffe
-coloredlogs==15.0.1
-    # via onnxruntime
-cryptography==44.0.0
-    # via
-    #   aiortc
-    #   pyopenssl
-decorator==5.1.1
-    # via librosa
 distro==1.9.0
     # via
     #   groq
     #   openai
-dnspython==2.7.0
-    # via aioice
 eval-type-backport==0.2.0
     # via pydantic-ai-slim
-fastapi==0.115.5
+fastapi==0.115.6
     # via gradio
 ffmpy==0.4.0
     # via gradio
 filelock==3.16.1
     # via huggingface-hub
-flatbuffers==24.3.25
-    # via onnxruntime
 fsspec==2024.10.0
     # via
     #   gradio-client
     #   huggingface-hub
 google-auth==2.36.0
     # via pydantic-ai-slim
-google-crc32c==1.6.0
-    # via aiortc
 gradio==5.7.1
-    # via gradio-webrtc
+    # via -r requirements.in
 gradio-client==1.5.0
     # via gradio
-gradio-webrtc==0.0.16rc1
-    # via -r requirements.in
 griffe==1.5.1
     # via pydantic-ai-slim
 groq==0.13.0
-    # via
-    #   -r requirements.in
-    #   pydantic-ai-slim
+    # via pydantic-ai-slim
 h11==0.14.0
     # via
     #   httpcore
@@ -102,27 +70,15 @@ huggingface-hub==0.26.3
     # via
     #   gradio
     #   gradio-client
-humanfriendly==10.0
-    # via coloredlogs
 idna==3.10
     # via
     #   anyio
     #   httpx
     #   requests
-ifaddr==0.2.0
-    # via aioice
 jinja2==3.1.4
     # via gradio
 jiter==0.8.0
     # via openai
-joblib==1.4.2
-    # via
-    #   librosa
-    #   scikit-learn
-lazy-loader==0.4
-    # via librosa
-librosa==0.10.2.post1
-    # via gradio-webrtc
 llvmlite==0.43.0
     # via numba
 logfire-api==2.6.0
@@ -135,28 +91,17 @@ markupsafe==2.1.5
     #   jinja2
 mdurl==0.1.2
     # via markdown-it-py
-mpmath==1.3.0
-    # via sympy
-msgpack==1.1.0
-    # via librosa
 numba==0.60.0
-    # via
-    #   -r requirements.in
-    #   librosa
+    # via -r requirements.in
 numpy==2.0.2
     # via
     #   gradio
-    #   librosa
     #   numba
-    #   onnxruntime
     #   pandas
-    #   scikit-learn
-    #   scipy
-    #   soxr
-onnxruntime==1.20.1
-    # via gradio-webrtc
-openai==1.56.1
-    # via pydantic-ai-slim
+openai==1.56.2
+    # via
+    #   -r requirements.in
+    #   pydantic-ai-slim
 orjson==3.10.12
     # via gradio
 packaging==24.2
@@ -164,27 +109,16 @@ packaging==24.2
     #   gradio
     #   gradio-client
     #   huggingface-hub
-    #   lazy-loader
-    #   onnxruntime
-    #   pooch
 pandas==2.2.3
     # via gradio
 pillow==11.0.0
     # via gradio
-platformdirs==4.3.6
-    # via pooch
-pooch==1.8.2
-    # via librosa
-protobuf==5.29.0
-    # via onnxruntime
 pyasn1==0.6.1
     # via
     #   pyasn1-modules
     #   rsa
 pyasn1-modules==0.4.1
     # via google-auth
-pycparser==2.22
-    # via cffi
 pydantic==2.10.3
     # via
     #   fastapi
@@ -192,22 +126,16 @@ pydantic==2.10.3
     #   groq
     #   openai
     #   pydantic-ai-slim
-pydantic-ai==0.0.8
+pydantic-ai==0.0.9
     # via -r requirements.in
-pydantic-ai-slim==0.0.8
+pydantic-ai-slim==0.0.9
     # via pydantic-ai
 pydantic-core==2.27.1
     # via pydantic
 pydub==0.25.1
     # via gradio
-pyee==12.1.1
-    # via aiortc
 pygments==2.18.0
     # via rich
-pylibsrtp==0.10.0
-    # via aiortc
-pyopenssl==24.3.0
-    # via aiortc
 python-dateutil==2.9.0.post0
     # via pandas
 python-dotenv==1.0.1
@@ -223,7 +151,6 @@ pyyaml==6.0.2
 requests==2.32.3
     # via
     #   huggingface-hub
-    #   pooch
     #   pydantic-ai-slim
 rich==13.9.4
     # via typer
@@ -233,12 +160,6 @@ ruff==0.8.1
     # via gradio
 safehttpx==0.1.6
     # via gradio
-scikit-learn==1.5.2
-    # via librosa
-scipy==1.14.1
-    # via
-    #   librosa
-    #   scikit-learn
 semantic-version==2.10.0
     # via gradio
 shellingham==1.5.4
@@ -250,18 +171,10 @@ sniffio==1.3.1
     #   anyio
     #   groq
     #   openai
-soundfile==0.12.1
-    # via librosa
-soxr==0.5.0.post1
-    # via librosa
 starlette==0.41.3
     # via
     #   fastapi
     #   gradio
-sympy==1.13.3
-    # via onnxruntime
-threadpoolctl==3.5.0
-    # via scikit-learn
 tomlkit==0.12.0
     # via gradio
 tqdm==4.67.1
@@ -277,11 +190,9 @@ typing-extensions==4.12.2
     #   gradio-client
     #   groq
     #   huggingface-hub
-    #   librosa
     #   openai
     #   pydantic
     #   pydantic-core
-    #   pyee
     #   typer
 tzdata==2024.2
     # via pandas