Americo committed on
Commit bdafc7b (verified)
1 Parent(s): a0857c5

Update app.py

Files changed (1)
  1. app.py +73 -101
app.py CHANGED
@@ -10,6 +10,7 @@ from google.adk.models.lite_llm import LiteLlm
 import uuid
 import asyncio

+
 #download_files_from_drive_tool = FunctionTool(func=download_files_from_drive)
 # --- Definición de Mod

@@ -130,19 +131,13 @@ def validar_plan_de_afiliado(plan: str, practica: str) -> Dict[str, Any]:
         "comment": f"Validación correcta. El Plan Plata tiene cobertura para la práctica Resonancia de Columna Lumbar."
     }

-APP_NAME = "predoc_app"
-
-USER_ID = str(uuid.uuid4())
-SESSION_ID = str(uuid.uuid4())
-

+APP_NAME = "predoc_app"

+# Agent definition
 root_agent = LlmAgent(
-    #model="gemini-2.5-flash",
     model=LiteLlm(model="openai/gpt-4.1"),
-    generate_content_config=types.GenerateContentConfig(
-        temperature=0.0,
-    ),
+    generate_content_config=types.GenerateContentConfig(temperature=0.0),
     name="PreDoc",
     instruction=instruction,
     description="Asistente médico para renovación de recetas",
@@ -151,99 +146,76 @@ root_agent = LlmAgent(

 session_service = InMemorySessionService()

-async def initialize():
-    global session
-    session = await session_service.create_session(
-        app_name=APP_NAME,
-        user_id=USER_ID,
-        session_id=SESSION_ID
-    )
-
-# Run the async function (this goes at the end of the main file or in your entrypoint)
-asyncio.run(initialize())
-runner = Runner(agent=root_agent, app_name=APP_NAME, session_service=session_service)
-
-
-
-
-def call_agent_text(query):
-
-    content = types.Content(role='user', parts=[types.Part(text=query)])
-    events = runner.run(user_id=USER_ID, session_id=SESSION_ID, new_message=content)
-
-    for event in events:
-        print(event)
-        if event.is_final_response():
-            final_response = event.content.parts[0].text
-            print("Agent Response: ", final_response)
-    return final_response
-
-def call_agent_image(query):
-    images=[]
-    for q in query:
-        with open(q, 'rb') as f:
-            image_bytes = f.read()
-        images.append(types.Part.from_bytes(
-            data=image_bytes,
-            mime_type='image/jpeg',
-        ))
-
-
-    content = types.Content(role='user', parts=images)
-    events = runner.run(user_id=USER_ID, session_id=SESSION_ID, new_message=content)
-
-    for event in events:
-        print(event)
-        if event.is_final_response():
-            final_response = event.content.parts[0].text
-            print("Agent Response: ", final_response)
-    return final_response
-
-def call_agent_both(image,text):
-    with open(image[0], 'rb') as f:
-        image_bytes = f.read()
-
-    content = types.Content(
-        role='user',
-        parts=[
-            types.Part.from_bytes(data=image_bytes, mime_type='image/jpeg'),
-            types.Part(text=text)
-        ]
-    )
-
-    print("CONTENT",content)
-    events = runner.run(user_id=USER_ID, session_id=SESSION_ID, new_message=content)
-
-    for event in events:
-        print(event)
-        if event.is_final_response():
-            final_response = event.content.parts[0].text
-            print("Agent Response: ", final_response)
-    return final_response
-
-def respond(message, history):
-    num_files = len(message["files"])
-    print("FILE", message)
+# Main function with per-session state
+def respond(message, history, state):
+    # Initialize the state on the first call
+    if state is None:
+        user_id = str(uuid.uuid4())
+        session_id = str(uuid.uuid4())
+        state = {"user_id": user_id, "session_id": session_id}
+        print(f"🔄 Nueva sesión: {session_id}")
+
+        # Create the ADK session asynchronously
+        async def create_session():
+            await session_service.create_session(
+                app_name=APP_NAME,
+                user_id=user_id,
+                session_id=session_id
+            )
+        asyncio.run(create_session())
+
+    user_id = state["user_id"]
+    session_id = state["session_id"]
+    runner = Runner(agent=root_agent, app_name=APP_NAME, session_service=session_service)
+
+    def call_agent_text(query):
+        content = types.Content(role='user', parts=[types.Part(text=query)])
+        events = runner.run(user_id=user_id, session_id=session_id, new_message=content)
+        for event in events:
+            if event.is_final_response():
+                return event.content.parts[0].text
+        return "No se obtuvo respuesta."
+
+    def call_agent_image(query):
+        images = []
+        for q in query:
+            with open(q, 'rb') as f:
+                image_bytes = f.read()
+            images.append(types.Part.from_bytes(data=image_bytes, mime_type='image/jpeg'))
+
+        content = types.Content(role='user', parts=images)
+        events = runner.run(user_id=user_id, session_id=session_id, new_message=content)
+        for event in events:
+            if event.is_final_response():
+                return event.content.parts[0].text
+        return "No se obtuvo respuesta."
+
+    def call_agent_both(image, text):
+        with open(image[0], 'rb') as f:
+            image_bytes = f.read()
+        content = types.Content(
+            role='user',
+            parts=[
+                types.Part.from_bytes(data=image_bytes, mime_type='image/jpeg'),
+                types.Part(text=text)
+            ]
+        )
+        events = runner.run(user_id=user_id, session_id=session_id, new_message=content)
+        for event in events:
+            if event.is_final_response():
+                return event.content.parts[0].text
+        return "No se obtuvo respuesta."
+
+    # Dispatch according to the input type
     if message['text'] != '' and len(message['files']) > 0:
-        # both text and files
-        print("AMBAS")
-        res = call_agent_both(message['files'],message['text'])
-        return res
-
+        return call_agent_both(message['files'], message['text']), state
     elif message['text'] == '' and len(message['files']) > 0:
-        # images only
-        res = call_agent_image(message['files'])
-        return res
-
-    elif message['text'] !='' and len(message['files']) == 0:
-        # text only
-        res = call_agent_text(message['text'])
-        return res
+        return call_agent_image(message['files']), state
+    elif message['text'] != '' and len(message['files']) == 0:
+        return call_agent_text(message['text']), state
     else:
-        # empty input
-        return "Escribe algo para que pueda contestarte."
-
-
-demo = gr.ChatInterface(fn=respond, title="Agente Revisor", multimodal=True)
+        return "Escribe algo para que pueda contestarte.", state
 
-demo.launch(debug=True)
+# Gradio interface with state
+demo = gr.ChatInterface(fn=respond, title="Agente Revisor", multimodal=True, state=gr.State())
+demo.launch(debug=True)
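
Note on the last added line: recent Gradio releases document gr.ChatInterface without a state= keyword, so state=gr.State() may raise a TypeError depending on the installed version. Below is a minimal sketch of an equivalent wiring with gr.Blocks and an explicit gr.State; it reuses the respond(message, history, state) function defined above, and the component names (chatbot, state, box, on_submit) are illustrative assumptions, not part of this commit.

# Hedged sketch: explicit per-session state via gr.Blocks, assuming the installed
# Gradio version's ChatInterface does not accept a `state=` keyword.
import gradio as gr

with gr.Blocks(title="Agente Revisor") as demo:
    chatbot = gr.Chatbot()            # conversation display
    state = gr.State(value=None)      # holds {"user_id", "session_id"} for this browser session
    box = gr.MultimodalTextbox()      # accepts text plus attached files

    def on_submit(message, history, state):
        # `message` is a dict with "text" and "files", the same shape respond() expects
        reply, state = respond(message, history, state)
        history = (history or []) + [(message["text"] or "(archivo)", reply)]
        return history, state

    box.submit(on_submit, inputs=[box, chatbot, state], outputs=[chatbot, state])

demo.launch(debug=True)

Because gr.State persists across turns within a single browser session, the uuid-based ADK session created on the first call is reused for the rest of the conversation, which is the behaviour the new respond() is written for.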