Update app.py

app.py CHANGED
@@ -1,5 +1,4 @@
 import os
-import time
 import gradio as gr
 import google.generativeai as genai
 from dotenv import load_dotenv
@@ -14,28 +13,24 @@ genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
 model = genai.GenerativeModel("gemini-2.0-flash")
 
 def chat(message, history):
-    """Sends the user's message to Gemini with history and returns the response."""
+    """Sends the user's message to Gemini with history and streams back the response."""
     try:
         # Convert the history into the format Gemini expects
         chat_history = [{"role": "user", "parts": [msg[0]]} for msg in history] + [{"role": "user", "parts": [message]}]
 
-        #
-
-
-        #
-
-
-
-        full_response = response.text[: i + 1]
-        yield history + [(message, full_response)]  # Progressively return the updated history
-
+        # Stream the response from Gemini chunk by chunk
+        response_stream = model.generate_content(chat_history, stream=True)
+
+        full_response = ""
+        for chunk in response_stream:
+            full_response += chunk.text
+            yield full_response  # Yield the accumulated reply after each chunk
+
     except Exception as e:
-        yield
+        yield f"Error: {e}"
 
 # Create the chat interface with conversation history
 demo = gr.ChatInterface(
     fn=chat,
-    type="messages",
     examples=["Write an example Python lambda function."],
     title="Gemini Chatbot",
     description="Interactive chatbot with conversation history using Gemini AI."
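For context, a minimal sketch of the streaming pattern the new code relies on, assuming a valid GOOGLE_API_KEY is available in the environment: in google-generativeai, passing stream=True to generate_content returns an iterable of partial response chunks, and a consumer (such as a Gradio ChatInterface generator) should yield the accumulated text so far, since each yield replaces the previously displayed bot message. The prompt below is only an illustrative example.

import os

import google.generativeai as genai

genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
model = genai.GenerativeModel("gemini-2.0-flash")

# stream=True makes generate_content return an iterable of partial chunks
response = model.generate_content("Explain Python generators briefly.", stream=True)

full_text = ""
for chunk in response:
    full_text += chunk.text                 # accumulate the reply so far
    print(chunk.text, end="", flush=True)   # print each chunk as it arrives
print()

Accumulating into full_text mirrors what the Space's chat function yields, so the UI shows the growing reply rather than isolated fragments.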