# Hugging Face Space: Zephyr-7B chatbot grounded with Google Custom Search results
import os

import gradio as gr
import requests
from huggingface_hub import InferenceClient

# Hugging Face inference client for the Zephyr-7B chat model.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Google Custom Search credentials.
# SECURITY NOTE: hard-coding API keys in source is unsafe — they should live in
# environment variables / Space secrets. The literal values are kept only as a
# backward-compatible fallback; rotate this key and remove the fallback.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "AIzaSyDI48Q_Ez8-KXQ6Dfe_r7JyOkk-dloER0I")
SEARCH_ENGINE_ID = os.environ.get("SEARCH_ENGINE_ID", "030a88810b398467c")
def web_search(query):
    """Query the Google Custom Search API and return a plain-text summary.

    Args:
        query: Free-form search string; percent-encoding is handled by
            ``requests`` via the ``params`` argument.

    Returns:
        One ``"title: snippet (link)"`` line per result, a no-results
        message, or an error message if the request fails.
    """
    url = "https://www.googleapis.com/customsearch/v1"
    # Let requests build the query string so special characters in `query`
    # ('&', '#', spaces, ...) are encoded correctly — the previous f-string
    # interpolation produced a malformed URL for such queries.
    params = {"q": query, "key": GOOGLE_API_KEY, "cx": SEARCH_ENGINE_ID}
    try:
        # A timeout keeps the chatbot from hanging forever on a stalled
        # connection; raise_for_status converts HTTP 4xx/5xx into exceptions.
        response = requests.get(url, params=params, timeout=10)
        response.raise_for_status()
        results = response.json()
    except requests.exceptions.RequestException as e:
        # Connection problems, timeouts and HTTP errors all land here.
        return f"Error al realizar la búsqueda: {e}"

    if "items" not in results:
        return "No se encontraron resultados relevantes."

    # Summarize each hit as "title: snippet (link)".
    search_results = []
    for item in results["items"]:
        title = item.get("title", "No title")
        link = item.get("link", "")
        snippet = item.get("snippet", "")
        search_results.append(f"{title}: {snippet} ({link})")
    return "\n".join(search_results)
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Generate a chat reply grounded in live web-search results.

    Args:
        message: Latest user message.
        history: Sequence of ``(user_text, assistant_text)`` pairs.
        system_message: System prompt steering the model.
        max_tokens: Maximum tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Returns:
        The assistant's reply text.
    """
    # Rebuild the conversation context for the model.
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    # Ground the answer in fresh search results for the current message.
    search_summary = web_search(message)
    messages.append({"role": "system", "content": f"Search results:\n{search_summary}"})
    messages.append({"role": "user", "content": message})

    # BUG FIX: the original called text_completion(prompt=message, ...),
    # silently discarding the system prompt, chat history and search results
    # it had just assembled. Use the chat endpoint with the full message list.
    response = client.chat_completion(
        messages, max_tokens=max_tokens, temperature=temperature, top_p=top_p
    )
    return response.choices[0].message.content
# Gradio interface
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    def chat_interface(user_message, history):
        """Run one chat turn and return the updated conversation history.

        `history` is supplied by the Chatbot component; guard against None on
        the first turn instead of using a mutable default argument (the
        original `history=[]` default is shared across calls).
        """
        history = list(history or [])
        output = respond(
            user_message, history, "You are a helpful assistant.", 200, 0.7, 0.9
        )
        history.append((user_message, output))
        # First output refreshes the Chatbot; second clears the textbox.
        return history, ""

    # BUG FIX: the original wired outputs=[chatbot, chatbot] (the same
    # component twice, with a deprecated chatbot.update(...) as the second
    # value). Wire the second output to the textbox so it is cleared.
    msg.submit(chat_interface, inputs=[msg, chatbot], outputs=[chatbot, msg])
    clear.click(lambda: [], None, chatbot)

demo.launch()