reparbot / app.py
import os
import gradio as gr
import torch
from huggingface_hub import login
from transformers import pipeline
# Log in to Hugging Face using the token stored as a Space secret
huggingface_token = os.getenv('reparbot2')  # Make sure the name matches the secret
if huggingface_token is None:
    raise ValueError("The Hugging Face token is not set in the environment variables.")
login(huggingface_token)
# Configure the model
model_id = "meta-llama/Llama-3.3-70B-Instruct"
pipeline_model = pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)
# Answer a user query with the chat pipeline
def respond_to_query(user_input):
    messages = [
        {"role": "user", "content": user_input},
    ]
    outputs = pipeline_model(messages, max_new_tokens=256)
    # With chat-style input, generated_text holds the full conversation;
    # return only the assistant's reply.
    return outputs[0]["generated_text"][-1]["content"]
# Build and launch the Gradio interface
gr.Interface(fn=respond_to_query, inputs="text", outputs="text").launch()