import os

import gradio as gr
import torch
from huggingface_hub import login
from transformers import pipeline

# Log in to Hugging Face using the Space secret
huggingface_token = os.getenv('reparbot2')  # make sure the name matches the secret
if huggingface_token is None:
    raise ValueError("The Hugging Face token is not set in the environment variables.")
login(huggingface_token)

# Configure the model
model_id = "meta-llama/Llama-3.3-70B-Instruct"
pipeline_model = pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

# Generate a response for the user's query
def respond_to_query(user_input):
    messages = [
        {"role": "user", "content": user_input},
    ]
    outputs = pipeline_model(messages, max_new_tokens=256)
    # With chat-style input, generated_text holds the full conversation;
    # the assistant's reply is the last message.
    return outputs[0]["generated_text"][-1]["content"]

# Create the Gradio interface
gr.Interface(fn=respond_to_query, inputs="text", outputs="text").launch()
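
# --- Usage sketch (assumption, not part of the Space itself) ---------------
# Once the app is running, the interface can also be queried programmatically
# with gradio_client. The URL below is a placeholder for the local server;
# a default gr.Interface exposes its endpoint under api_name="/predict".
#
#     from gradio_client import Client
#
#     client = Client("http://127.0.0.1:7860")
#     reply = client.predict("How do I fix a leaking faucet?", api_name="/predict")
#     print(reply)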