Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -8,7 +8,11 @@ tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
|
|
8 |
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
|
9 |
|
10 |
# Cargar tu conjunto de datos
|
11 |
-
|
|
|
|
|
|
|
|
|
12 |
|
13 |
# Preprocesar los datos
|
14 |
def preprocess_function(examples):
|
@@ -58,11 +62,21 @@ chat_history_ids = None
|
|
58 |
# Función de chat
|
59 |
def chat_with_bot(user_input):
|
60 |
global chat_history_ids
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
65 |
|
66 |
# Crear la interfaz de Gradio
|
67 |
iface = gr.Interface(fn=chat_with_bot, inputs="text", outputs="text", title="Chatbot Entrenado")
|
68 |
iface.launch()
|
|
|
|
8 |
# Load the pre-trained conversational causal LM used to generate replies.
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
|
9 |
|
10 |
# Cargar tu conjunto de datos
|
11 |
+
# Load the training dataset (CSV) in a best-effort way: the app should still
# start even if the file is missing, so failures are reported, not raised.
try:
    dataset = load_dataset('csv', data_files='alpaca.csv')
    print("Conjunto de datos cargado correctamente.")
except Exception as e:
    # Bug fix: the original handler only printed the error, leaving `dataset`
    # undefined and causing a NameError the first time it is used downstream.
    # Bind it to None so later code can check for the failed load explicitly.
    dataset = None
    print(f"Error al cargar el conjunto de datos: {e}")
|
16 |
|
17 |
# Preprocesar los datos
|
18 |
def preprocess_function(examples):
|
|
|
62 |
# Chat handler wired into the Gradio interface below.
def chat_with_bot(user_input):
    """Generate a reply to ``user_input``, carrying the conversation forward.

    Encodes the user's turn, appends it to the global ``chat_history_ids``
    (the running token history of the whole conversation), generates a
    continuation with the model, and returns only the newly generated text.

    Returns a canned Spanish fallback message when the model produces an
    empty response, and an error message string (never raises) on failure,
    so the Gradio UI always gets text back.
    """
    global chat_history_ids
    try:
        new_user_input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')
        bot_input_ids = (
            torch.cat([chat_history_ids, new_user_input_ids], dim=-1)
            if chat_history_ids is not None
            else new_user_input_ids
        )

        # Bug fix: generate(max_length=1000) counts history + new tokens.
        # Once the accumulated history nears 1000 tokens there is no room
        # left to generate, and every reply degenerates into the empty-
        # response fallback. Keep only the most recent tokens as context.
        if bot_input_ids.shape[-1] > 900:
            bot_input_ids = bot_input_ids[:, -900:]

        chat_history_ids = model.generate(
            bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id
        )
        # Decode only the tokens generated after the prompt.
        response = tokenizer.decode(
            chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True
        )

        # If the response is empty or whitespace-only, return a default reply.
        if not response.strip():
            return "Lo siento, no entiendo la pregunta."

        return response
    except Exception as e:
        # Broad catch is deliberate: the UI must always receive a string.
        return f"Error: {e}. No pude procesar tu pregunta."
|
78 |
|
79 |
# Build the Gradio text-in/text-out UI around the chat handler and launch it.
demo = gr.Interface(
    fn=chat_with_bot,
    inputs="text",
    outputs="text",
    title="Chatbot Entrenado",
)
demo.launch()
|
82 |
+
|