Use responses.txt instead of dataset.json
Browse files
test.py
CHANGED
@@ -6,11 +6,11 @@ from keras_self_attention import SeqSelfAttention
|
|
6 |
from model_settings import *
|
7 |
|
8 |
|
9 |
-
with open("dataset.json", "r") as f:
|
10 |
-
|
11 |
|
12 |
tokenizer = Tokenizer() # a tokenizer is a thing to split text into words, it might have some other stuff like making all the letters lowercase, etc.
|
13 |
-
tokenizer.fit_on_texts(
|
14 |
|
15 |
model = load_model("chatbot.keras", custom_objects={"SeqSelfAttention": SeqSelfAttention})
|
16 |
|
@@ -22,7 +22,7 @@ def generate(text):
|
|
22 |
tokens = (tokens+[0,]*inp_len)[:inp_len] # cutting off the sentence after inp_len words
|
23 |
prediction = model.predict(np.array([tokens,]))[0]
|
24 |
line = find_line_number(prediction)
|
25 |
-
return
|
26 |
|
27 |
if __name__ == "__main__": # if this code is not being imported, open the chat
|
28 |
while True:
|
|
|
6 |
from model_settings import *
|
7 |
|
8 |
|
9 |
+
with open("responses.txt", "r") as f:
|
10 |
+
lines = [x.rstrip("\n") for x in f.readlines()]
|
11 |
|
12 |
tokenizer = Tokenizer() # a tokenizer is a thing to split text into words, it might have some other stuff like making all the letters lowercase, etc.
|
13 |
+
tokenizer.fit_on_texts(lines)
|
14 |
|
15 |
model = load_model("chatbot.keras", custom_objects={"SeqSelfAttention": SeqSelfAttention})
|
16 |
|
|
|
22 |
tokens = (tokens+[0,]*inp_len)[:inp_len] # cutting off the sentence after inp_len words
|
23 |
prediction = model.predict(np.array([tokens,]))[0]
|
24 |
line = find_line_number(prediction)
|
25 |
+
return lines[line]
|
26 |
|
27 |
if __name__ == "__main__": # if this code is not being imported, open the chat
|
28 |
while True:
|