DHRUV SHEKHAWAT committed
Commit · f1215a7
1 Parent(s): 41505f0
Update app.py
app.py CHANGED
@@ -41,57 +41,7 @@ class TransformerChatbot(Model):
     def create_padding_mask(self, seq):
         mask = tf.cast(tf.math.equal(seq, 0), tf.float32)
         return mask[:, tf.newaxis, tf.newaxis, :]
-def completion_model(vocab_size, max_len, d_model, n_head, ff_dim, dropout_rate,weights,datafile,dict,len2,text2):
 
-    with open(datafile,"r") as f:
-        text = f.read()
-    text = text.lower()
-    words = text.split()
-    loaded_dict = np.load(dict, allow_pickle=True)
-    word_to_num = loaded_dict["word_to_num"].item()
-    num_to_word = loaded_dict["num_to_word"].item()
-    X = []
-    Y = []
-    for i in range(len(words)-1):
-        word = words[i]
-        next_word = words[i+1]
-        X.append(word_to_num[word])
-        Y.append(word_to_num[next_word])
-    Y.append(0)
-
-    X.append(word_to_num[words[-1]])
-    X_train = pad_sequences([X])
-    y_train = pad_sequences([Y])
-
-
-
-    chatbot = TransformerChatbot(vocab_size, max_len, d_model, n_head, ff_dim, dropout_rate)
-    chatbot.load_weights(weights)
-    chatbot.build(input_shape=(None, max_len)) # Build the model
-    chatbot.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
-
-    for i in range(1):
-        other_text2 = text2
-        other_text2 = other_text2.lower()
-        other_words2 = other_text2.split()
-        other_num2 = [word_to_num[word] for word in other_words2]
-        given_X2 = other_num2
-        input_sequence2 = pad_sequences([given_X2], maxlen=max_len, padding='post')
-        output_sentence = other_text2 + ""
-        for _ in range(len2):
-            predicted_token = np.argmax(chatbot.predict(input_sequence2), axis=-1)
-            predicted_token = predicted_token.item()
-            out = num_to_word[predicted_token]
-            # if out == ".":
-            #     break
-
-            output_sentence += " " + out
-            given_X2 = given_X2[1:]
-            given_X2.append(predicted_token)
-            input_sequence2 = pad_sequences([given_X2], maxlen=max_len, padding='post')
-
-        out2 = output_sentence
-    return out2
 st.title("UniGLM TEXT completion Model")
 st.subheader("Next Word Prediction AI Model by Webraft-AI")
 #Picking what NLP task you want to do
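Note on the removed completion_model above: its core is a greedy, sliding-window next-word loop. Below is a minimal standalone sketch of that idea, slightly tidied (it keeps a window of at most max_len ids and reads the prediction at the last real position). The name greedy_complete and its parameters are illustrative, not part of app.py; it assumes any Keras model whose predict returns per-position vocabulary scores of shape (batch, max_len, vocab_size), as TransformerChatbot appears to.

# Rough sketch only; greedy_complete, model, and n_steps are illustrative names.
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences

def greedy_complete(model, prompt, word_to_num, num_to_word, max_len, n_steps):
    """Append n_steps greedily predicted words to the prompt."""
    words = prompt.lower().split()
    ids = [word_to_num[w] for w in words]
    for _ in range(n_steps):
        window = ids[-max_len:]                            # sliding context window
        batch = pad_sequences([window], maxlen=max_len, padding='post')
        scores = model.predict(batch, verbose=0)[0]        # (max_len, vocab_size)
        next_id = int(np.argmax(scores[len(window) - 1]))  # scores at last real word
        words.append(num_to_word[next_id])
        ids.append(next_id)
    return " ".join(words)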
@@ -143,7 +93,7 @@ if option == '13M_OLD':
     chatbot.load_weights(weights)
     chatbot.build(input_shape=(None, max_len)) # Build the model
    chatbot.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
-
+    chatbot.fit(X_train, y_train, epochs=1, batch_size=64)
     for i in range(1):
         other_text2 = text2
         other_text2 = other_text2.lower()
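For context on the added chatbot.fit(X_train, y_train, epochs=1, batch_size=64) call, here is a self-contained sketch of the shape contract it relies on, assuming X_train / y_train are built the way the removed completion_model built them (one padded sequence of word ids and its shifted next-word targets). The toy Sequential model is a stand-in, not the real TransformerChatbot; only the shapes and the integer-target loss pairing are the point.

# Illustrative only: stand-in model showing the expected array shapes.
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences

max_len, vocab_size = 8, 50
word_ids = [5, 12, 7, 3, 9]                  # toy word ids for one text
X = word_ids                                 # inputs:  w_0 ... w_n
Y = word_ids[1:] + [0]                       # targets: w_1 ... w_n, then padding id 0

X_train = pad_sequences([X], maxlen=max_len, padding='post')   # shape (1, max_len)
y_train = pad_sequences([Y], maxlen=max_len, padding='post')   # shape (1, max_len)

# Per-position vocabulary scores of shape (batch, max_len, vocab_size); with
# sparse_categorical_crossentropy the integer targets need no one-hot encoding.
toy = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, 16),
    tf.keras.layers.Dense(vocab_size, activation="softmax"),
])
toy.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
toy.fit(X_train, y_train, epochs=1, batch_size=64)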
@@ -212,7 +162,7 @@ elif option=="26M_OLD":
|
|
212 |
chatbot.load_weights(weights)
|
213 |
chatbot.build(input_shape=(None, max_len)) # Build the model
|
214 |
chatbot.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
|
215 |
-
|
216 |
for i in range(1):
|
217 |
other_text2 = text2
|
218 |
other_text2 = other_text2.lower()
|
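As a side note on the create_padding_mask lines that appear as context at the top of the diff: a purely illustrative check of what they compute, namely 1.0 at padded positions (id 0) and 0.0 elsewhere, shaped to broadcast over attention heads.

# Illustrative check only; re-runs the two masking lines shown in the diff context.
import tensorflow as tf

seq = tf.constant([[5, 12, 7, 0, 0]])
mask = tf.cast(tf.math.equal(seq, 0), tf.float32)[:, tf.newaxis, tf.newaxis, :]
print(mask.shape)    # (1, 1, 1, 5)
print(mask.numpy())  # [[[[0. 0. 0. 1. 1.]]]]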