jaifar530
committed on
Update app.py
app.py CHANGED
@@ -229,7 +229,7 @@ with open('tokenizer.pkl', 'rb') as handle:
 with open('label_encoder.pkl', 'rb') as handle:
     label_encoder = pickle.load(handle)
 
-max_length = 300
+max_length = 300
 
 ############### End Load CNN Model ############
 
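For context, the pickled artifacts loaded here are presumably a Keras `Tokenizer` and a scikit-learn `LabelEncoder`, with `max_length` being the fixed sequence length the CNN expects. A minimal sketch of what they do, using stand-in objects rather than app.py's actual pickles:

```python
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import LabelEncoder

# Stand-ins for the pickled artifacts (assumed types: Keras Tokenizer, sklearn LabelEncoder).
tokenizer = Tokenizer()
tokenizer.fit_on_texts(["a tiny example corpus", "another tiny example"])
label_encoder = LabelEncoder().fit(["ai", "human"])

max_length = 300
seq = tokenizer.texts_to_sequences(["another tiny example"])  # text -> integer token ids
padded = pad_sequences(seq, maxlen=max_length)                # shape (1, 300), zero-padded
print(label_encoder.inverse_transform([1]))                   # class index -> label, here ['human']
```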
@@ -256,7 +256,6 @@ press_me_button = st.button("Human or Robot?")
 if press_me_button:
 
     ########## ML
-
     word_count = len(re.findall(r'\w+', new_text))
     st.write(f"Words Count: {word_count}")
 
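`word_count` simply counts `\w+` matches, so punctuation is ignored and contractions split into two tokens; for example:

```python
import re

sample = "Human or Robot? Let's see."
print(len(re.findall(r'\w+', sample)))  # 6 -> ['Human', 'or', 'Robot', 'Let', 's', 'see']
```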
@@ -297,14 +296,14 @@ if press_me_button:
     with open(f"{file_prefix}_vectorizer.pkl", 'rb') as file:
         vectorizer = pickle.load(file)
 
-    #
+    # ML Vectorizing the input
     user_input_transformed = vectorizer.transform([new_text])
 
-    #
+    # ML predictions
     ridge_prediction = ridge_model.predict(user_input_transformed)
     extra_trees_prediction = extra_trees_model.predict(user_input_transformed)
 
-
+    # CNN prediction + Vectorizing the input
     predicted_author, author_probabilities = predict_author(new_text, loaded_model, tokenizer, label_encoder)
     sorted_probabilities = sorted(author_probabilities.items(), key=lambda x: x[1], reverse=True)
 
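`predict_author` is defined earlier in app.py and is not part of this diff; a plausible sketch, assuming it pads the tokenized text to `max_length`, runs the CNN, and maps the argmax class back through the label encoder (the names and details below are assumptions, not the app's actual implementation):

```python
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences

def predict_author(text, model, tokenizer, label_encoder, max_length=300):
    seq = tokenizer.texts_to_sequences([text])        # text -> integer token ids
    padded = pad_sequences(seq, maxlen=max_length)    # fixed-length CNN input
    probs = model.predict(padded)[0]                  # per-class probabilities
    predicted = label_encoder.inverse_transform([int(np.argmax(probs))])[0]
    # Assumes the model's output order matches label_encoder.classes_
    return predicted, dict(zip(label_encoder.classes_, probs.astype(float)))
```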
@@ -332,7 +331,7 @@ if press_me_button:
     max_cnn_prob_name = sorted_probabilities[0][0]
     max_cnn_prob = float(sorted_probabilities[0][1])
 
-    if word_count < 10 or word_count > 1081:
+    if word_count < 10.0 or word_count > 1081.0:
        st.info("For better prediction input texts between 10 and 1081", icon="ℹ️")
 
    elif word_count < 256:
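Note that `word_count` is an `int` (it comes from `len(...)`), so comparing it against `10.0`/`1081.0` instead of `10`/`1081` changes nothing in Python:

```python
word_count = 9
assert (word_count < 10) == (word_count < 10.0)        # int/float comparisons agree
assert (word_count > 1081) == (word_count > 1081.0)
```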