Spaces:
Sleeping
Sleeping
Add requirements.txt
Browse files
- app.py +8 -8
- requirements.txt +6 -0
app.py
CHANGED
@@ -26,7 +26,7 @@ nlp.vocab.add_flag(lambda s: s.lower() in spacy.lang.en.stop_words.STOP_WORDS, s
|
|
26 |
|
27 |
def preprocess_text(text):
|
28 |
"""Preprocess the input text using SpaCy and return word indices."""
|
29 |
-
doc = nlp(text)
|
30 |
word_seq = []
|
31 |
for token in doc:
|
32 |
if token.pos_ != "PUNCT":
|
@@ -38,17 +38,17 @@ def preprocess_text(text):
|
|
38 |
def classify_question(text):
|
39 |
# Preprocess the text
|
40 |
seq = preprocess_text(text)
|
41 |
-
padded_seq = tf.keras.preprocessing.sequence.pad_sequences([seq], maxlen=… [line truncated in diff rendering]
|
42 |
-
|
43 |
# Get predictions from each model
|
44 |
-
pred1 = 0.15 * np.squeeze(model_1.predict(padded_seq, batch_size=… [line truncated in diff rendering]
|
45 |
-
pred2 = 0.35 * np.squeeze(model_2.predict(padded_seq, batch_size=… [line truncated in diff rendering]
|
46 |
-
pred3 = 0.15 * np.squeeze(model_3.predict(padded_seq, batch_size=… [line truncated in diff rendering]
|
47 |
-
pred4 = 0.35 * np.squeeze(model_4.predict(padded_seq, batch_size=… [line truncated in diff rendering]
|
48 |
|
49 |
# Combine predictions
|
50 |
avg_pred = pred1 + pred2 + pred3 + pred4
|
51 |
-
label = "Insincere" if avg_pred > 0.… [line truncated in diff rendering]
|
52 |
|
53 |
# Create a list of probabilities for each model
|
54 |
probs = {
|
|
|
26 |
|
27 |
def preprocess_text(text):
|
28 |
"""Preprocess the input text using SpaCy and return word indices."""
|
29 |
+
doc = nlp.pipe(text, n_process=1)
|
30 |
word_seq = []
|
31 |
for token in doc:
|
32 |
if token.pos_ != "PUNCT":
|
|
|
38 |
def classify_question(text):
|
39 |
# Preprocess the text
|
40 |
seq = preprocess_text(text)
|
41 |
+
padded_seq = tf.keras.preprocessing.sequence.pad_sequences([seq], maxlen=55) # Adjust maxlen if needed
|
42 |
+
BATCH_SIZE = 512
|
43 |
# Get predictions from each model
|
44 |
+
pred1 = 0.15 * np.squeeze(model_1.predict(padded_seq, batch_size=BATCH_SIZE, verbose=2))
|
45 |
+
pred2 = 0.35 * np.squeeze(model_2.predict(padded_seq, batch_size=BATCH_SIZE, verbose=2))
|
46 |
+
pred3 = 0.15 * np.squeeze(model_3.predict(padded_seq, batch_size=BATCH_SIZE, verbose=2))
|
47 |
+
pred4 = 0.35 * np.squeeze(model_4.predict(padded_seq, batch_size=BATCH_SIZE, verbose=2))
|
48 |
|
49 |
# Combine predictions
|
50 |
avg_pred = pred1 + pred2 + pred3 + pred4
|
51 |
+
label = "Insincere" if avg_pred > 0.35 else "Sincere"
|
52 |
|
53 |
# Create a list of probabilities for each model
|
54 |
probs = {
|
requirements.txt
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
tensorflow
|
2 |
+
gradio
|
3 |
+
spacy
|
4 |
+
tqdm
|
5 |
+
numpy
|
6 |
+
pandas
|