Loading time
app.py
CHANGED
@@ -37,7 +37,7 @@ lookup_words = read_text('lookup_words')
 obj_pronouns = read_text('obj_pronouns')
 profanities = read_text('profanities', 'json')
 
-
+loading_countdown = 0
 
 def fuzzy_lookup(tweet):
 
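Note: read_text is only referenced in this hunk, not defined in it. A minimal sketch of what such a helper could look like, assuming plain-text files hold one entry per line and the 'json' variant holds a structured lookup table (the file naming and return types here are assumptions, not the Space's actual code):

import json

def read_text(filename, filetype='txt'):
    # Assumed layout: '<filename>.<filetype>' sitting next to app.py.
    with open(f'{filename}.{filetype}', encoding='utf-8') as f:
        if filetype == 'json':
            return json.load(f)          # e.g. the profanities lookup table
        return f.read().splitlines()     # e.g. the obj_pronouns word list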
@@ -84,16 +84,12 @@ def fuzzy_lookup(tweet):
 
 def preprocess(tweet):
 
-    # Lowercase
     tweet = tweet.lower()
-
-    # Remove emojis
     tweet = emoji.replace_emoji(tweet, replace='')
 
-    #
+    # Elongated words conversion
     tweet = re.sub(r'(.)\1{2,}', r'\1', tweet)
 
-    # Split sentence into list of words
     row_split = tweet.split()
 
     for index, word in enumerate(row_split):
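Note: the "Elongated words conversion" step collapses any character repeated three or more times to a single occurrence. A standalone illustration of that regex (the example inputs are made up):

import re

def collapse_elongated(word):
    # Any character repeated 3+ times is reduced to one occurrence.
    return re.sub(r'(.)\1{2,}', r'\1', word)

print(collapse_elongated('grabeeee'))   # grabe
print(collapse_elongated('haaaayyy'))   # hay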
@@ -113,7 +109,6 @@ def preprocess(tweet):
     # Fuzzy Lookup
     preprocessed_tweet, matches = fuzzy_lookup(preprocessed_tweet)
 
-    # Check if output contains single word then return null
     if len(preprocessed_tweet.split()) == 1:
         return preprocessed_tweet, matches
 
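Note: fuzzy_lookup itself is untouched by this commit, so its body never appears in the diff; only its return shape (the cleaned tweet plus the matched profanity) is visible. A rough, standard-library-only sketch of that kind of matching, purely to illustrate the idea (the Space's real matcher, threshold, and data structures may differ):

import difflib

def fuzzy_lookup_sketch(tweet, profanity_terms, cutoff=0.8):
    # Collect tokens that closely resemble a known profanity term.
    matches = {}
    for word in tweet.split():
        close = difflib.get_close_matches(word, profanity_terms, n=1, cutoff=cutoff)
        if close:
            matches[word] = close[0]
    return tweet, matches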
@@ -124,33 +119,22 @@ def preprocess(tweet):
     return preprocessed_tweet, matches
 
 
-def countdown(seconds):
-    start_time = time.time()
-    while True:
-        elapsed_time = int(time.time() - start_time)
-        remaining_time = max(seconds - elapsed_time, 0)
-        if remaining_time == 0:
-            print("Time's up!")
-            break
-        print(remaining_time)
-        yield remaining_time
-        time.sleep(1)
-
 
 def predict(tweet):
+    global loading_countdown
 
     preprocessed_tweet, matched_profanity = preprocess(tweet)
 
     prediction = query(preprocessed_tweet)
 
-    if type(prediction)
-
-
-
-        return f"Model is loading. Try again after {next(cd)} seconds."
+    if type(prediction) == dict:
+        loading_time = prediction['estimated_time']
+        return f"Loading Model (Estimated Time: {loading_time} Seconds)"
+
 
     if bool(matched_profanity) == False:
-        return "No
+        return "No Profanity"
+
 
     prediction = [tuple(i.values()) for i in prediction[0]]
     prediction = dict((x, y) for x, y in prediction)
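Note: the new dict check in predict() leans on how the hosted inference endpoint behaves while the model is cold: instead of the usual predictions it returns a JSON object containing an estimated_time field, which is what the "Loading Model" message surfaces. query() is not part of this diff; a plausible shape for it, with the model id and token left as placeholders:

import requests

API_URL = "https://api-inference.huggingface.co/models/<model-id>"   # placeholder
HEADERS = {"Authorization": "Bearer <your-token>"}                   # placeholder

def query(text):
    # While the model is loading, the response is a dict such as
    # {"error": "...", "estimated_time": 20.0}; otherwise it is a nested list
    # of {"label": ..., "score": ...} dicts that predict() reshapes into a
    # single label-to-score dict.
    response = requests.post(API_URL, headers=HEADERS, json={"inputs": text})
    return response.json()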
@@ -161,6 +145,7 @@ def predict(tweet):
 
     return prediction
 
+
 demo = gr.Interface(
     fn=predict,
 
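Note: the hunk above only shows the first argument of gr.Interface; the remaining wiring lies outside the diff context. A minimal, assumed completion that matches predict()'s signature (one text box in, a label/score display out) would look roughly like:

import gradio as gr

demo = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(label="Tweet"),
    outputs=gr.Label(label="Prediction"),   # accepts the score dict or a plain message string
)

demo.launch()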