Update app.py
app.py CHANGED
@@ -15,9 +15,6 @@ from tensorflow import keras
 
 from youtube_comment_downloader import *
 
-sw = ["i","me","my","myself","we","our","ours","ourselves","you","you're","you've","you'll","you'd","your","yours","yourself","yourselves","he","him","his","himself","she","she's","her","hers","herself","it","it's","its","itself","they","them","their","theirs","themselves","what","which","who","whom","this","that","that'll","these","those","am","is","are","was","were","be","been","being","have","has","had","having","do","does","did","doing","a","an","the","and","but","if","or","because","as","until","while","of","at","by","for","with","about","against","between","into","through","during","before","after","above","below","to","from","up","down","in","out","on","off","over","under","again","further","then","once","here","there","when","where","why","how","all","any","both","each","few","more","most","other","some","such","no","nor","not","only","own","same","so","than","too","very","s","t","can","will","just","don","don't","should","should've","now","d","ll","m","o","re","ve","y","ain","aren","aren't","couldn","couldn't","didn","didn't","doesn","doesn't","hadn","hadn't","hasn","hasn't","haven","haven't","isn","isn't","ma","mightn","mightn't","mustn","mustn't","needn","needn't","shan","shan't","shouldn","shouldn't","wasn","wasn't","weren","weren't","won","won't","wouldn","wouldn't"]
-lemmatizer = WordNetLemmatizer()
-
 # get YouTube ID
 def getID(url):
     print("Getting YouTube ID...")
@@ -25,6 +22,9 @@ def getID(url):
 
 # function to clean comments
 def clean_text(text):
+    lemmatizer = WordNetLemmatizer()
+    # stopwords
+    sw = ["i","me","my","myself","we","our","ours","ourselves","you","you're","you've","you'll","you'd","your","yours","yourself","yourselves","he","him","his","himself","she","she's","her","hers","herself","it","it's","its","itself","they","them","their","theirs","themselves","what","which","who","whom","this","that","that'll","these","those","am","is","are","was","were","be","been","being","have","has","had","having","do","does","did","doing","a","an","the","and","but","if","or","because","as","until","while","of","at","by","for","with","about","against","between","into","through","during","before","after","above","below","to","from","up","down","in","out","on","off","over","under","again","further","then","once","here","there","when","where","why","how","all","any","both","each","few","more","most","other","some","such","no","nor","not","only","own","same","so","than","too","very","s","t","can","will","just","don","don't","should","should've","now","d","ll","m","o","re","ve","y","ain","aren","aren't","couldn","couldn't","didn","didn't","doesn","doesn't","hadn","hadn't","hasn","hasn't","haven","haven't","isn","isn't","ma","mightn","mightn't","mustn","mustn't","needn","needn't","shan","shan't","shouldn","shouldn't","wasn","wasn't","weren","weren't","won","won't","wouldn","wouldn't"]
     # remove symbols and Emojis
     text = text.lower()
     text = re.sub('@', '', text)
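Both hunks cut off before sw and lemmatizer are actually applied, so the rest of clean_text is not visible in this commit. Below is a minimal sketch of how the function could plausibly continue, assuming whitespace tokenization, stopword filtering against sw, and per-token WordNet lemmatization; everything after the re.sub('@', ...) line, and the abbreviated stopword list, are illustrative assumptions rather than the Space's actual code.

import re
from nltk.stem import WordNetLemmatizer  # needs the WordNet corpus: nltk.download('wordnet')

def clean_text(text):
    lemmatizer = WordNetLemmatizer()
    # stopwords (abbreviated here; the app defines the full list shown in the diff)
    sw = ["i", "me", "my", "we", "you", "the", "a", "an", "and", "is", "are", "was", "not"]
    # remove symbols and Emojis
    text = text.lower()
    text = re.sub('@', '', text)
    # assumed continuation: drop remaining non-alphanumeric characters,
    # split on whitespace, filter stopwords, and lemmatize the surviving tokens
    text = re.sub(r'[^a-z0-9\s]', ' ', text)
    tokens = [lemmatizer.lemmatize(tok) for tok in text.split() if tok not in sw]
    return " ".join(tokens)

One note on the pattern the commit introduces: constructing WordNetLemmatizer() on every call is cheap, but a membership test against a long Python list is linear in the list length for each token; converting sw to a set once would make each lookup constant-time.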