Create clean_data.py
clean_data.py  ADDED  (+86 -0)
import re

import nltk
from nltk.corpus import stopwords, words
from nltk.stem import WordNetLemmatizer

# Fetch the corpora used below; nltk.download() is a no-op when the
# resource is already present locally.
nltk.download("stopwords")
nltk.download("wordnet")
nltk.download("words")

lemmatizer = WordNetLemmatizer()

stop_words = set(stopwords.words('english'))

# Materialise the English word list as a set once at import time: testing
# membership against the raw words.words() list for every token would make
# cleaning long texts extremely slow.
english_words = set(words.words())

contraction_mapping = {"ain't": "is not", "aren't": "are not", "can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not",
                       "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not",
                       "he'd": "he would", "he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is",
                       "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have", "I'm": "I am", "I've": "I have", "i'd": "i would",
                       "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have", "i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would",
                       "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have", "it's": "it is", "let's": "let us", "ma'am": "madam",
                       "mayn't": "may not", "might've": "might have", "mightn't": "might not", "mightn't've": "might not have", "must've": "must have",
                       "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have", "o'clock": "of the clock",
                       "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have",
                       "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is",
                       "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have", "so's": "so as",
                       "this's": "this is", "that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would",
                       "there'd've": "there would have", "there's": "there is", "here's": "here is", "they'd": "they would", "they'd've": "they would have",
                       "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have",
                       "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are",
                       "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are",
                       "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is",
                       "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have",
                       "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have",
                       "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all",
                       "y'all'd": "you all would", "y'all'd've": "you all would have", "y'all're": "you all are", "y'all've": "you all have",
                       "you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have",
                       "you're": "you are", "you've": "you have"}


def cleaned_complaints(text):
    """Clean one raw complaint string: strip mentions, links and punctuation,
    expand contractions, drop stopwords and non-dictionary tokens, and
    lemmatise what remains."""
    newString = re.sub(r'@[A-Za-z0-9]+', '', text)       # remove user mentions
    newString = re.sub("#", "", newString)               # remove hashtag symbol
    newString = ' '.join([contraction_mapping[t] if t in contraction_mapping else t
                          for t in newString.split(" ")])  # expand contractions
    newString = re.sub(r'http\S+', '', newString)        # remove links
    newString = re.sub(r"'s\b", "", newString)           # remove possessive 's
    letters_only = re.sub("[^a-zA-Z]", " ", newString)   # keep letters only
    lower_case = letters_only.lower()                    # lowercase everything
    tokens = [w for w in lower_case.split() if w not in stop_words]  # remove stopwords
    tokens = [x for x in tokens if x in english_words]   # keep dictionary words only
    return ' '.join(lemmatizer.lemmatize(i) for i in tokens)  # lemmatise and rejoin
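Below is a minimal usage sketch, not part of the commit above. It assumes pandas is installed and invents a DataFrame with a hypothetical "narrative" column of raw complaint text; only cleaned_complaints comes from clean_data.py.

# Usage sketch -- the DataFrame and its "narrative" column are hypothetical,
# included only to show how cleaned_complaints() might be applied.
import pandas as pd

from clean_data import cleaned_complaints

df = pd.DataFrame({"narrative": [
    "@BankSupport I can't believe my card's blocked! See https://example.com #frustrated",
]})

# apply() runs the cleaner row by row; each row is tokenised, filtered
# against the dictionary set, and lemmatised.
df["clean_narrative"] = df["narrative"].apply(cleaned_complaints)
print(df["clean_narrative"].iloc[0])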