fake_real_data / preprocessing.py
import re
from nltk.corpus import stopwords  # requires the NLTK stopwords corpus: nltk.download('stopwords')
def preprocess_text(df):
    """
    Preprocesses the text column in a DataFrame by applying various cleaning operations.

    Args:
        df (pandas.DataFrame): The DataFrame containing the 'text' column to be preprocessed.

    Returns:
        pandas.DataFrame: The DataFrame with the cleaned 'text' column and with rows of
        fewer than three words dropped. The 'text' column of the input DataFrame is
        modified in place, but the row filtering only applies to the returned DataFrame.
    """
    # Remove URLs, user mentions and hashtags from the tweets
    # (hashtags must be removed before stripping non-alphanumeric characters,
    # otherwise the '#' is gone and the pattern no longer matches)
    df['text'] = df['text'].apply(lambda x: re.sub(r'http\S+', '', str(x)))  # remove URLs
    df['text'] = df['text'].apply(lambda x: re.sub(r'@\S+', '', str(x)))     # remove user mentions
    df['text'] = df['text'].apply(lambda x: re.sub(r'#\S+', '', str(x)))     # remove hashtags
    # Remove non-alphanumeric characters (this also strips punctuation) and convert text to lowercase
    df['text'] = df['text'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', str(x)))
    df['text'] = df['text'].apply(lambda x: x.lower())
    # Remove stop words (such as "a", "an", "the", "is", "of", etc.)
    stop_words = set(stopwords.words('english'))
    df['text'] = df['text'].apply(lambda x: ' '.join([word for word in x.split() if word not in stop_words]))
    # Remove any remaining leading/trailing whitespace
    df['text'] = df['text'].apply(lambda x: x.strip())
    # Drop observations with fewer than three words
    df = df[df['text'].apply(lambda x: len(x.split()) >= 3)]
    return df
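
# Minimal usage sketch (not part of the original script): assuming a CSV file with a
# 'text' column, one could run the cleaning like this. The file name
# 'fake_real_data.csv' is hypothetical; adjust it to the actual dataset.
#
# import pandas as pd
# import nltk
# nltk.download('stopwords')                 # fetch the stopwords corpus once
# df = pd.read_csv('fake_real_data.csv')
# df = preprocess_text(df)                   # reassign: row filtering happens on the returned frame
# print(df['text'].head())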