import snscrape.modules.twitter as sntwitter
import pandas as pd
import re


def scrape_tweets(query, max_tweets=10):
    """Scrape tweets matching `query`; pass max_tweets=-1 to scrape without a limit."""
    tweets_list = []
    for i, tweet in enumerate(sntwitter.TwitterSearchScraper(query).get_items()):
        if max_tweets != -1 and i >= int(max_tweets):
            break
        tweets_list.append([tweet.date, tweet.id, tweet.content, tweet.user.username,
                            tweet.likeCount, tweet.retweetCount, tweet.replyCount,
                            tweet.quoteCount, tweet.url, tweet.lang])

    df = pd.DataFrame(tweets_list, columns=['Datetime', 'Tweet Id', 'Text', 'Username', 'Likes',
                                            'Retweets', 'Replies', 'Quotes', 'URL', 'Language'])
    # Keep only tweets Twitter tagged as Indonesian ("in" is Twitter's Indonesian language code).
    df = df[df["Language"] == "in"]
    return df

def remove_unnecessary_char(text):
    # Remove placeholder tokens such as [USERNAME] and [URL], then collapse repeated spaces.
    text = re.sub(r"\[USERNAME\]", " ", text)
    text = re.sub(r"\[URL\]", " ", text)
    text = re.sub(r"\[SENSITIVE-NO\]", " ", text)
    text = re.sub(r"  +", " ", text)
    return text

def preprocess_tweet(text):
    # Strip newlines, @mentions, URLs, and slashes, then collapse repeated spaces.
    text = re.sub(r'\n', ' ', text)
    text = re.sub(r'^(@\w+ ?)+', ' ', text)
    text = re.sub(r'@\w+', ' ', text)
    text = re.sub(r'((www\.[^\s]+)|(https?://[^\s]+))', ' ', text)
    text = re.sub(r'/', ' ', text)
    text = re.sub(r'  +', ' ', text)
    return text

def remove_nonaplhanumeric(text):
    # Replace every run of non-alphanumeric characters with a single space.
    text = re.sub(r'[^0-9a-zA-Z]+', ' ', text)
    return text

def preprocess_text(text):
    # Full cleaning pipeline: tweet-specific cleanup, placeholder removal,
    # non-alphanumeric stripping, then lowercasing.
    text = preprocess_tweet(text)
    text = remove_unnecessary_char(text)
    text = remove_nonaplhanumeric(text)
    text = text.lower()
    return text