File size: 1,780 Bytes
5a7343a
 
1c68463
5a7343a
1c68463
 
5a7343a
40423d5
1c68463
7784c61
5a7343a
1c68463
 
 
 
 
7784c61
5a7343a
 
 
 
1c68463
aba9e9b
7d400e6
5a7343a
 
1c68463
bad90d6
40423d5
f8d386e
bad90d6
 
 
070cd41
bad90d6
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import pandas as pd
from datasets import load_dataset
from transformers import AutoTokenizer


# Tokenize every split of each temporal-shift task with the RoBERTa tokenizer
# and collect per-split statistics: example count, count within the token
# budget, mean token length, and the covered date range.
tokenizer = AutoTokenizer.from_pretrained("roberta-base")
stats = []
tasks = [
    "emoji_temporal",
    "hate_temporal",
    "nerd_temporal",
    "ner_temporal",
    "topic_temporal",
    "sentiment_small_temporal",
]
for task in tasks:
    for split in ["train", "validation", "test"]:
        dataset = load_dataset("tweettemposhift/tweet_temporal_shift", task, split=split)
        df = dataset.to_pandas()
        if task == "nerd_temporal":
            # NERD pairs each tweet with a target entity and its definition,
            # so the model input is the three fields joined by SEP tokens.
            token_length = [
                len(tokenizer.tokenize(
                    f"{d['target']} {tokenizer.sep_token} {d['definition']} {tokenizer.sep_token} {d['text']}"
                ))
                for d in dataset
            ]
        else:
            token_length = [len(tokenizer.tokenize(t)) for t in dataset['text']]
        # Threshold 126 presumably leaves room for the two special tokens
        # (126 + 2 = 128) — matches the "< 128" column label; TODO confirm.
        within_budget = [length for length in token_length if length <= 126]
        date = pd.to_datetime(df.date).sort_values().values
        stats.append({
            "data": task,
            "split": split,
            "size": len(dataset),
            "size (token length < 128)": len(within_budget),
            "mean_token_length": sum(token_length) / len(token_length),
            "date": f'{str(date[0]).split("T")[0]} / {str(date[-1]).split("T")[0]}',
        })
df = pd.DataFrame(stats)
print(df)
# Render the collected statistics as a LaTeX table using human-readable
# task and split labels.
task_label = {
    "emoji_temporal": "Emoji",
    "hate_temporal": "Hate",
    "nerd_temporal": "NERD",
    "ner_temporal": "NER",
    "topic_temporal": "Topic",
    "sentiment_small_temporal": "Sentiment"
}
split_label = {"train": "Train", "validation": "Valid", "test": "Test"}
df.index = [task_label[name] for name in df.pop("data")]
df = df[["split", "size", "date"]]
df["split"] = [split_label[name] for name in df["split"]]
df.columns = [column.capitalize() for column in df.columns]
# Thousands separators for the size column, e.g. 12345 -> "12,345".
df['Size'] = df['Size'].map('{:,}'.format)
print(df.to_latex())