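"""Error analysis for topic classification under temporal shift.

Compares each model's predictions from the temporally split training setup
against its counterparts trained on random splits, collects the examples that
only the temporally trained model gets wrong, and writes a per-example
summary to experiments/analysis/topic.csv.
"""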
import os
import json

import numpy as np
import pandas as pd
from datasets import load_dataset

# Output location for the analysis and root directory of the prediction files.
os.makedirs("experiments/analysis", exist_ok=True)
root_dir = "experiments/prediction_files"
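# Mapping from label indices in gold_label_list to topic names.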
id_to_label = {
    '0': 'arts_&_culture',
    '1': 'business_&_entrepreneurs',
    '2': 'celebrity_&_pop_culture',
    '3': 'diaries_&_daily_life',
    '4': 'family',
    '5': 'fashion_&_style',
    '6': 'film_tv_&_video',
    '7': 'fitness_&_health',
    '8': 'food_&_dining',
    '9': 'gaming',
    '10': 'learning_&_educational',
    '11': 'music',
    '12': 'news_&_social_concern',
    '13': 'other_hobbies',
    '14': 'relationships',
    '15': 'science_&_technology',
    '16': 'sports',
    '17': 'travel_&_adventure',
    '18': 'youth_&_student_life'
}
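# Temporal test splits and the models under comparison.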
splits = ["test_1", "test_2", "test_3", "test_4"]
model_list = [
    "roberta-base",
    "bertweet-base",
    "bernice",
    "roberta-large",
    "bertweet-large",
    "twitter-roberta-base-2019-90m",
    "twitter-roberta-base-dec2020",
    "twitter-roberta-base-2021-124m",
    "twitter-roberta-base-2022-154m",
    "twitter-roberta-large-2022-154m"
]
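# Gold labels per split as sets of topic names, for overlap-based scoring.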
references = {}
for s in splits:
    data = load_dataset("tweettemposhift/tweet_temporal_shift", "topic_temporal", split=s)
    references[s] = [{id_to_label[str(n)] for n, k in enumerate(i) if k == 1} for i in data['gold_label_list']]

# For each model and seed, flag the examples that the temporal-split model
# gets wrong while the random-split model trained with that seed gets right.
count = {}     # "{model}_{seed}" -> per-example error flags
pred_tmp = {}  # "{model}_{seed}" -> temporal-model predictions on flagged examples
for model_m in model_list:
    # A prediction counts as correct if it shares at least one topic with the gold set.
    flags = []
    pred_all = []
    for s in splits:
        with open(f"{root_dir}/topic-topic_temporal-{model_m}/{s}.jsonl") as f:
            pred = [set(json.loads(i)["label"]) for i in f.read().split('\n') if len(i)]
            flags += [len(a.intersection(b)) > 0 for a, b in zip(references[s], pred)]
            pred_all += pred
    for seed_s in range(3):
        # Correctness of the random-split models with this seed, evaluated in the
        # same test_1..test_4 order so the flags align with `flags` above.
        flags_rand = []
        for random_r in range(4):
            with open(f"{root_dir}/topic-topic_random{random_r}_seed{seed_s}-{model_m}/test_{random_r + 1}.jsonl") as f:
                pred = [set(json.loads(i)["label"]) for i in f.read().split('\n') if len(i)]
                label = references[f"test_{random_r + 1}"]
                flags_rand += [len(a.intersection(b)) > 0 for a, b in zip(label, pred)]
        # True where the temporal model failed but the random-split model succeeded.
        tmp_flag = [not x and y for x, y in zip(flags, flags_rand)]
        count[f"{model_m}_{seed_s}"] = tmp_flag
        pred_tmp[f"{model_m}_{seed_s}"] = [list(x) if y else [] for x, y in zip(pred_all, tmp_flag)]

# Aggregate over models and seeds: per example, how often each topic was
# predicted in an error case, and how many runs flagged the example.
df_tmp = pd.DataFrame(
    {"errors": [dict(zip(*np.unique(i, return_counts=True))) for i in pd.DataFrame(pred_tmp).sum(1)]}
)
df_tmp["error_count"] = pd.DataFrame(count).sum(1).values
# Attach gold labels and raw text for manual inspection.
gold_label = []
text = []
for s in splits:
    data = load_dataset("tweettemposhift/tweet_temporal_shift", "topic_temporal", split=s)
    gold_label += data['gold_label_list']
    text += data['text']
df_tmp["true_label"] = [", ".join([id_to_label[str(n)] for n, k in enumerate(i) if k == 1]) for i in gold_label]
df_tmp["text"] = text
# Most frequently flagged examples first.
df_tmp.sort_values("error_count", ascending=False).to_csv("experiments/analysis/topic.csv")