"""Aggregate per-section predictions under ./result into a metrics table.

For every method directory that contains both msg.jsonl and nomsg.jsonl,
compute accuracy, F1, and MCC per language (and over all languages), then
write the combined table to result.csv.
"""
import os
from collections import defaultdict

import jsonlines
import pandas as pd
from sklearn.metrics import (
    accuracy_score,
    confusion_matrix,
    f1_score,
    matthews_corrcoef,
    precision_score,
    recall_score,
)

RESULT_ROOTS = "./result"

LANGUAGE_MAP = {
    "all": "All",
    "C": "C/C++",
    "C++": "C/C++",
    "Java": "Java",
    "Python": "Python",
}
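
# Expected record shape in msg.jsonl / nomsg.jsonl, inferred from the fields
# read below (anything beyond these fields is an assumption):
#   {"language": "Java", "sections": [{"related": 1, "predict": 0}, ...]}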

table_dict = {}

for method in sorted(os.listdir(RESULT_ROOTS)):

    # Per-language labels/predictions for the run with messages ("msg"),
    # the run without ("nomsg"), and the union of both ("mix").
    msg_labels = defaultdict(list)
    msg_predicts = defaultdict(list)
    msg_metrics = {}

    nomsg_labels = defaultdict(list)
    nomsg_predicts = defaultdict(list)
    nomsg_metrics = {}

    mix_labels = defaultdict(list)
    mix_predicts = defaultdict(list)
    mix_metrics = {}

    msg_result_file = os.path.join(RESULT_ROOTS, method, "msg.jsonl")
    nomsg_result_file = os.path.join(RESULT_ROOTS, method, "nomsg.jsonl")

    # Skip methods that have not produced both result files.
    if not os.path.exists(msg_result_file) or not os.path.exists(nomsg_result_file):
        continue
    
    def collect(path, labels, predicts):
        # Accumulate section-level labels and predictions, bucketed both by
        # language and under "All"; every record also feeds the combined
        # ("mix") buckets shared by the msg and nomsg runs.
        with jsonlines.open(path) as reader:
            for item in reader:
                lang = LANGUAGE_MAP[item["language"]]
                for section in item["sections"]:
                    for bucket in ("All", lang):
                        labels[bucket].append(section["related"])
                        predicts[bucket].append(section["predict"])
                        mix_labels[bucket].append(section["related"])
                        mix_predicts[bucket].append(section["predict"])

    collect(msg_result_file, msg_labels, msg_predicts)
    collect(nomsg_result_file, nomsg_labels, nomsg_predicts)
    
    
    # "C" and "C++" both map to "C/C++", so dedupe before iterating.
    for lang in dict.fromkeys(LANGUAGE_MAP.values()):
        for metrics, labels, predicts in (
            (msg_metrics, msg_labels, msg_predicts),
            (nomsg_metrics, nomsg_labels, nomsg_predicts),
            (mix_metrics, mix_labels, mix_predicts),
        ):
            accuracy = accuracy_score(labels[lang], predicts[lang])
            # precision = precision_score(labels[lang], predicts[lang])
            # recall = recall_score(labels[lang], predicts[lang])
            f1 = f1_score(labels[lang], predicts[lang])
            mcc = matthews_corrcoef(labels[lang], predicts[lang])
            # sklearn's confusion_matrix ravels to (tn, fp, fn, tp),
            # not (tp, fp, tn, fn) as the original unpacking assumed.
            tn, fp, fn, tp = confusion_matrix(labels[lang], predicts[lang]).ravel()
            fpr = fp / (fp + tn + 1e-6)

            metrics.update({
                f"{lang}_Acc": f"{accuracy * 100:.2f}\\%",
                # f"{lang}_P": f"{precision * 100:.2f}\\%",
                # f"{lang}_R": f"{recall * 100:.2f}\\%",
                f"{lang}_F1": f"{f1 * 100:.2f}\\%",
                # f"{lang}_FPR": f"{fpr * 100:.2f}\\%",
                f"{lang}_MCC": f"{mcc * 100:.2f}\\%",
            })
    
    # Report the combined metrics for every method; for "patchouli" also
    # keep the msg-only and nomsg-only breakdowns as separate rows.
    table_dict[method] = mix_metrics
    if method == "patchouli":
        table_dict[f"{method}_msg"] = msg_metrics
        table_dict[f"{method}_nomsg"] = nomsg_metrics
    
    
# One row per method, one column per "<language>_<metric>" pair.
df = pd.DataFrame(table_dict).T
df.to_csv("result.csv")
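
# Usage sketch (the script name is an assumption): run from the directory
# that contains ./result, e.g.
#   python aggregate_metrics.py
# Percentages are written as "\%", presumably so the CSV cells can be pasted
# directly into a LaTeX table.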