File size: 3,856 Bytes
c1b0800
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82293ed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
from collections import Counter
from typing import List

import datasets
import matplotlib.pyplot as plt
import pandas as pd
from constants import (event_centered_2_descriptions,
                       physical_entity_2_descriptions, relations_map,
                       social_intercation_2_descriptions)


def show_bar(relation: List):
    """Display a bar chart of relation frequencies.

    Counts how often each relation occurs, draws one bar per relation,
    and writes the count just above each bar before showing the figure.
    """
    counts = Counter(relation)
    labels = list(counts.keys())
    freqs = list(counts.values())

    plt.bar(labels, freqs)
    plt.xticks(rotation=25, fontsize=8)
    plt.yticks(fontsize=8)

    plt.xlabel('relations')
    plt.ylabel('numbers')
    plt.title('relations analysis')

    # Annotate each bar with its count, offset slightly above the bar top.
    for pos, freq in enumerate(freqs):
        plt.text(pos, freq + 10, str(freq), ha='center', fontsize=10)

    plt.show()

def read_file(data_path: str):
    """Load a headerless tab-separated triple file.

    Each row must have exactly three columns: event, relation, tail.
    Prints a preview of the loaded frame, then returns the three
    columns as parallel Python lists: (events, relations, tails).
    """
    frame = pd.read_csv(data_path, sep='\t', header=None,
                        names=['event', 'relation', 'tail'])
    print(frame.head())

    return (
        frame['event'].tolist(),
        frame['relation'].tolist(),
        frame['tail'].tolist(),
    )

def make_base_dataset(event: List[str], relation: List[str], tail: List[str]):
    """Collapse adjacent rows sharing (event, relation) into one row whose
    tail is a list, and attach a knowledge type plus relation description.

    Assumes the input rows are ordered so duplicate (event, relation)
    pairs are adjacent (as produced by the ATOMIC-style TSV files).

    Args:
        event: head events, one per input row.
        relation: relation names, one per input row.
        tail: tail phrases, one per input row.

    Returns:
        A pandas DataFrame with columns
        ['knowledge_type', 'event', 'relation', 'relation_description', 'tail'].

    Raises:
        ValueError: a relation is not listed in ``relations_map``.
        KeyError: a relation's knowledge type has no description table.
    """
    # Hoist loop-invariant lookups out of the loop: the original rebuilt the
    # flat relation list and rescanned relations_map for every row (O(n*m)).
    relation_2_type = {r: k for k, v in relations_map.items() for r in v}
    type_2_descriptions = {
        'social_intercation': social_intercation_2_descriptions,
        'physical_entity': physical_entity_2_descriptions,
        'event_centered': event_centered_2_descriptions,
    }

    new_event, new_relation, new_tail = [], [], []
    knowledge_type = []
    relation_description = []

    prev_event, prev_relation = None, None

    for idx, (ev, rel, tl) in enumerate(zip(event, relation, tail)):
        if idx > 0 and ev == prev_event and rel == prev_relation:
            # Same head and relation as the previous row: merge the tail.
            new_tail[-1].append(tl)
        else:
            if rel not in relation_2_type:
                raise ValueError(f'dont find match knowledge type named {rel}, please check it!')
            k = relation_2_type[rel]
            if k not in type_2_descriptions:
                raise KeyError(f"dont find match relation type named {rel} in dict, please check it!")

            new_event.append(ev)
            new_relation.append(rel)
            knowledge_type.append(k)
            relation_description.append(type_2_descriptions[k][rel])
            new_tail.append([tl])
        prev_event, prev_relation = ev, rel

    df = pd.DataFrame({
        'knowledge_type': knowledge_type,
        'event': new_event,
        'relation': new_relation,
        'relation_description': relation_description,
        'tail': new_tail,
    })

    print(df.head())
    return df

def get_dataset(data_path: str):
    """Build a HuggingFace Dataset from a TSV triple file at *data_path*."""
    events, relations, tails = read_file(data_path=data_path)
    frame = make_base_dataset(event=events, relation=relations, tail=tails)

    hf_dataset = datasets.Dataset.from_pandas(frame, split='train')
    print(hf_dataset)

    return hf_dataset

def upload_dataset(dataset, repo_id :str, access_token: str, private: bool):
    """Push *dataset* to the HuggingFace Hub repository *repo_id*.

    Authenticates with *access_token*; *private* controls repo visibility.
    """
    dataset.push_to_hub(repo_id=repo_id, token=access_token, private=private)

if __name__ == '__main__':
    # Build each split (dict literal evaluates values in order:
    # train, then dev, then test — same as the original sequence).
    splits = {
        'train': get_dataset('./dataset/train.tsv'),
        'validation': get_dataset('./dataset/dev.tsv'),
        'test': get_dataset('./dataset/test.tsv'),
    }

    dataset = datasets.DatasetDict(splits)
    print(dataset)

    upload_dataset(dataset, repo_id='', private=False, access_token='')