parquet-converter committed on
Commit fe142b6
1 Parent(s): aa08036

Update parquet files

.gitattributes DELETED
@@ -1,54 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
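The deleted .gitattributes listed the glob patterns that Git LFS stored as pointer files instead of regular blobs (archives, model weights, audio, images, and parquet). As a rough illustration only (not part of the repo), the sketch below checks file names against a small subset of these patterns, assuming plain fnmatch globbing, which covers the simple "*.ext" entries but not gitattributes-specific syntax such as "saved_model/**/*":

# Illustrative sketch: which paths would a few of the LFS patterns above cover?
# LFS_PATTERNS and tracked_by_lfs are hypothetical names, not repo code.
from fnmatch import fnmatch

LFS_PATTERNS = ["*.parquet", "*.bin", "*.wav", "*.png"]  # subset for illustration

def tracked_by_lfs(path: str) -> bool:
    """True if the file name matches any LFS-tracked pattern."""
    name = path.rsplit("/", 1)[-1]
    return any(fnmatch(name, pattern) for pattern in LFS_PATTERNS)

print(tracked_by_lfs("sen_en/feed-test.parquet"))  # True
print(tracked_by_lfs("feed.py"))                   # False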
 
feed.py DELETED
@@ -1,183 +0,0 @@
- from curses.ascii import isalpha
- import os
- import csv
- import re
- from typing import Sequence
- import json
- import ast
- import datasets
-
-
-
- _DESCRIPTION = """\
- Example dataset toxic
- """
- _DATA_URL = "https://drive.google.com/uc?id=1Ldnn3YYt_ErYq4ZGSon1MvcP3uJO0_PX"
- _DATA_ENG = "https://drive.google.com/uc?id=1p-iyKTRhUXaDmqsx69Zvb4ivjaCmVVr8"
-
- _TEXT = {
-     "sen_vi": [" thất vọng", " bình thường", " hài lòng"],
-     "sen_en": [" negative", " neutral", " positive"],
-     "top_vi": [" giảng viên", " môn học", " phòng học", " tổng thể"],
-     "top_en": [" lecturer", " curriculum", " facility", " general"],
-     "top_en_": ["lecturer", "curriculum", "facility", "general"],
-     "sen_en_": ["negative", "neutral", "positive"],
-     "sen_vi_": ["thất vọng", "bình thường", "hài lòng"],
-     "top_vi_": ["giảng viên", "môn học", "phòng học", "tổng thể"],
- }
-
- class Config(datasets.BuilderConfig):
-     """BuilderConfig for GLUE."""
-
-     def __init__(self, data_url, **kwargs):
-         """BuilderConfig
-         Args:
-           data_url: `string`, url to the dataset (word or raw level)
-           **kwargs: keyword arguments forwarded to super.
-         """
-         super(Config, self).__init__(
-             version=datasets.Version(
-                 "1.0.0",
-             ),
-             **kwargs,
-         )
-         self.data_url = data_url
-
-
- class Guess(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("0.1.0")
-     BUILDER_CONFIGS = [
-         Config(
-             name="top_vi",
-             data_url=_DATA_URL,
-             description="data",
-         ),
-         Config(
-             name="top_en",
-             data_url=_DATA_ENG,
-             description="data",
-         ),
-         Config(
-             name="sen_vi",
-             data_url=_DATA_URL,
-             description="data",
-         ),
-         Config(
-             name="sen_en",
-             data_url=_DATA_ENG,
-             description="data",
-         ),
-         Config(
-             name="sen_en_",
-             data_url=_DATA_ENG,
-             description="data",
-         ),
-         Config(
-             name="top_en_",
-             data_url=_DATA_ENG,
-             description="data",
-         ),
-         Config(
-             name="top_vi_",
-             data_url=_DATA_URL,
-             description="data",
-         ),
-         Config(
-             name="sen_vi_",
-             data_url=_DATA_URL,
-             description="data",
-         ),
-     ]
-
-     def _info(self):
-         # TODO(wikitext): Specifies the datasets.DatasetInfo object
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # datasets.features.FeatureConnectors
-             features=datasets.Features(
-                 {
-                     "text": datasets.Value("string"),
-                     "classes": datasets.Sequence(datasets.Value("string")),
-                     "target": datasets.Value("int8")
-                 }
-             ),
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # TODO(wikitext): Downloads the data and defines the splits
-         # dl_manager is a datasets.download.DownloadManager that can be used to
-         # download and extract URLs
-         data_file = dl_manager.download(self.config.data_url)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"data_file": data_file, "type": self.config.name},
-             ),
-         ]
-
-
-     def _generate_examples(self, data_file, type):
-
-         """Yields examples."""
-         # TODO(wikitext): Yields (key, example) tuples from the dataset
-         with open(data_file, 'r') as f:
-             lines = list(f)
-
-         if type[:3] == 'sen':
-             _CLASS = {
-                 "negative": 0,
-                 "neutral": 1,
-                 "positive": 2,
-             }
-         else:
-             _CLASS = {
-                 "lecturer": 0,
-                 "curriculum": 1,
-                 "facility": 2,
-                 "others": 3
-             }
-
-         TEXT_ = _TEXT[type]
-
-         for idx, line in enumerate(lines):
-             json_object = ast.literal_eval(line)
-             if type[:3] == 'top':
-                 label = json_object['topic']
-             else:
-                 label = json_object['sentiment']
-
-             if label not in _CLASS:
-                 continue
-
-             _text = json_object['text']
-             _classes = []
-
-             _PROMPT = {
-                 "sen_vi": f'{_text} Cảm thấy ',
-                 "sen_en": f'{_text} The sentiment of this sentence is ',
-                 "top_vi": f'Nói về ',
-                 "top_en": f'Comment about ',
-                 "sen_en_": f'{_text} The sentiment of this sentence is ',
-                 "top_en_": f'Comment about ',
-                 "sen_vi_": f'{_text} Cảm thấy ',
-                 "top_vi_": f'Nói về ',
-             }
-
-             for _cl in TEXT_:
-                 if type[:3] == 'sen':
-                     _classes.append(_cl)
-                 else:
-                     _classes.append(f'{_cl}. {_text}')
-
-
-             yield idx, {
-                 "text" : _PROMPT[type],
-                 "classes" : _classes,
-                 "target" : _CLASS[label]
-             }
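The deleted feed.py was the dataset loading script: each config name ("sen_*" for sentiment, "top_*" for topic, in Vietnamese or English variants) selected a prompt template and a label-to-index mapping, and every config exposed a single test split. Below is a minimal sketch of how such a script-based dataset is typically loaded with the datasets library; "org/dataset-name" is a placeholder repo id, not the actual path:

# Sketch only: loading one configuration of the script-based dataset.
# "org/dataset-name" is a placeholder; recent datasets releases may also
# require trust_remote_code=True for repos that ship a loading script.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", "sen_en", split="test")
print(ds.features)  # text (string), classes (sequence of string), target (int8)
print(ds[0]["text"], ds[0]["classes"], ds[0]["target"])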
 
sen_en/feed-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:85d291b7d99e1cd0d8e53805a17a38f4b57913d298b23ef452883a4645a9e14d
+ size 136535
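Each added parquet file is committed as a Git LFS pointer, so the three lines above are the file's entire in-repo contents: the LFS spec version, the SHA-256 of the real payload, and its size in bytes. A small sketch of reading those fields back out of a pointer (the parse_lfs_pointer helper is hypothetical, not part of the repo):

# Sketch: split a Git LFS pointer into its version / oid / size fields.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "oid": fields["oid"].split(":", 1)[1],  # drop the "sha256:" prefix
        "size": int(fields["size"]),
    }

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:85d291b7d99e1cd0d8e53805a17a38f4b57913d298b23ef452883a4645a9e14d\n"
    "size 136535\n"
)
print(parse_lfs_pointer(pointer))  # {'version': ..., 'oid': '85d2...', 'size': 136535}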
sen_en_/feed-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2030e7a2ae7b2e1764a146e360c981551e1705f18c09ffa6790d118deb0f6eba
+ size 136503
sen_vi/feed-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c3c0d296643401d327c06a449a5a6af1745e07de6b76e0da1ff5180b6c6b24f
+ size 141100
sen_vi_/feed-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f1465df28182faae254ec93d46e1eae906617b12a566ffa7f3f15f4829ea2e6
+ size 141068
top_en/feed-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57b10f9de277d777d98dec4845758e2a079c7c3055f82910ed5598945ac173e4
+ size 253007
top_en_/feed-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:526990a0f80f00dfb25f15a5f3f43d6394af0060e1e5f920c2dd478e671fa3fc
+ size 250096
top_vi/feed-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4df581da38f46dc0ef6a6dafcc45dd4966b6359bc53e61fd72e5bc07e6458c1e
+ size 275056
top_vi_/feed-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f46d6e2b2d65f8c013c461bb45ead863a0caa4fd27192b785b762cb125a4359
+ size 269747
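With the test split of every configuration exported to a per-config parquet file, the data can be read without executing feed.py, for example through the generic parquet builder. A sketch, assuming the files are laid out as in this commit; adjust the path or repo id to your setup:

# Sketch: read one converted configuration directly from its parquet file.
from datasets import load_dataset

ds = load_dataset(
    "parquet",
    data_files={"test": "sen_en/feed-test.parquet"},
    split="test",
)
print(len(ds), ds.column_names)  # expected columns: text, classes, target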