autotrain-data-processor committed on commit 758ea4b · 1 Parent(s): 3155b77

Processed data from AutoTrain data processor (2023-02-15 03:19)
README.md CHANGED
@@ -22,67 +22,105 @@ A sample from this dataset looks as follows:
  ```json
  [
  {
- "feat_Unnamed: 0": 9180,
+ "feat_Unnamed: 0": 1104,
  "tokens": [
- "keren",
- "banget",
- "projectnya",
- "\ud83d\udc4f"
+ "@user",
+ "salah",
+ "satu",
+ "dari",
+ "4",
+ "anak",
+ "dr",
+ "sunardi",
+ "ada",
+ "yg",
+ "berprofesi",
+ "sbg",
+ "dokter",
+ "juga",
+ ",",
+ "lulusan",
+ "unair",
+ ",",
+ "sudah",
+ "selesai",
+ "koas",
+ "dan",
+ "intern",
+ "tolong",
+ "disupport",
+ "pak",
+ "anak",
+ "beliau"
  ],
  "tags": [
+ 6,
+ 1,
+ 1,
+ 1,
+ 6,
+ 1,
+ 6,
+ 6,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 6,
+ 1,
+ 6,
+ 6,
+ 1,
+ 1,
  1,
  1,
+ 0,
+ 1,
  3,
- 6
+ 1,
+ 1,
+ 1
  ]
  },
  {
- "feat_Unnamed: 0": 1911,
+ "feat_Unnamed: 0": 239,
  "tokens": [
- "ygshop",
- "juga",
- "lolaaaa",
- ".",
- "mau",
- "order",
- "piye",
- "lek",
- "loadinge",
- "ngene",
- "!",
- "ate",
- "tuku",
- "piye",
- "lek",
- "loadingnge",
- "ngene",
+ "@user",
+ "kamu",
+ "pake",
+ "apa",
+ "toh",
  "?",
- "welcome",
- "back",
- "albummmmm",
- "!!"
+ "aku",
+ "pake",
+ "xl",
+ "banter",
+ "lho",
+ "di",
+ "apartemen",
+ "pun",
+ "bisa",
+ "download",
+ "yutub"
  ],
  "tags": [
- 3,
- 1,
- 3,
  6,
  1,
  1,
- 2,
- 2,
- 5,
- 2,
+ 1,
+ 1,
  6,
- 2,
- 2,
- 2,
- 2,
- 5,
- 2,
+ 1,
+ 1,
  6,
- 0,
- 0,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
  0,
  6
  ]
@@ -108,5 +146,5 @@ This dataset is split into a train and validation split. The split sizes are as

  | Split name | Num samples |
  | ------------ | ------------------- |
- | train | 7868 |
- | valid | 3151 |
+ | train | 1105 |
+ | valid | 438 |
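The README diff above replaces the old samples and split counts with the newly processed token-classification data (word-level `tokens` paired with integer `tags`). A minimal sketch for inspecting the new splits locally, assuming the `processed/train` and `processed/valid` folders follow the `datasets` save-to-disk layout implied by the `dataset.arrow` + `state.json` files in this commit:

```python
# Minimal sketch: load the processed splits and look at one example.
# Assumption: processed/train and processed/valid are directories saved in the
# Hugging Face `datasets` on-disk format (dataset.arrow, state.json, dataset_info.json).
from datasets import Dataset

train = Dataset.load_from_disk("processed/train")
valid = Dataset.load_from_disk("processed/valid")

# Expected per the updated README table: 1105 train rows, 438 valid rows.
print(train.num_rows, valid.num_rows)

# Each example pairs tokens with integer tag ids, as in the README sample above.
example = train[0]
print(list(zip(example["tokens"], example["tags"])))
```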
processed/train/dataset.arrow CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:27c5b9124d495360fa9cdec467aebf95e2758ea3f8286679631822e230177b51
- size 3314560
+ oid sha256:490450efddfc9c95f6615e119c3f3da1c6c93d4e4c4b4609e28b5d62d29c4fa3
+ size 461048
processed/train/dataset_info.json CHANGED
@@ -34,8 +34,8 @@
  "splits": {
  "train": {
  "name": "train",
- "num_bytes": 3310830,
- "num_examples": 7868,
+ "num_bytes": 459603,
+ "num_examples": 1105,
  "dataset_name": null
  }
  }
processed/train/state.json CHANGED
@@ -4,7 +4,7 @@
  "filename": "dataset.arrow"
  }
  ],
- "_fingerprint": "4c2eb59d217b246f",
+ "_fingerprint": "0806f1db0a02927a",
  "_format_columns": [
  "feat_Unnamed: 0",
  "tags",
processed/valid/dataset.arrow CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7cd84f65758b269e6f741b72a0a30187bad65f7278dff5eb9624efc2f06dcbe9
- size 1361688
+ oid sha256:c21871efd710ab1a1faa51521084cb73a776e0d8e67ec6f9734b204bb11a96af
+ size 199960
processed/valid/dataset_info.json CHANGED
@@ -34,8 +34,8 @@
  "splits": {
  "valid": {
  "name": "valid",
- "num_bytes": 1359490,
- "num_examples": 3151,
+ "num_bytes": 198879,
+ "num_examples": 438,
  "dataset_name": null
  }
  }
processed/valid/state.json CHANGED
@@ -4,7 +4,7 @@
  "filename": "dataset.arrow"
  }
  ],
- "_fingerprint": "a5a1eb885265149a",
+ "_fingerprint": "db8382fd7efa9342",
  "_format_columns": [
  "feat_Unnamed: 0",
  "tags",