KoichiYasuoka committed on
Commit 223b87d · 1 Parent(s): c0aefe7

release after tokenizer refinement

Files changed (9)
  1. README.md +31 -0
  2. config.json +519 -0
  3. maker.py +114 -0
  4. pytorch_model.bin +3 -0
  5. special_tokens_map.json +51 -0
  6. tokenizer.json +0 -0
  7. tokenizer.model +3 -0
  8. tokenizer_config.json +171 -0
  9. ud.py +155 -0
README.md ADDED
@@ -0,0 +1,31 @@
1
+ ---
2
+ language:
3
+ - "ja"
4
+ tags:
5
+ - "japanese"
6
+ - "pos"
7
+ - "dependency-parsing"
8
+ - "modernbert"
9
+ base_model: sbintuitions/modernbert-ja-130m
10
+ datasets:
11
+ - "universal_dependencies"
12
+ license: "mit"
13
+ pipeline_tag: "token-classification"
14
+ widget:
15
+ - text: "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"
16
+ ---
17
+
18
+ # modernbert-japanese-130m-ud-embeds
19
+
20
+ ## Model Description
21
+
22
+ This is a ModernBERT model for POS-tagging and dependency-parsing, derived from [modernbert-ja-130m](https://huggingface.co/sbintuitions/modernbert-ja-130m) and fine-tuned on [UD_Japanese-GSDLUW](https://github.com/UniversalDependencies/UD_Japanese-GSDLUW).
23
+
24
+ ## How to Use
25
+
26
+ ```py
27
+ from transformers import pipeline
28
+ nlp=pipeline("universal-dependencies","KoichiYasuoka/modernbert-japanese-130m-ud-embeds",trust_remote_code=True)
29
+ print(nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている"))
30
+ ```
31
+
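Besides `universal-dependencies`, the `config.json` added below also registers a `upos` custom pipeline (plain POS-tagging via `ud.BellmanFordTokenClassificationPipeline`). A minimal usage sketch, assuming it loads the same way as the example above:

```py
from transformers import pipeline
nlp=pipeline("upos","KoichiYasuoka/modernbert-japanese-130m-ud-embeds",trust_remote_code=True)
print(nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている"))
```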
config.json ADDED
@@ -0,0 +1,519 @@
1
+ {
2
+ "architectures": [
3
+ "ModernBertForTokenClassification"
4
+ ],
5
+ "attention_bias": false,
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 1,
8
+ "classifier_activation": "gelu",
9
+ "classifier_bias": false,
10
+ "classifier_dropout": 0.0,
11
+ "classifier_pooling": "cls",
12
+ "cls_token_id": 6,
13
+ "custom_pipelines": {
14
+ "upos": {
15
+ "impl": "ud.BellmanFordTokenClassificationPipeline",
16
+ "pt": "AutoModelForTokenClassification"
17
+ },
18
+ "universal-dependencies": {
19
+ "impl": "ud.UniversalDependenciesPipeline",
20
+ "pt": "AutoModelForTokenClassification"
21
+ }
22
+ },
23
+ "decoder_bias": true,
24
+ "deterministic_flash_attn": false,
25
+ "embedding_dropout": 0.0,
26
+ "eos_token_id": 2,
27
+ "global_attn_every_n_layers": 3,
28
+ "global_rope_theta": 160000.0,
29
+ "gradient_checkpointing": false,
30
+ "hidden_activation": "gelu",
31
+ "hidden_size": 512,
32
+ "id2label": {
33
+ "0": "ADJ",
34
+ "1": "ADJ.",
35
+ "2": "ADJ|_",
36
+ "3": "ADJ|l-acl",
37
+ "4": "ADJ|l-advcl",
38
+ "5": "ADJ|l-amod",
39
+ "6": "ADJ|l-ccomp",
40
+ "7": "ADJ|l-csubj",
41
+ "8": "ADJ|l-csubj:outer",
42
+ "9": "ADJ|l-nmod",
43
+ "10": "ADJ|l-nsubj",
44
+ "11": "ADJ|l-obj",
45
+ "12": "ADJ|l-obl",
46
+ "13": "ADJ|r-acl",
47
+ "14": "ADJ|r-amod",
48
+ "15": "ADJ|r-dep",
49
+ "16": "ADJ|root",
50
+ "17": "ADP",
51
+ "18": "ADP.",
52
+ "19": "ADP|_",
53
+ "20": "ADP|l-case",
54
+ "21": "ADP|r-case",
55
+ "22": "ADP|r-fixed",
56
+ "23": "ADV",
57
+ "24": "ADV.",
58
+ "25": "ADV|_",
59
+ "26": "ADV|l-advcl",
60
+ "27": "ADV|l-advmod",
61
+ "28": "ADV|l-obj",
62
+ "29": "ADV|r-dep",
63
+ "30": "ADV|root",
64
+ "31": "AUX",
65
+ "32": "AUX.",
66
+ "33": "AUX|Polarity=Neg|_",
67
+ "34": "AUX|Polarity=Neg|r-aux",
68
+ "35": "AUX|Polarity=Neg|r-fixed",
69
+ "36": "AUX|_",
70
+ "37": "AUX|r-aux",
71
+ "38": "AUX|r-cop",
72
+ "39": "AUX|r-fixed",
73
+ "40": "AUX|root",
74
+ "41": "B-ADJ",
75
+ "42": "B-ADJ.",
76
+ "43": "B-ADP",
77
+ "44": "B-ADP.",
78
+ "45": "B-ADV",
79
+ "46": "B-ADV.",
80
+ "47": "B-AUX",
81
+ "48": "B-AUX.",
82
+ "49": "B-CCONJ",
83
+ "50": "B-CCONJ.",
84
+ "51": "B-DET",
85
+ "52": "B-DET.",
86
+ "53": "B-INTJ",
87
+ "54": "B-INTJ.",
88
+ "55": "B-NOUN",
89
+ "56": "B-NOUN.",
90
+ "57": "B-NUM",
91
+ "58": "B-NUM.",
92
+ "59": "B-PART",
93
+ "60": "B-PART.",
94
+ "61": "B-PRON",
95
+ "62": "B-PRON.",
96
+ "63": "B-PROPN",
97
+ "64": "B-PROPN.",
98
+ "65": "B-PUNCT",
99
+ "66": "B-PUNCT.",
100
+ "67": "B-SCONJ",
101
+ "68": "B-SCONJ.",
102
+ "69": "B-SYM",
103
+ "70": "B-SYM.",
104
+ "71": "B-VERB",
105
+ "72": "B-VERB.",
106
+ "73": "B-X",
107
+ "74": "B-X.",
108
+ "75": "CCONJ",
109
+ "76": "CCONJ.",
110
+ "77": "CCONJ|_",
111
+ "78": "CCONJ|l-cc",
112
+ "79": "CCONJ|r-cc",
113
+ "80": "DET",
114
+ "81": "DET.",
115
+ "82": "DET|_",
116
+ "83": "DET|l-det",
117
+ "84": "I-ADJ",
118
+ "85": "I-ADJ.",
119
+ "86": "I-ADP",
120
+ "87": "I-ADP.",
121
+ "88": "I-ADV",
122
+ "89": "I-ADV.",
123
+ "90": "I-AUX",
124
+ "91": "I-AUX.",
125
+ "92": "I-CCONJ",
126
+ "93": "I-CCONJ.",
127
+ "94": "I-DET",
128
+ "95": "I-DET.",
129
+ "96": "I-INTJ",
130
+ "97": "I-INTJ.",
131
+ "98": "I-NOUN",
132
+ "99": "I-NOUN.",
133
+ "100": "I-NUM",
134
+ "101": "I-NUM.",
135
+ "102": "I-PART",
136
+ "103": "I-PART.",
137
+ "104": "I-PRON",
138
+ "105": "I-PRON.",
139
+ "106": "I-PROPN",
140
+ "107": "I-PROPN.",
141
+ "108": "I-PUNCT",
142
+ "109": "I-PUNCT.",
143
+ "110": "I-SCONJ",
144
+ "111": "I-SCONJ.",
145
+ "112": "I-SYM",
146
+ "113": "I-SYM.",
147
+ "114": "I-VERB",
148
+ "115": "I-VERB.",
149
+ "116": "I-X",
150
+ "117": "I-X.",
151
+ "118": "INTJ",
152
+ "119": "INTJ.",
153
+ "120": "INTJ|_",
154
+ "121": "INTJ|l-discourse",
155
+ "122": "INTJ|r-discourse",
156
+ "123": "INTJ|root",
157
+ "124": "NOUN",
158
+ "125": "NOUN.",
159
+ "126": "NOUN|Polarity=Neg|_",
160
+ "127": "NOUN|Polarity=Neg|l-obl",
161
+ "128": "NOUN|Polarity=Neg|root",
162
+ "129": "NOUN|_",
163
+ "130": "NOUN|l-acl",
164
+ "131": "NOUN|l-advcl",
165
+ "132": "NOUN|l-ccomp",
166
+ "133": "NOUN|l-compound",
167
+ "134": "NOUN|l-csubj",
168
+ "135": "NOUN|l-csubj:outer",
169
+ "136": "NOUN|l-nmod",
170
+ "137": "NOUN|l-nsubj",
171
+ "138": "NOUN|l-nsubj:outer",
172
+ "139": "NOUN|l-obj",
173
+ "140": "NOUN|l-obl",
174
+ "141": "NOUN|r-compound",
175
+ "142": "NOUN|r-nmod",
176
+ "143": "NOUN|r-nsubj",
177
+ "144": "NOUN|root",
178
+ "145": "NUM",
179
+ "146": "NUM.",
180
+ "147": "NUM|_",
181
+ "148": "NUM|l-advcl",
182
+ "149": "NUM|l-compound",
183
+ "150": "NUM|l-nmod",
184
+ "151": "NUM|l-nsubj",
185
+ "152": "NUM|l-nsubj:outer",
186
+ "153": "NUM|l-nummod",
187
+ "154": "NUM|l-obj",
188
+ "155": "NUM|l-obl",
189
+ "156": "NUM|r-compound",
190
+ "157": "NUM|root",
191
+ "158": "PART",
192
+ "159": "PART.",
193
+ "160": "PART|_",
194
+ "161": "PART|l-mark",
195
+ "162": "PART|r-mark",
196
+ "163": "PRON",
197
+ "164": "PRON.",
198
+ "165": "PRON|_",
199
+ "166": "PRON|l-acl",
200
+ "167": "PRON|l-advcl",
201
+ "168": "PRON|l-nmod",
202
+ "169": "PRON|l-nsubj",
203
+ "170": "PRON|l-nsubj:outer",
204
+ "171": "PRON|l-obj",
205
+ "172": "PRON|l-obl",
206
+ "173": "PRON|root",
207
+ "174": "PROPN",
208
+ "175": "PROPN.",
209
+ "176": "PROPN|_",
210
+ "177": "PROPN|l-acl",
211
+ "178": "PROPN|l-advcl",
212
+ "179": "PROPN|l-compound",
213
+ "180": "PROPN|l-nmod",
214
+ "181": "PROPN|l-nsubj",
215
+ "182": "PROPN|l-nsubj:outer",
216
+ "183": "PROPN|l-obj",
217
+ "184": "PROPN|l-obl",
218
+ "185": "PROPN|r-compound",
219
+ "186": "PROPN|r-nmod",
220
+ "187": "PROPN|root",
221
+ "188": "PUNCT",
222
+ "189": "PUNCT.",
223
+ "190": "PUNCT|_",
224
+ "191": "PUNCT|l-punct",
225
+ "192": "PUNCT|r-punct",
226
+ "193": "SCONJ",
227
+ "194": "SCONJ.",
228
+ "195": "SCONJ|_",
229
+ "196": "SCONJ|l-dep",
230
+ "197": "SCONJ|r-fixed",
231
+ "198": "SCONJ|r-mark",
232
+ "199": "SYM",
233
+ "200": "SYM.",
234
+ "201": "SYM|_",
235
+ "202": "SYM|l-compound",
236
+ "203": "SYM|l-dep",
237
+ "204": "SYM|l-nmod",
238
+ "205": "SYM|l-obl",
239
+ "206": "SYM|r-compound",
240
+ "207": "SYM|r-dep",
241
+ "208": "VERB",
242
+ "209": "VERB.",
243
+ "210": "VERB|_",
244
+ "211": "VERB|l-acl",
245
+ "212": "VERB|l-advcl",
246
+ "213": "VERB|l-ccomp",
247
+ "214": "VERB|l-compound",
248
+ "215": "VERB|l-csubj",
249
+ "216": "VERB|l-csubj:outer",
250
+ "217": "VERB|l-nmod",
251
+ "218": "VERB|l-obj",
252
+ "219": "VERB|l-obl",
253
+ "220": "VERB|r-acl",
254
+ "221": "VERB|r-advcl",
255
+ "222": "VERB|r-compound",
256
+ "223": "VERB|root",
257
+ "224": "X",
258
+ "225": "X.",
259
+ "226": "X|_",
260
+ "227": "X|l-nmod",
261
+ "228": "X|r-dep"
262
+ },
263
+ "initializer_cutoff_factor": 2.0,
264
+ "initializer_range": 0.02,
265
+ "intermediate_size": 2048,
266
+ "label2id": {
267
+ "ADJ": 0,
268
+ "ADJ.": 1,
269
+ "ADJ|_": 2,
270
+ "ADJ|l-acl": 3,
271
+ "ADJ|l-advcl": 4,
272
+ "ADJ|l-amod": 5,
273
+ "ADJ|l-ccomp": 6,
274
+ "ADJ|l-csubj": 7,
275
+ "ADJ|l-csubj:outer": 8,
276
+ "ADJ|l-nmod": 9,
277
+ "ADJ|l-nsubj": 10,
278
+ "ADJ|l-obj": 11,
279
+ "ADJ|l-obl": 12,
280
+ "ADJ|r-acl": 13,
281
+ "ADJ|r-amod": 14,
282
+ "ADJ|r-dep": 15,
283
+ "ADJ|root": 16,
284
+ "ADP": 17,
285
+ "ADP.": 18,
286
+ "ADP|_": 19,
287
+ "ADP|l-case": 20,
288
+ "ADP|r-case": 21,
289
+ "ADP|r-fixed": 22,
290
+ "ADV": 23,
291
+ "ADV.": 24,
292
+ "ADV|_": 25,
293
+ "ADV|l-advcl": 26,
294
+ "ADV|l-advmod": 27,
295
+ "ADV|l-obj": 28,
296
+ "ADV|r-dep": 29,
297
+ "ADV|root": 30,
298
+ "AUX": 31,
299
+ "AUX.": 32,
300
+ "AUX|Polarity=Neg|_": 33,
301
+ "AUX|Polarity=Neg|r-aux": 34,
302
+ "AUX|Polarity=Neg|r-fixed": 35,
303
+ "AUX|_": 36,
304
+ "AUX|r-aux": 37,
305
+ "AUX|r-cop": 38,
306
+ "AUX|r-fixed": 39,
307
+ "AUX|root": 40,
308
+ "B-ADJ": 41,
309
+ "B-ADJ.": 42,
310
+ "B-ADP": 43,
311
+ "B-ADP.": 44,
312
+ "B-ADV": 45,
313
+ "B-ADV.": 46,
314
+ "B-AUX": 47,
315
+ "B-AUX.": 48,
316
+ "B-CCONJ": 49,
317
+ "B-CCONJ.": 50,
318
+ "B-DET": 51,
319
+ "B-DET.": 52,
320
+ "B-INTJ": 53,
321
+ "B-INTJ.": 54,
322
+ "B-NOUN": 55,
323
+ "B-NOUN.": 56,
324
+ "B-NUM": 57,
325
+ "B-NUM.": 58,
326
+ "B-PART": 59,
327
+ "B-PART.": 60,
328
+ "B-PRON": 61,
329
+ "B-PRON.": 62,
330
+ "B-PROPN": 63,
331
+ "B-PROPN.": 64,
332
+ "B-PUNCT": 65,
333
+ "B-PUNCT.": 66,
334
+ "B-SCONJ": 67,
335
+ "B-SCONJ.": 68,
336
+ "B-SYM": 69,
337
+ "B-SYM.": 70,
338
+ "B-VERB": 71,
339
+ "B-VERB.": 72,
340
+ "B-X": 73,
341
+ "B-X.": 74,
342
+ "CCONJ": 75,
343
+ "CCONJ.": 76,
344
+ "CCONJ|_": 77,
345
+ "CCONJ|l-cc": 78,
346
+ "CCONJ|r-cc": 79,
347
+ "DET": 80,
348
+ "DET.": 81,
349
+ "DET|_": 82,
350
+ "DET|l-det": 83,
351
+ "I-ADJ": 84,
352
+ "I-ADJ.": 85,
353
+ "I-ADP": 86,
354
+ "I-ADP.": 87,
355
+ "I-ADV": 88,
356
+ "I-ADV.": 89,
357
+ "I-AUX": 90,
358
+ "I-AUX.": 91,
359
+ "I-CCONJ": 92,
360
+ "I-CCONJ.": 93,
361
+ "I-DET": 94,
362
+ "I-DET.": 95,
363
+ "I-INTJ": 96,
364
+ "I-INTJ.": 97,
365
+ "I-NOUN": 98,
366
+ "I-NOUN.": 99,
367
+ "I-NUM": 100,
368
+ "I-NUM.": 101,
369
+ "I-PART": 102,
370
+ "I-PART.": 103,
371
+ "I-PRON": 104,
372
+ "I-PRON.": 105,
373
+ "I-PROPN": 106,
374
+ "I-PROPN.": 107,
375
+ "I-PUNCT": 108,
376
+ "I-PUNCT.": 109,
377
+ "I-SCONJ": 110,
378
+ "I-SCONJ.": 111,
379
+ "I-SYM": 112,
380
+ "I-SYM.": 113,
381
+ "I-VERB": 114,
382
+ "I-VERB.": 115,
383
+ "I-X": 116,
384
+ "I-X.": 117,
385
+ "INTJ": 118,
386
+ "INTJ.": 119,
387
+ "INTJ|_": 120,
388
+ "INTJ|l-discourse": 121,
389
+ "INTJ|r-discourse": 122,
390
+ "INTJ|root": 123,
391
+ "NOUN": 124,
392
+ "NOUN.": 125,
393
+ "NOUN|Polarity=Neg|_": 126,
394
+ "NOUN|Polarity=Neg|l-obl": 127,
395
+ "NOUN|Polarity=Neg|root": 128,
396
+ "NOUN|_": 129,
397
+ "NOUN|l-acl": 130,
398
+ "NOUN|l-advcl": 131,
399
+ "NOUN|l-ccomp": 132,
400
+ "NOUN|l-compound": 133,
401
+ "NOUN|l-csubj": 134,
402
+ "NOUN|l-csubj:outer": 135,
403
+ "NOUN|l-nmod": 136,
404
+ "NOUN|l-nsubj": 137,
405
+ "NOUN|l-nsubj:outer": 138,
406
+ "NOUN|l-obj": 139,
407
+ "NOUN|l-obl": 140,
408
+ "NOUN|r-compound": 141,
409
+ "NOUN|r-nmod": 142,
410
+ "NOUN|r-nsubj": 143,
411
+ "NOUN|root": 144,
412
+ "NUM": 145,
413
+ "NUM.": 146,
414
+ "NUM|_": 147,
415
+ "NUM|l-advcl": 148,
416
+ "NUM|l-compound": 149,
417
+ "NUM|l-nmod": 150,
418
+ "NUM|l-nsubj": 151,
419
+ "NUM|l-nsubj:outer": 152,
420
+ "NUM|l-nummod": 153,
421
+ "NUM|l-obj": 154,
422
+ "NUM|l-obl": 155,
423
+ "NUM|r-compound": 156,
424
+ "NUM|root": 157,
425
+ "PART": 158,
426
+ "PART.": 159,
427
+ "PART|_": 160,
428
+ "PART|l-mark": 161,
429
+ "PART|r-mark": 162,
430
+ "PRON": 163,
431
+ "PRON.": 164,
432
+ "PRON|_": 165,
433
+ "PRON|l-acl": 166,
434
+ "PRON|l-advcl": 167,
435
+ "PRON|l-nmod": 168,
436
+ "PRON|l-nsubj": 169,
437
+ "PRON|l-nsubj:outer": 170,
438
+ "PRON|l-obj": 171,
439
+ "PRON|l-obl": 172,
440
+ "PRON|root": 173,
441
+ "PROPN": 174,
442
+ "PROPN.": 175,
443
+ "PROPN|_": 176,
444
+ "PROPN|l-acl": 177,
445
+ "PROPN|l-advcl": 178,
446
+ "PROPN|l-compound": 179,
447
+ "PROPN|l-nmod": 180,
448
+ "PROPN|l-nsubj": 181,
449
+ "PROPN|l-nsubj:outer": 182,
450
+ "PROPN|l-obj": 183,
451
+ "PROPN|l-obl": 184,
452
+ "PROPN|r-compound": 185,
453
+ "PROPN|r-nmod": 186,
454
+ "PROPN|root": 187,
455
+ "PUNCT": 188,
456
+ "PUNCT.": 189,
457
+ "PUNCT|_": 190,
458
+ "PUNCT|l-punct": 191,
459
+ "PUNCT|r-punct": 192,
460
+ "SCONJ": 193,
461
+ "SCONJ.": 194,
462
+ "SCONJ|_": 195,
463
+ "SCONJ|l-dep": 196,
464
+ "SCONJ|r-fixed": 197,
465
+ "SCONJ|r-mark": 198,
466
+ "SYM": 199,
467
+ "SYM.": 200,
468
+ "SYM|_": 201,
469
+ "SYM|l-compound": 202,
470
+ "SYM|l-dep": 203,
471
+ "SYM|l-nmod": 204,
472
+ "SYM|l-obl": 205,
473
+ "SYM|r-compound": 206,
474
+ "SYM|r-dep": 207,
475
+ "VERB": 208,
476
+ "VERB.": 209,
477
+ "VERB|_": 210,
478
+ "VERB|l-acl": 211,
479
+ "VERB|l-advcl": 212,
480
+ "VERB|l-ccomp": 213,
481
+ "VERB|l-compound": 214,
482
+ "VERB|l-csubj": 215,
483
+ "VERB|l-csubj:outer": 216,
484
+ "VERB|l-nmod": 217,
485
+ "VERB|l-obj": 218,
486
+ "VERB|l-obl": 219,
487
+ "VERB|r-acl": 220,
488
+ "VERB|r-advcl": 221,
489
+ "VERB|r-compound": 222,
490
+ "VERB|root": 223,
491
+ "X": 224,
492
+ "X.": 225,
493
+ "X|_": 226,
494
+ "X|l-nmod": 227,
495
+ "X|r-dep": 228
496
+ },
497
+ "layer_norm_eps": 1e-05,
498
+ "local_attention": 128,
499
+ "local_rope_theta": 10000.0,
500
+ "max_position_embeddings": 8192,
501
+ "mlp_bias": false,
502
+ "mlp_dropout": 0.0,
503
+ "model_type": "modernbert",
504
+ "norm_bias": false,
505
+ "norm_eps": 1e-05,
506
+ "num_attention_heads": 8,
507
+ "num_hidden_layers": 19,
508
+ "pad_token_id": 3,
509
+ "position_embedding_type": "rope",
510
+ "reference_compile": false,
511
+ "repad_logits_with_grad": false,
512
+ "sep_token_id": 4,
513
+ "sparse_pred_ignore_index": -100,
514
+ "sparse_prediction": false,
515
+ "tokenizer_class": "LlamaTokenizer",
516
+ "torch_dtype": "float32",
517
+ "transformers_version": "4.48.3",
518
+ "vocab_size": 102400
519
+ }
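The label inventory above mixes three kinds of tags: plain and `B-`/`I-` subword POS tags, the same tags with a trailing `.` (which, judging from `ud.py` below, marks words the parser deprioritizes), and `UPOS|l-deprel` / `UPOS|r-deprel` / `UPOS|root` / `UPOS|_` tags used for the dependency step. A minimal sketch for inspecting that split; the grouping criterion is an assumption read off the label names:

```py
from transformers import AutoConfig
cfg=AutoConfig.from_pretrained("KoichiYasuoka/modernbert-japanese-130m-ud-embeds",trust_remote_code=True)
pos_tags=[l for l in cfg.label2id if "|" not in l]   # e.g. NOUN, NOUN., B-NOUN, I-NOUN.
dep_tags=[l for l in cfg.label2id if "|" in l]       # e.g. NOUN|l-nsubj, VERB|root, NOUN|_
print(len(pos_tags),len(dep_tags))
print([l for l in dep_tags if l.startswith("VERB|")])
```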
maker.py ADDED
@@ -0,0 +1,114 @@
1
+ #! /usr/bin/python3
2
+ src="sbintuitions/modernbert-ja-130m"
3
+ tgt="KoichiYasuoka/modernbert-japanese-130m-ud-embeds"
4
+ url="https://github.com/UniversalDependencies/UD_Japanese-GSDLUW"
5
+ import os
6
+ d=os.path.basename(url)
7
+ os.system("test -d "+d+" || git clone --depth=1 "+url)
8
+ os.system("for F in train dev test ; do cp "+d+"/*-$F.conllu $F.conllu ; done")
9
+ class UDEmbedsDataset(object):
10
+ def __init__(self,conllu,tokenizer,oldtokenizer=None,embeddings=None):
11
+ self.conllu=open(conllu,"r",encoding="utf-8")
12
+ self.tokenizer=tokenizer
13
+ self.oldtokenizer=oldtokenizer if oldtokenizer else tokenizer
14
+ self.embeddings=embeddings
15
+ self.seeks=[0]
16
+ label=set(["SYM","SYM.","SYM|_"])
17
+ dep=set()
18
+ s=self.conllu.readline()
19
+ while s!="":
20
+ if s=="\n":
21
+ self.seeks.append(self.conllu.tell())
22
+ else:
23
+ w=s.split("\t")
24
+ if len(w)==10:
25
+ if w[0].isdecimal():
26
+ p=w[3]
27
+ q="" if w[5]=="_" else "|"+w[5]
28
+ d=("|" if w[6]=="0" else "|l-" if int(w[0])<int(w[6]) else "|r-")+w[7]
29
+ for k in [p,p+".","B-"+p,"B-"+p+".","I-"+p,"I-"+p+".",p+q+"|_",p+q+d]:
30
+ label.add(k)
31
+ s=self.conllu.readline()
32
+ self.label2id={l:i for i,l in enumerate(sorted(label))}
33
+ def __call__(*args):
34
+ lid={l:i for i,l in enumerate(sorted(set(sum([list(t.label2id) for t in args],[]))))}
35
+ for t in args:
36
+ t.label2id=lid
37
+ return lid
38
+ def __del__(self):
39
+ self.conllu.close()
40
+ __len__=lambda self:(len(self.seeks)-1)*2
41
+ def __getitem__(self,i):
42
+ self.conllu.seek(self.seeks[int(i/2)])
43
+ z,c,t,s=i%2,[],[""],False
44
+ while t[0]!="\n":
45
+ t=self.conllu.readline().split("\t")
46
+ if len(t)==10 and t[0].isdecimal():
47
+ if s:
48
+ t[1]=" "+t[1]
49
+ c.append(t)
50
+ s=t[9].find("SpaceAfter=No")<0
51
+ x=[True if t[6]=="0" or int(t[6])>j or sum([1 if int(c[i][6])==j+1 else 0 for i in range(j+1,len(c))])>0 else False for j,t in enumerate(c)]
52
+ if z==0:
53
+ v=self.tokenizer([t[1] for t in c],add_special_tokens=False)["input_ids"]
54
+ ids,upos=[self.tokenizer.bos_token_id],["SYM."]
55
+ for i,(j,k) in enumerate(zip(v,c)):
56
+ if j==[]:
57
+ j=[self.tokenizer.unk_token_id]
58
+ p=k[3] if x[i] else k[3]+"."
59
+ ids+=j
60
+ upos+=[p] if len(j)==1 else ["B-"+p]+["I-"+p]*(len(j)-1)
61
+ ids.append(self.tokenizer.eos_token_id)
62
+ upos.append("SYM.")
63
+ emb=self.embeddings
64
+ else:
65
+ import torch
66
+ v=self.oldtokenizer([t[1] for t in c],add_special_tokens=False)["input_ids"]
67
+ if len(x)<127:
68
+ x=[True]*len(x)
69
+ else:
70
+ w=sum([len(x)-i+1 if b else 0 for i,b in enumerate(x)])+1
71
+ for i in range(len(x)):
72
+ if x[i]==False and w+len(x)-i<8192:
73
+ x[i]=True
74
+ w+=len(x)-i+1
75
+ p=[t[3] if t[5]=="_" else t[3]+"|"+t[5] for i,t in enumerate(c)]
76
+ d=[t[7] if t[6]=="0" else "l-"+t[7] if int(t[0])<int(t[6]) else "r-"+t[7] for t in c]
77
+ ids,upos=[-1],["SYM|_"]
78
+ for i in range(len(x)):
79
+ if x[i]:
80
+ ids.append(i)
81
+ upos.append(p[i]+"|"+d[i] if c[i][6]=="0" else p[i]+"|_")
82
+ for j in range(i+1,len(x)):
83
+ ids.append(j)
84
+ upos.append(p[j]+"|"+d[j] if int(c[j][6])==i+1 else p[i]+"|"+d[i] if int(c[i][6])==j+1 else p[j]+"|_")
85
+ ids.append(-1)
86
+ upos.append("SYM|_")
87
+ with torch.no_grad():
88
+ m=[]
89
+ for j in v:
90
+ if j==[]:
91
+ j=[self.tokenizer.unk_token_id]
92
+ m.append(self.embeddings[j,:].sum(axis=0))
93
+ m.append(self.embeddings[self.tokenizer.eos_token_id,:])
94
+ emb=torch.stack(m)
95
+ return{"inputs_embeds":emb[ids[:8192],:],"labels":[self.label2id[p] for p in upos[:8192]]}
96
+ from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DefaultDataCollator,TrainingArguments,Trainer
97
+ from tokenizers.pre_tokenizers import Sequence,Split
98
+ from tokenizers import Regex
99
+ from copy import deepcopy
100
+ otk=AutoTokenizer.from_pretrained(src)
101
+ ntk=deepcopy(otk)
102
+ ntk.backend_tokenizer.pre_tokenizer=Sequence([Split(Regex("[ぁ-ん]"),"isolated"),otk.backend_tokenizer.pre_tokenizer])
103
+ trainDS=UDEmbedsDataset("train.conllu",ntk,otk)
104
+ devDS=UDEmbedsDataset("dev.conllu",ntk,otk)
105
+ testDS=UDEmbedsDataset("test.conllu",ntk,otk)
106
+ lid=trainDS(devDS,testDS)
107
+ cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True,trust_remote_code=True)
108
+ mdl=AutoModelForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True,trust_remote_code=True)
109
+ trainDS.embeddings=mdl.get_input_embeddings().weight
110
+ arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=1,dataloader_pin_memory=False,output_dir=tgt,overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1,save_safetensors=False)
111
+ trn=Trainer(args=arg,data_collator=DefaultDataCollator(),model=mdl,train_dataset=trainDS)
112
+ trn.train()
113
+ trn.save_model(tgt)
114
+ otk.save_pretrained(tgt)
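`UDEmbedsDataset` above serves each sentence twice: even items give a subword-level POS view, while odd items give a word-pair view whose rows are word embeddings indexed by the `ids` list (index `-1` picks the `eos` embedding appended at the end of the stack). A minimal sketch of that index pattern for a toy 3-word sentence with every word selected (`x=[True]*3`):

```py
# mirrors the loop in UDEmbedsDataset.__getitem__ (odd items) and in
# UniversalDependenciesPipeline.postprocess: one slot per selected word i
# (labelled UPOS|root or UPOS|_), followed by one slot per later word j
# (labelled UPOS|l-deprel / UPOS|r-deprel when i and j are linked, else UPOS|_)
n=3
ids=[-1]                 # leading eos slot
for i in range(n):
    ids.append(i)
    for j in range(i+1,n):
        ids.append(j)
ids.append(-1)           # trailing eos slot
print(ids)               # [-1, 0, 1, 2, 1, 2, 2, -1]
```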
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b74f493a41ee73d8b1dce645987d4daebc5a31f40b8737179b92fa50e1b3a5b
3
+ size 530122154
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "cls_token": {
10
+ "content": "<cls>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "eos_token": {
17
+ "content": "</s>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "mask_token": {
24
+ "content": "<mask>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "pad_token": {
31
+ "content": "<pad>",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ },
37
+ "sep_token": {
38
+ "content": "<sep>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false
43
+ },
44
+ "unk_token": {
45
+ "content": "<unk>",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false
50
+ }
51
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:008293028e1a9d9a1038d9b63d989a2319797dfeaa03f171093a57b33a3a8277
3
+ size 1831879
tokenizer_config.json ADDED
@@ -0,0 +1,171 @@
1
+ {
2
+ "add_bos_token": true,
3
+ "add_dummy_prefix_space": false,
4
+ "add_eos_token": true,
5
+ "add_prefix_space": false,
6
+ "added_tokens_decoder": {
7
+ "0": {
8
+ "content": "<unk>",
9
+ "lstrip": false,
10
+ "normalized": false,
11
+ "rstrip": false,
12
+ "single_word": false,
13
+ "special": true
14
+ },
15
+ "1": {
16
+ "content": "<s>",
17
+ "lstrip": false,
18
+ "normalized": false,
19
+ "rstrip": false,
20
+ "single_word": false,
21
+ "special": true
22
+ },
23
+ "2": {
24
+ "content": "</s>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false,
29
+ "special": true
30
+ },
31
+ "3": {
32
+ "content": "<pad>",
33
+ "lstrip": false,
34
+ "normalized": false,
35
+ "rstrip": false,
36
+ "single_word": false,
37
+ "special": true
38
+ },
39
+ "4": {
40
+ "content": "<sep>",
41
+ "lstrip": false,
42
+ "normalized": false,
43
+ "rstrip": false,
44
+ "single_word": false,
45
+ "special": true
46
+ },
47
+ "5": {
48
+ "content": "<mask>",
49
+ "lstrip": false,
50
+ "normalized": false,
51
+ "rstrip": false,
52
+ "single_word": false,
53
+ "special": true
54
+ },
55
+ "6": {
56
+ "content": "<cls>",
57
+ "lstrip": false,
58
+ "normalized": false,
59
+ "rstrip": false,
60
+ "single_word": false,
61
+ "special": true
62
+ },
63
+ "7": {
64
+ "content": "<|system|>",
65
+ "lstrip": false,
66
+ "normalized": false,
67
+ "rstrip": false,
68
+ "single_word": false,
69
+ "special": false
70
+ },
71
+ "8": {
72
+ "content": "<|assistant|>",
73
+ "lstrip": false,
74
+ "normalized": false,
75
+ "rstrip": false,
76
+ "single_word": false,
77
+ "special": false
78
+ },
79
+ "9": {
80
+ "content": "<|user|>",
81
+ "lstrip": false,
82
+ "normalized": false,
83
+ "rstrip": false,
84
+ "single_word": false,
85
+ "special": false
86
+ },
87
+ "10": {
88
+ "content": "<|available_tools|>",
89
+ "lstrip": false,
90
+ "normalized": false,
91
+ "rstrip": false,
92
+ "single_word": false,
93
+ "special": false
94
+ },
95
+ "11": {
96
+ "content": "<|tool_calls|>",
97
+ "lstrip": false,
98
+ "normalized": false,
99
+ "rstrip": false,
100
+ "single_word": false,
101
+ "special": false
102
+ },
103
+ "12": {
104
+ "content": "<|tool_results|>",
105
+ "lstrip": false,
106
+ "normalized": false,
107
+ "rstrip": false,
108
+ "single_word": false,
109
+ "special": false
110
+ },
111
+ "13": {
112
+ "content": "<|code|>",
113
+ "lstrip": false,
114
+ "normalized": false,
115
+ "rstrip": false,
116
+ "single_word": false,
117
+ "special": false
118
+ },
119
+ "14": {
120
+ "content": "<|file|>",
121
+ "lstrip": false,
122
+ "normalized": false,
123
+ "rstrip": false,
124
+ "single_word": false,
125
+ "special": false
126
+ },
127
+ "102397": {
128
+ "content": "<|prefix|>",
129
+ "lstrip": false,
130
+ "normalized": false,
131
+ "rstrip": false,
132
+ "single_word": false,
133
+ "special": false
134
+ },
135
+ "102398": {
136
+ "content": "<|suffix|>",
137
+ "lstrip": false,
138
+ "normalized": false,
139
+ "rstrip": false,
140
+ "single_word": false,
141
+ "special": false
142
+ },
143
+ "102399": {
144
+ "content": "<|middle|>",
145
+ "lstrip": false,
146
+ "normalized": false,
147
+ "rstrip": false,
148
+ "single_word": false,
149
+ "special": false
150
+ }
151
+ },
152
+ "bos_token": "<s>",
153
+ "clean_up_tokenization_spaces": false,
154
+ "cls_token": "<cls>",
155
+ "do_lower_case": false,
156
+ "eos_token": "</s>",
157
+ "extra_ids": 0,
158
+ "extra_special_tokens": {},
159
+ "keep_accents": true,
160
+ "legacy": false,
161
+ "mask_token": "<mask>",
162
+ "model_max_length": 1000000000000000019884624838656,
163
+ "pad_token": "<pad>",
164
+ "padding_side": "right",
165
+ "sep_token": "<sep>",
166
+ "sp_model_kwargs": {},
167
+ "spaces_between_special_tokens": false,
168
+ "tokenizer_class": "LlamaTokenizer",
169
+ "unk_token": "<unk>",
170
+ "use_default_system_prompt": false
171
+ }
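The tokenizer shipped here is still the upstream `LlamaTokenizer`; both `maker.py` above and `ud.py` below wrap its pre-tokenizer at load time so that hiragana characters are isolated before the original pre-tokenizer runs, which is presumably the "tokenizer refinement" the commit message refers to. A minimal sketch of that wrapping, assuming the fast tokenizer loads from the bundled `tokenizer.json`:

```py
from copy import deepcopy
from transformers import AutoTokenizer
from tokenizers import Regex
from tokenizers.pre_tokenizers import Sequence,Split
otk=AutoTokenizer.from_pretrained("KoichiYasuoka/modernbert-japanese-130m-ud-embeds")
ntk=deepcopy(otk)
ntk.backend_tokenizer.pre_tokenizer=Sequence([Split(Regex("[ぁ-ん]"),"isolated"),otk.backend_tokenizer.pre_tokenizer])
s="挿し絵が用いられている"
print(otk.tokenize(s))   # original segmentation
print(ntk.tokenize(s))   # hiragana split off as single-character pieces
```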
ud.py ADDED
@@ -0,0 +1,155 @@
1
+ import numpy
2
+ from transformers import TokenClassificationPipeline
3
+
4
+ class BellmanFordTokenClassificationPipeline(TokenClassificationPipeline):
5
+ def __init__(self,**kwargs):
6
+ from copy import deepcopy
7
+ from tokenizers import Regex
8
+ from tokenizers.pre_tokenizers import Sequence,Split
9
+ super().__init__(**kwargs)
10
+ self.oldtokenizer=deepcopy(self.tokenizer)
11
+ self.tokenizer.backend_tokenizer.pre_tokenizer=Sequence([Split(Regex("[ぁ-ん]"),"isolated"),self.oldtokenizer.backend_tokenizer.pre_tokenizer])
12
+ x=self.model.config.label2id
13
+ y=[k for k in x if k.find("|")<0 and not k.startswith("I-")]
14
+ self.transition=numpy.full((len(x),len(x)),-numpy.inf)
15
+ for k,v in x.items():
16
+ if k.find("|")<0:
17
+ for j in ["I-"+k[2:]] if k.startswith("B-") else [k]+y if k.startswith("I-") else y:
18
+ self.transition[v,x[j]]=0
19
+ def check_model_type(self,supported_models):
20
+ pass
21
+ def postprocess(self,model_outputs,**kwargs):
22
+ if "logits" not in model_outputs:
23
+ return self.postprocess(model_outputs[0],**kwargs)
24
+ return self.bellman_ford_token_classification(model_outputs,**kwargs)
25
+ def bellman_ford_token_classification(self,model_outputs,**kwargs):
26
+ m=model_outputs["logits"][0].numpy()
27
+ e=numpy.exp(m-numpy.max(m,axis=-1,keepdims=True))
28
+ z=e/e.sum(axis=-1,keepdims=True)
29
+ for i in range(m.shape[0]-1,0,-1):
30
+ m[i-1]+=numpy.max(m[i]+self.transition,axis=1)
31
+ k=[numpy.argmax(m[0]+self.transition[0])]
32
+ for i in range(1,m.shape[0]):
33
+ k.append(numpy.argmax(m[i]+self.transition[k[-1]]))
34
+ w=[{"entity":self.model.config.id2label[j],"start":s,"end":e,"score":z[i,j]} for i,((s,e),j) in enumerate(zip(model_outputs["offset_mapping"][0].tolist(),k)) if s<e]
35
+ if "aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none":
36
+ for i,t in reversed(list(enumerate(w))):
37
+ p=t.pop("entity")
38
+ if p.startswith("I-"):
39
+ w[i-1]["score"]=min(w[i-1]["score"],t["score"])
40
+ w[i-1]["end"]=w.pop(i)["end"]
41
+ elif p.startswith("B-"):
42
+ t["entity_group"]=p[2:]
43
+ else:
44
+ t["entity_group"]=p
45
+ for t in w:
46
+ t["text"]=model_outputs["sentence"][t["start"]:t["end"]]
47
+ return w
48
+
49
+ class UniversalDependenciesPipeline(BellmanFordTokenClassificationPipeline):
50
+ def __init__(self,**kwargs):
51
+ kwargs["aggregation_strategy"]="simple"
52
+ super().__init__(**kwargs)
53
+ x=self.model.config.label2id
54
+ self.root=numpy.full((len(x)),-numpy.inf)
55
+ self.left_arc=numpy.full((len(x)),-numpy.inf)
56
+ self.right_arc=numpy.full((len(x)),-numpy.inf)
57
+ for k,v in x.items():
58
+ if k.endswith("|root"):
59
+ self.root[v]=0
60
+ elif k.find("|l-")>0:
61
+ self.left_arc[v]=0
62
+ elif k.find("|r-")>0:
63
+ self.right_arc[v]=0
64
+ def postprocess(self,model_outputs,**kwargs):
65
+ import torch
66
+ kwargs["aggregation_strategy"]="simple"
67
+ if "logits" not in model_outputs:
68
+ return self.postprocess(model_outputs[0],**kwargs)
69
+ w=self.bellman_ford_token_classification(model_outputs,**kwargs)
70
+ off=[(t["start"],t["end"]) for t in w]
71
+ for i,(s,e) in reversed(list(enumerate(off))):
72
+ if s<e:
73
+ d=w[i]["text"]
74
+ j=len(d)-len(d.lstrip())
75
+ if j>0:
76
+ d=d.lstrip()
77
+ off[i]=(off[i][0]+j,off[i][1])
78
+ j=len(d)-len(d.rstrip())
79
+ if j>0:
80
+ d=d.rstrip()
81
+ off[i]=(off[i][0],off[i][1]-j)
82
+ if d.strip()=="":
83
+ off.pop(i)
84
+ w.pop(i)
85
+ v=self.oldtokenizer([t["text"] for t in w],add_special_tokens=False)
86
+ x=[not t["entity_group"].endswith(".") for t in w]
87
+ if len(x)<127:
88
+ x=[True]*len(x)
89
+ else:
90
+ k=sum([len(x)-i+1 if b else 0 for i,b in enumerate(x)])+1
91
+ for i in numpy.argsort(numpy.array([t["score"] for t in w])):
92
+ if x[i]==False and k+len(x)-i<8192:
93
+ x[i]=True
94
+ k+=len(x)-i+1
95
+ ids=[-1]
96
+ for i in range(len(x)):
97
+ if x[i]:
98
+ ids.append(i)
99
+ for j in range(i+1,len(x)):
100
+ ids.append(j)
101
+ ids.append(-1)
102
+ with torch.no_grad():
103
+ e=self.model.get_input_embeddings().weight
104
+ m=[]
105
+ for j in v["input_ids"]:
106
+ if j==[]:
107
+ j=[self.tokenizer.unk_token_id]
108
+ m.append(e[j,:].sum(axis=0))
109
+ m.append(e[self.tokenizer.eos_token_id,:])
110
+ m=torch.stack(m).to(self.device)
111
+ e=self.model(inputs_embeds=torch.unsqueeze(m[ids,:],0))
112
+ m=e.logits[0].cpu().numpy()
113
+ e=numpy.full((len(x),len(x),m.shape[-1]),m.min())
114
+ k=1
115
+ for i in range(len(x)):
116
+ if x[i]:
117
+ e[i,i]=m[k]+self.root
118
+ k+=1
119
+ for j in range(1,len(x)-i):
120
+ e[i+j,i]=m[k]+self.left_arc
121
+ e[i,i+j]=m[k]+self.right_arc
122
+ k+=1
123
+ k+=1
124
+ m,p=numpy.max(e,axis=2),numpy.argmax(e,axis=2)
125
+ h=self.chu_liu_edmonds(m)
126
+ z=[i for i,j in enumerate(h) if i==j]
127
+ if len(z)>1:
128
+ k,h=z[numpy.argmax(m[z,z])],numpy.min(m)-numpy.max(m)
129
+ m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
130
+ h=self.chu_liu_edmonds(m)
131
+ q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
132
+ t=model_outputs["sentence"].replace("\n"," ")
133
+ u="# text = "+t+"\n"
134
+ for i,(s,e) in enumerate(off):
135
+ u+="\t".join([str(i+1),t[s:e],"_",q[i][0],"_","_" if len(q[i])<3 else "|".join(q[i][1:-1]),str(0 if h[i]==i else h[i]+1),"root" if q[i][-1]=="root" else q[i][-1][2:],"_","_" if i+1<len(off) and e<off[i+1][0] else "SpaceAfter=No"])+"\n"
136
+ return u+"\n"
137
+ def chu_liu_edmonds(self,matrix):
138
+ h=numpy.argmax(matrix,axis=0)
139
+ x=[-1 if i==j else j for i,j in enumerate(h)]
140
+ for b in [lambda x,i,j:-1 if i not in x else x[i],lambda x,i,j:-1 if j<0 else x[j]]:
141
+ y=[]
142
+ while x!=y:
143
+ y=list(x)
144
+ for i,j in enumerate(x):
145
+ x[i]=b(x,i,j)
146
+ if max(x)<0:
147
+ return h
148
+ y,x=[i for i,j in enumerate(x) if j==max(x)],[i for i,j in enumerate(x) if j<max(x)]
149
+ z=matrix-numpy.max(matrix,axis=0)
150
+ m=numpy.block([[z[x,:][:,x],numpy.max(z[x,:][:,y],axis=1).reshape(len(x),1)],[numpy.max(z[y,:][:,x],axis=0),numpy.max(z[y,y])]])
151
+ k=[j if i==len(x) else x[j] if j<len(x) else y[numpy.argmax(z[y,x[i]])] for i,j in enumerate(self.chu_liu_edmonds(m))]
152
+ h=[j if i in y else k[x.index(i)] for i,j in enumerate(h)]
153
+ i=y[numpy.argmax(z[x[k[-1]],y] if k[-1]<len(x) else z[y,y])]
154
+ h[i]=x[k[-1]] if k[-1]<len(x) else i
155
+ return h
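`UniversalDependenciesPipeline.postprocess` collapses the pairwise label scores into a head matrix `m` where `m[i,j]` scores word `i` as the head of word `j` (diagonal entries score `root`), then decodes it with `chu_liu_edmonds`, which returns `h` with `h[j]` the head of word `j` and `h[j]==j` marking the root. A minimal sketch of that decoding step on a toy matrix whose numbers are purely illustrative:

```py
import numpy
from transformers import pipeline
nlp=pipeline("universal-dependencies","KoichiYasuoka/modernbert-japanese-130m-ud-embeds",trust_remote_code=True)
m=numpy.array([[5.0,4.0,0.0],
               [0.0,2.0,6.0],
               [1.0,1.0,0.0]])
print(nlp.chu_liu_edmonds(m))   # [0 0 1]: word 0 is the root and heads word 1, which heads word 2
```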