KoichiYasuoka committed
Commit 615aa9b
1 Parent(s): 0a226a4

model improved

Browse files:
- maker.py +17 -3
- pytorch_model.bin +1 -1
maker.py CHANGED
@@ -6,6 +6,10 @@ url="https://github.com/UniversalDependencies/UD_Vietnamese-VTB"
 d=os.path.basename(url)
 os.system("test -d "+d+" || git clone --depth=1 "+url)
 os.system("for F in train dev test ; do cp "+d+"/*-$F.conllu $F.conllu ; done")
+url="https://github.com/datquocnguyen/VnDT"
+d=os.path.basename(url)
+os.system("test -d "+d+" || git clone --depth=1 "+url)
+os.system("for F in train dev test ; do cp "+d+"/*-gold-*-$F.conll pre-$F.conll ; done")
 class UDgoeswithDataset(object):
   def __init__(self,conllu,tokenizer):
     self.ids,self.tags,label=[],[],set()
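The four added lines fetch a second corpus, the VnDT Vietnamese dependency treebank, alongside UD_Vietnamese-VTB, and stage its gold-standard splits as pre-train.conll, pre-dev.conll and pre-test.conll. A rough Python equivalent of the added shell pipeline, assuming VnDT's files really match the *-gold-*-$F.conll pattern used above:

import glob,os,shutil,subprocess
url="https://github.com/datquocnguyen/VnDT"
d=os.path.basename(url)
if not os.path.isdir(d):  # mirrors "test -d ... || git clone --depth=1 ..."
  subprocess.run(["git","clone","--depth=1",url],check=True)
for split in ["train","dev","test"]:
  for f in glob.glob(os.path.join(d,"*-gold-*-"+split+".conll")):
    shutil.copy(f,"pre-"+split+".conll")  # same effect as the cp loop above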
@@ -17,7 +21,7 @@ class UDgoeswithDataset(object):
         if len(t)==10 and t[0].isdecimal():
           c.append(t)
         elif c!=[]:
-          v=tokenizer([t[1] for t in c],add_special_tokens=False)["input_ids"]
+          v=tokenizer([t[1].replace("_"," ") for t in c],add_special_tokens=False)["input_ids"]
           for i in range(len(v)-1,-1,-1):
             for j in range(1,len(v[i])):
               c.insert(i+1,[c[i][0],"_","_","X","_","_",c[i][0],"goeswith","_","_"])
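This one-line change matters because VnDT joins the syllables of a multi-syllable word with underscores (e.g. sinh_viên, "student"), so replace("_"," ") restores plain text before subword tokenization; any word that still splits into several subwords then receives extra rows whose DEPREL is goeswith, via the c.insert(...) line above. A minimal sketch of the effect, using bert-base-multilingual-cased purely as a stand-in for src:

from transformers import AutoTokenizer
tkz=AutoTokenizer.from_pretrained("bert-base-multilingual-cased")  # stand-in for src
v=tkz("sinh_viên".replace("_"," "),add_special_tokens=False)["input_ids"]
print(tkz.convert_ids_to_tokens(v))  # every subword after the first becomes a "goeswith" token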
@@ -43,13 +47,23 @@
   __getitem__=lambda self,i:{"input_ids":self.ids[i],"labels":[self.label2id[t] for t in self.tags[i]]}
 from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DataCollatorForTokenClassification,TrainingArguments,Trainer
 tkz=AutoTokenizer.from_pretrained(src)
+trainDS=UDgoeswithDataset("pre-train.conll",tkz)
+devDS=UDgoeswithDataset("pre-dev.conll",tkz)
+testDS=UDgoeswithDataset("pre-test.conll",tkz)
+lid=trainDS(devDS,testDS)
+cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True)
+arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=32,output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2,evaluation_strategy="epoch",learning_rate=5e-05,warmup_ratio=0.1)
+trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=AutoModelForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True),train_dataset=trainDS,eval_dataset=devDS)
+trn.train()
+trn.save_model("tmpdir")
+tkz.save_pretrained("tmpdir")
 trainDS=UDgoeswithDataset("train.conllu",tkz)
 devDS=UDgoeswithDataset("dev.conllu",tkz)
 testDS=UDgoeswithDataset("test.conllu",tkz)
 lid=trainDS(devDS,testDS)
-cfg=AutoConfig.from_pretrained(
+cfg=AutoConfig.from_pretrained("tmpdir",num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True)
 arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=32,output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2,evaluation_strategy="epoch",learning_rate=5e-05,warmup_ratio=0.1)
-trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=AutoModelForTokenClassification.from_pretrained(
+trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=AutoModelForTokenClassification.from_pretrained("tmpdir",config=cfg,ignore_mismatched_sizes=True),train_dataset=trainDS,eval_dataset=devDS)
 trn.train()
 trn.save_model(tgt)
 tkz.save_pretrained(tgt)
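Training is now two-stage: the script first fine-tunes src on the VnDT-derived pre-*.conll files and saves that intermediate model to tmpdir, then continues from tmpdir on the UD_Vietnamese-VTB *.conllu files and saves the result to tgt. ignore_mismatched_sizes=True lets the token-classification head be re-initialized at each stage when the two corpora yield different label sets. A hypothetical usage sketch for the finished model, assuming tgt pointed at a local directory named vi-goeswith-model:

import torch
from transformers import AutoTokenizer,AutoModelForTokenClassification
tgt="vi-goeswith-model"  # hypothetical; whatever directory trn.save_model(tgt) wrote
tkz=AutoTokenizer.from_pretrained(tgt)
mdl=AutoModelForTokenClassification.from_pretrained(tgt)
enc=tkz("Tôi là sinh viên",return_tensors="pt")
with torch.no_grad():
  p=mdl(**enc).logits.argmax(dim=-1)[0].tolist()
for i,j in zip(enc["input_ids"][0].tolist(),p):
  print(tkz.convert_ids_to_tokens(i),mdl.config.id2label[j])  # token and its predicted tag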
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:afbc383453f9f496264642c011fcd2cf8d8574ba2114d854a83d6565212e6494
 size 441306865