A-Funakoshi committed on
Commit 77aed32 · 1 Parent(s): 6eedd4a

Upload 9 files

config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "_name_or_path": "cl-tohoku/bert-base-japanese-whole-word-masking",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "positive",
+     "1": "negative",
+     "2": "neutral"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "negative": 1,
+     "neutral": 2,
+     "positive": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "tokenizer_class": "BertJapaneseTokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.33.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 32000
+ }
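
The id2label / label2id maps in this config are what make the checkpoint return the label names "positive", "negative", and "neutral" at inference time, and num_labels (3) is derived from them. A minimal sketch for inspecting the config, assuming the uploaded files are available locally; the path below is a placeholder:

from transformers import AutoConfig

# Placeholder path: point this at a local copy of these files or at the Hub repo id.
config = AutoConfig.from_pretrained("./bert-finetuned-wrime-base")

print(config.id2label)    # {0: 'positive', 1: 'negative', 2: 'neutral'}
print(config.num_labels)  # 3, derived from id2label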
finetuning_wrime_01_base.py ADDED
@@ -0,0 +1,172 @@
+ # %%
+ import torch
+ # Check whether a GPU is available
+ if torch.cuda.is_available():
+     print('gpu is available')
+ else:
+     raise Exception('gpu is NOT available')
+
+ # %%
+ from datasets import load_dataset, DatasetDict
+ from transformers import AutoTokenizer
+ from transformers import AutoModelForSequenceClassification
+ from transformers import TrainingArguments
+ from transformers import Trainer
+ from sklearn.metrics import accuracy_score, f1_score
+ import numpy as np
+ import pandas as pd
+ import torch
+ import random
+
+ # %%
+ from pprint import pprint
+ from datasets import load_dataset
+
+ # Load the data from the llm-book/wrime-sentiment repository
+ # on the Hugging Face Hub
+ train_dataset = load_dataset("llm-book/wrime-sentiment", split="train", remove_neutral=False)
+ valid_dataset = load_dataset("llm-book/wrime-sentiment", split="validation", remove_neutral=False)
+ # Pretty-print with pprint for readability
+ pprint(train_dataset)
+ pprint(valid_dataset)
+
+ # %%
+ # Load the tokenizer
+ model_name = "cl-tohoku/bert-base-japanese-whole-word-masking"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ # %%
+ # Tokenization
+ def preprocess_text(batch):
+     encoded_batch = tokenizer(batch['sentence'], max_length=512, truncation=True)
+     encoded_batch['labels'] = batch['label']
+     return encoded_batch
+
+ encoded_train_dataset = train_dataset.map(
+     preprocess_text,
+     remove_columns=train_dataset.column_names,
+ )
+ encoded_valid_dataset = valid_dataset.map(
+     preprocess_text,
+     remove_columns=valid_dataset.column_names,
+ )
+
+ # Mini-batch construction
+ from transformers import DataCollatorWithPadding
+
+ data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
+
+ # %%
+ # Prepare the model
+ from transformers import AutoModelForSequenceClassification
+
+ class_label = train_dataset.features["label"]
+ label2id = {label: id for id, label in enumerate(class_label.names)}
+ id2label = {id: label for id, label in enumerate(class_label.names)}
+ model = AutoModelForSequenceClassification.from_pretrained(
+     model_name,
+     num_labels=class_label.num_classes,
+     label2id=label2id,  # mapping from label names to IDs
+     id2label=id2label,  # mapping from IDs to label names
+ )
+ print(type(model).__name__)
+
+ # %%
+ # Prepare for training
+ from transformers import TrainingArguments
+ # Output directory
+ save_dir = f'bert-finetuned-wrime-base'
+
+ training_args = TrainingArguments(
+     output_dir=save_dir,               # folder for results
+     per_device_train_batch_size=32,    # training batch size
+     per_device_eval_batch_size=32,     # evaluation batch size
+     learning_rate=2e-5,                # learning rate
+     lr_scheduler_type="constant",      # learning-rate scheduler type
+     warmup_ratio=0.1,                  # length of the learning-rate warmup
+     num_train_epochs=100,              # number of epochs
+     save_strategy="epoch",             # when to save checkpoints
+     logging_strategy="epoch",          # when to log
+     evaluation_strategy="epoch",       # when to evaluate on the validation set
+     load_best_model_at_end=True,       # reload the best model on the validation set after training
+     metric_for_best_model="accuracy",  # metric used to select the best model
+     fp16=True,                         # enable automatic mixed precision
+ )
+
+ # %%
+ # Define the metrics
+ def compute_metrics(pred):
+     labels = pred.label_ids
+     preds = pred.predictions.argmax(-1)
+     f1 = f1_score(labels, preds, average="weighted")
+     acc = accuracy_score(labels, preds)
+     return {"accuracy": acc, "f1": f1}
+
+ # %%
+ # Run training
+ from transformers import Trainer
+ from transformers import EarlyStoppingCallback
+
+ trainer = Trainer(
+     model=model,
+     train_dataset=encoded_train_dataset,
+     eval_dataset=encoded_valid_dataset,
+     data_collator=data_collator,
+     args=training_args,
+     compute_metrics=compute_metrics,
+     callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
+ )
+ trainer.train()
+
+ # %%
+ # Save the model
+ trainer.save_model(save_dir)
+ tokenizer.save_pretrained(save_dir)
+ # Save the training history
+ history_df = pd.DataFrame(trainer.state.log_history)
+ history_df.to_csv('base_line/wrime_baseline_history.csv')
+
+ # %%
+ import matplotlib.pyplot as plt
+
+ def show_graph(df, suptitle, output='output.png'):
+     suptitle_size = 23
+     graph_title_size = 20
+     legend_size = 18
+     ticks_size = 13
+     # Learning curves
+     fig = plt.figure(figsize=(20, 5))
+     plt.suptitle(suptitle, fontsize=suptitle_size)
+     # Train Loss
+     plt.subplot(131)
+     plt.title('Train Loss', fontsize=graph_title_size)
+     plt.plot(df['loss'].dropna(), label='train')
+     plt.legend(fontsize=legend_size)
+     plt.yticks(fontsize=ticks_size)
+     # Validation Loss
+     plt.subplot(132)
+     # reg_str = f'$y={round(regression.coef_[0],5)}*x+{round(regression.intercept_,3)}$'
+     plt.title(f'Val Loss', fontsize=graph_title_size)
+     y = df['eval_loss'].dropna().values
+     x = np.arange(len(y)).reshape(-1, 1)
+     # pred = regression.coef_ * x.ravel() + regression.intercept_  # linear regression line
+     plt.plot(y, color='tab:orange', label='val')
+     # plt.plot(pred, color='green', label='pred')
+     plt.legend(fontsize=legend_size)
+     # plt.xlabel(reg_str, fontsize=ticks_size)
+     plt.yticks(fontsize=ticks_size)
+     # Accuracy/F1
+     plt.subplot(133)
+     plt.title('eval Accuracy/F1', fontsize=graph_title_size)
+     plt.plot(df['eval_accuracy'].dropna(), label='accuracy')
+     plt.plot(df['eval_f1'].dropna(), label='F1')
+     plt.legend(fontsize=legend_size)
+     plt.yticks(fontsize=ticks_size)
+     plt.tight_layout()
+     # plt.show()
+     plt.savefig(output)
+
+ # %%
+ # Plot the results
+ suptitle = 'batch:32, lr:2e-5, type:constant'
+ show_graph(history_df, suptitle, 'base_line/wrime_baseline_output.png')
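
Once training has finished, the files in this commit are what trainer.save_model(save_dir) and tokenizer.save_pretrained(save_dir) wrote out. A minimal inference sketch, assuming those files are available at a local path (placeholder below) and that the MeCab dependencies for the Japanese tokenizer (typically fugashi and ipadic) are installed:

from transformers import pipeline

# Placeholder path: the save_dir written by the script above, or the Hub repo id
# these files were uploaded to.
classifier = pipeline("text-classification", model="bert-finetuned-wrime-base")

# The returned labels come from id2label in config.json
# ("positive" / "negative" / "neutral").
print(classifier("今日はとても楽しかった。"))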
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcc31eef7269ace955c7c243db26a97edcdf58e42d43d477fd124ff3a9354ac1
+ size 442545135
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "do_subword_tokenize": true,
+   "do_word_tokenize": true,
+   "jumanpp_kwargs": null,
+   "mask_token": "[MASK]",
+   "mecab_kwargs": null,
+   "model_max_length": 512,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "subword_tokenizer_type": "wordpiece",
+   "sudachi_kwargs": null,
+   "tokenizer_class": "BertJapaneseTokenizer",
+   "unk_token": "[UNK]",
+   "word_tokenizer_type": "mecab"
+ }
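
"word_tokenizer_type": "mecab" together with "subword_tokenizer_type": "wordpiece" means sentences are first segmented into words with MeCab and then split into WordPiece subwords against vocab.txt. A small sketch, assuming a local copy of these files and the MeCab backend dependencies (typically fugashi and ipadic) that BertJapaneseTokenizer needs:

from transformers import AutoTokenizer

# Placeholder path; tokenizer_class in this file resolves to BertJapaneseTokenizer.
tokenizer = AutoTokenizer.from_pretrained("./bert-finetuned-wrime-base")

# MeCab word segmentation followed by WordPiece subword splitting.
print(tokenizer.tokenize("自然言語処理を勉強しています"))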
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9015a0155086e38ddabd69ddde783cf2e6da0f931df8984a6996b23e63cf1a0a
+ size 4015
vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
wrime_baseline_history.csv ADDED
@@ -0,0 +1,12 @@
+ ,loss,learning_rate,epoch,step,eval_loss,eval_accuracy,eval_f1,eval_runtime,eval_samples_per_second,eval_steps_per_second,train_runtime,train_samples_per_second,train_steps_per_second,total_flos,train_loss
+ 0,0.7499,2e-05,1.0,938,,,,,,,,,,,
+ 1,,,1.0,938,0.654792308807373,0.712,0.712652463040488,1.2726,1964.409,62.075,,,,,
+ 2,0.5464,2e-05,2.0,1876,,,,,,,,,,,
+ 3,,,2.0,1876,0.699586033821106,0.7128,0.7073848402995836,1.2761,1959.062,61.906,,,,,
+ 4,0.3782,2e-05,3.0,2814,,,,,,,,,,,
+ 5,,,3.0,2814,0.7840703129768372,0.7028,0.7011891803629126,1.2756,1959.882,61.932,,,,,
+ 6,0.2444,2e-05,4.0,3752,,,,,,,,,,,
+ 7,,,4.0,3752,0.9180415868759155,0.7108,0.7088532209184721,1.2745,1961.551,61.985,,,,,
+ 8,0.1664,2e-05,5.0,4690,,,,,,,,,,,
+ 9,,,5.0,4690,1.0885692834854126,0.7128,0.7127755757268002,1.2786,1955.302,61.788,,,,,
+ 10,,,5.0,4690,,,,,,,328.8299,9123.257,285.254,6502067001188640.0,0.41708059737931436
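
A small sketch for reading this log back with pandas (assuming the CSV is in the working directory): the rows where eval_accuracy is filled in are the per-epoch evaluation records written by the Trainer, while the others hold the training loss and the final summary row.

import pandas as pd

# The unnamed first column is the log index written by to_csv().
history = pd.read_csv("wrime_baseline_history.csv", index_col=0)
# Keep only the evaluation rows and the columns of interest.
eval_rows = history.dropna(subset=["eval_accuracy"])
print(eval_rows[["epoch", "eval_loss", "eval_accuracy", "eval_f1"]])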
wrime_baseline_output.png ADDED