import numpy as np
import torch

import evaluate
from accelerate import Accelerator
from datasets import load_dataset
from huggingface_hub import Repository
from torch.optim import AdamW  # transformers.AdamW is deprecated in recent releases
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers import (
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    DataCollatorForSeq2Seq,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
    get_scheduler,
    pipeline,
)
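
# Load the parallel corpus used for translation fine-tuning and point at the
# tokenizer checkpoint trained on it.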
raw_datasets = load_dataset("aatherton2024/eng-nah-svo")
model_checkpoint = "aatherton2024/eng-nah-svo-cpt"
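
# Optionally retrain a GPT-2-style tokenizer on this corpus and push it to the
# Hub; disabled by default in favor of the already-trained tokenizer loaded below.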
TRAIN_NEW_TOKENIZER = False

if TRAIN_NEW_TOKENIZER:

    def get_training_corpus(raw_datasets):
        # Yield the training split in chunks of 1000 examples.
        return (
            raw_datasets["train"][i : i + 1000]
            for i in range(0, len(raw_datasets["train"]), 1000)
        )

    training_corpus = get_training_corpus(raw_datasets)
    old_tokenizer = AutoTokenizer.from_pretrained("gpt2")
    tokenizer = old_tokenizer.train_new_from_iterator(training_corpus, 52000)

    tokenizer.save_pretrained("eng-nah-svo-cpt")
    tokenizer.push_to_hub("eng-nah-svo-cpt")
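
# Load the trained tokenizer and give it an explicit pad token for batching.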
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
tokenizer.add_special_tokens({"pad_token": "[PAD]"})

max_length = 128
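
# Tokenize source/target pairs, truncating both sides to max_length.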
def preprocess_function(examples):
    inputs = examples["en"]
    targets = examples["fr"]
    model_inputs = tokenizer(
        inputs, text_target=targets, max_length=max_length, truncation=True
    )
    return model_inputs


tokenized_datasets = raw_datasets.map(
    preprocess_function,
    batched=True,
    remove_columns=raw_datasets["train"].column_names,
)
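
# Model initialization: either resume from the project checkpoint or start from
# the Helsinki-NLP English-French MarianMT model.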
LOAD_FROM_CHECKPOINT = False

if LOAD_FROM_CHECKPOINT:
    model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
else:
    model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-fr")
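
# Dynamic padding for seq2seq batches, and sacreBLEU as the evaluation metric.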
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)

metric = evaluate.load("sacrebleu")
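
# Decode predictions and labels back to text and report the corpus BLEU score.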
def compute_metrics(eval_preds):
    preds, labels = eval_preds

    if isinstance(preds, tuple):
        preds = preds[0]

    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)

    # Replace -100 (positions ignored by the loss) with the pad token id so the
    # labels can be decoded.
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

    decoded_preds = [pred.strip() for pred in decoded_preds]
    decoded_labels = [[label.strip()] for label in decoded_labels]

    result = metric.compute(predictions=decoded_preds, references=decoded_labels)
    return {"bleu": result["score"]}
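
# Training configuration for the Seq2SeqTrainer run.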
args = Seq2SeqTrainingArguments(
    "eng-nah-svo-translation",
    evaluation_strategy="no",
    save_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=64,
    weight_decay=0.01,
    save_total_limit=3,
    num_train_epochs=3,
    predict_with_generate=True,
    fp16=False,
    push_to_hub=True,
)
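
# Trainer-based fine-tuning: evaluate, train, re-evaluate, then push to the Hub.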
trainer = Seq2SeqTrainer(
    model,
    args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
    data_collator=data_collator,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)
print("Evaluating before training")
trainer.evaluate(max_length=max_length)
print("Training")
trainer.train()
print("Evaluating after training")
trainer.evaluate(max_length=max_length)
trainer.push_to_hub(tags="translation", commit_message="Training complete")
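
# Manual training loop with Accelerate, mirroring the Trainer run above:
# rebuild the dataloaders and reload a fresh model.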
tokenized_datasets.set_format("torch")
train_dataloader = DataLoader(
    tokenized_datasets["train"],
    shuffle=True,
    collate_fn=data_collator,
    batch_size=8,
)
eval_dataloader = DataLoader(
    tokenized_datasets["test"], collate_fn=data_collator, batch_size=8
)

model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
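
# Optimizer, then let Accelerator place model, optimizer, and dataloaders on
# the available device(s).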
optimizer = AdamW(model.parameters(), lr=2e-5)

accelerator = Accelerator()
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
    model, optimizer, train_dataloader, eval_dataloader
)
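
# Linear learning-rate schedule over the total number of training steps.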
num_train_epochs = 3
num_update_steps_per_epoch = len(train_dataloader)
num_training_steps = num_train_epochs * num_update_steps_per_epoch

lr_scheduler = get_scheduler(
    "linear",
    optimizer=optimizer,
    num_warmup_steps=0,
    num_training_steps=num_training_steps,
)
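
# Output locations: a local directory plus a clone of the Hub repo used to
# upload checkpoints. (Repository is deprecated in recent huggingface_hub
# releases in favor of the HfApi upload methods.)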
model_name = "model"
output_dir = "./output"
repo = Repository(
    "/mnt/storage/aatherton/hf_eng_fra_trans",
    clone_from="aatherton2024/hf_eng_fra_trans",
)
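
# Convert generated token ids and padded labels back into text for sacreBLEU.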
def postprocess(predictions, labels):
    predictions = predictions.cpu().numpy()
    labels = labels.cpu().numpy()

    decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)

    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

    decoded_preds = [pred.strip() for pred in decoded_preds]
    decoded_labels = [[label.strip()] for label in decoded_labels]
    return decoded_preds, decoded_labels
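
# Each epoch: one optimization pass, then generation-based BLEU evaluation,
# then a checkpoint save and (non-blocking) push to the Hub.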
progress_bar = tqdm(range(num_training_steps))

for epoch in range(num_train_epochs):
    # Training
    model.train()
    for batch in train_dataloader:
        outputs = model(**batch)
        loss = outputs.loss
        accelerator.backward(loss)

        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()
        progress_bar.update(1)
    # Evaluation
    model.eval()
    for batch in tqdm(eval_dataloader):
        with torch.no_grad():
            generated_tokens = accelerator.unwrap_model(model).generate(
                batch["input_ids"],
                attention_mask=batch["attention_mask"],
                max_length=128,
            )
        labels = batch["labels"]

        # Pad predictions and labels so they can be gathered across processes.
        generated_tokens = accelerator.pad_across_processes(
            generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
        )
        labels = accelerator.pad_across_processes(labels, dim=1, pad_index=-100)

        predictions_gathered = accelerator.gather(generated_tokens)
        labels_gathered = accelerator.gather(labels)

        decoded_preds, decoded_labels = postprocess(predictions_gathered, labels_gathered)
        metric.add_batch(predictions=decoded_preds, references=decoded_labels)

    results = metric.compute()
    print(f"epoch {epoch}, BLEU score: {results['score']:.2f}")
    # Save and upload this epoch's checkpoint.
    accelerator.wait_for_everyone()
    unwrapped_model = accelerator.unwrap_model(model)
    unwrapped_model.save_pretrained(output_dir, save_function=accelerator.save)
    if accelerator.is_main_process:
        tokenizer.save_pretrained(output_dir)
        repo.push_to_hub(
            commit_message=f"Training in progress epoch {epoch}", blocking=False
        )
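
# Quick sanity check: run the published model through a translation pipeline.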
model_checkpoint = "aatherton2024/hf_eng_fra_reproduction"
translator = pipeline("translation", model=model_checkpoint)
print(translator("Default to expanded threads"))
print(
    translator(
        "Unable to import %1 using the OFX importer plugin. This file is not the correct format."
    )
)