import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    get_constant_schedule_with_warmup,
)


class FinanciaSentimental(Dataset):
    """Load the financial sentiment data and tokenize it."""

    def __init__(self, tokenizer, dataframe, columns, max_len=512):
        self.tokenizer = tokenizer
        self.dataframe = dataframe
        # Label columns to target
        self._columns = columns
        self.max_len = max_len

    @property
    def columns(self):
        """Return the columns to target"""
        return self._columns

    def __len__(self):
        """Return the length of the dataset"""
        return len(self.dataframe)
    def __getitem__(self, index):
        """Return the tokenized example and label tensor at the given index"""
        values = self.dataframe.iloc[index]
        text = values['text']
        label = values[self._columns].values.astype(np.float32)
        # Tokenize, padding/truncating every example to max_len tokens
        inputs = self.tokenizer.encode_plus(
            text,
            max_length=self.max_len,
            padding='max_length',
            truncation=True,
            return_tensors='pt',
        )
        label = torch.tensor(label, dtype=torch.float)
        # encode_plus returns tensors of shape (1, max_len); squeeze the batch dim
        input_ids = inputs["input_ids"].squeeze().to(dtype=torch.long)
        attention_mask = inputs["attention_mask"].squeeze().to(dtype=torch.long)
        token_type_ids = inputs["token_type_ids"].squeeze().to(dtype=torch.long)
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
            "labels": label,
        }
        return inputs_dict
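

# --- Usage sketch (illustrative, not part of the original file) ---
# Shows how FinanciaSentimental plugs into a DataLoader. The checkpoint name,
# label column names, toy dataframe, and batch size below are all assumptions
# chosen for the example; the original file does not specify them.
if __name__ == "__main__":
    import pandas as pd

    # Toy dataframe: one 'text' column plus one column per sentiment label.
    df = pd.DataFrame({
        "text": ["Shares rallied after strong earnings.",
                 "Revenue guidance was cut sharply."],
        "positive": [1.0, 0.0],
        "neutral": [0.0, 0.0],
        "negative": [0.0, 1.0],
    })

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = FinanciaSentimental(
        tokenizer, df, columns=["positive", "neutral", "negative"]
    )
    loader = DataLoader(dataset, batch_size=2, shuffle=True)

    # Each batch is a dict of stacked tensors ready for a transformers model.
    batch = next(iter(loader))
    print({key: tensor.shape for key, tensor in batch.items()})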