import csv
import subprocess
import sys

import pandas as pd
from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments
# Read requirements.txt and install each dependency with pip
with open('requirements.txt', 'r') as req_file:
    requirements = req_file.read().splitlines()
for requirement in requirements:
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', requirement])
# Load and preprocess the IMDB dataset from CSV
preprocessed_data = []
with open('IMDB Dataset.csv', 'r') as csv_file:
    csv_reader = csv.DictReader(csv_file)
    for row in csv_reader:
        text = row['review']
        # Map the 'positive'/'negative' sentiment strings to integer class
        # ids, since the sequence-classification head expects integer labels
        label = 1 if row['sentiment'] == 'positive' else 0
        preprocessed_entry = {
            'text': text,
            'label': label
        }
        preprocessed_data.append(preprocessed_entry)
# Convert the preprocessed data to a pandas DataFrame
df = pd.DataFrame(preprocessed_data)
# Convert the DataFrame to a datasets dataset
dataset = Dataset.from_pandas(df)
# Tokenize the dataset
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
def tokenize_function(examples):
    return tokenizer(examples["text"], padding="max_length", truncation=True)
tokenized_datasets = dataset.map(tokenize_function, batched=True)
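# With batched=True, map passes batches of examples to tokenize_function and
# adds the resulting input_ids and attention_mask columns to the dataset.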
# Fine-tune the Bloom model
model = AutoModelForSequenceClassification.from_pretrained("bigscience/bloom-560m", num_labels=2)
training_args = TrainingArguments(output_dir="test_trainer")
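# TrainingArguments above relies on library defaults for all hyperparameters;
# "test_trainer" is only the directory where checkpoints are written.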
import numpy as np
import evaluate
metric = evaluate.load("accuracy")
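# The original script stops after loading the metric. What follows is a
# minimal sketch of the remaining steps, mirroring the standard transformers
# Trainer recipe; the 80/20 split and use of the default hyperparameters are
# assumptions, not part of the original file.
from transformers import Trainer

def compute_metrics(eval_pred):
    # Convert raw logits to predicted class ids before scoring accuracy
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return metric.compute(predictions=predictions, references=labels)

# Hold out part of the data for evaluation (assumed split ratio)
split = tokenized_datasets.train_test_split(test_size=0.2)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=split["train"],
    eval_dataset=split["test"],
    compute_metrics=compute_metrics,
)
trainer.train()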