|
import json |
|
import os |
|
import pandas as pd |
|
from datasets import load_dataset, Dataset |
|
from transformers import AutoTokenizer |
|
|
|
|
|
def extract_unique_genres(df):
    """Return a sorted list of the distinct genre names in *df*.

    Each entry of df['genre'] is a comma-separated string; individual
    names are stripped of surrounding whitespace before de-duplication.
    """
    seen = set()
    for cell in df['genre']:
        seen.update(name.strip() for name in cell.split(','))
    return sorted(seen)
|
|
|
def encode_genres(genre_str, mapping=None):
    """Multi-hot encode a comma-separated genre string.

    Parameters
    ----------
    genre_str : str
        Comma-separated genre names; whitespace around names is ignored.
    mapping : dict[str, int], optional
        Genre-name -> vector-index mapping. Defaults to the module-level
        ``genre_to_int``.

    Returns
    -------
    list[int]
        0/1 vector of length ``len(mapping)`` with 1s at the indices of
        the genres present. Positions come from the mapping's *values*,
        not its key insertion order — the original keyed off dict order,
        which silently misaligns labels if the persisted JSON mapping's
        key order ever differs from its index values. Genres absent from
        the mapping are ignored (same as before).
    """
    if mapping is None:
        mapping = genre_to_int
    encoding = [0] * len(mapping)
    for name in (g.strip() for g in genre_str.split(',')):
        index = mapping.get(name)
        if index is not None:
            encoding[index] = 1
    return encoding
|
|
|
def tokenize_function(examples):
    """Tokenize the 'overview' column and attach multi-hot genre labels.

    Intended for ``Dataset.map(..., batched=True)``: *examples* is a dict
    of column lists. Relies on the module-level ``tokenizer`` and
    ``encode_genres``.
    """
    batch = tokenizer(examples["overview"], padding="max_length", truncation=True)
    labels = []
    for genre_str in examples["genre"]:
        labels.append(encode_genres(genre_str))
    batch["labels"] = labels
    return batch
|
|
|
|
|
|
|
# --- Script body: build a tokenized multi-label genre dataset from IMDB CSV ---

# Load the raw CSV through the datasets hub; only a 'train' split exists here.
dataset = load_dataset('gatesfoundation/imdb-movies', data_files='data/imdb_movies.csv', split='train')

# Move to pandas for cleaning: rows missing either the model input
# ('overview') or the labels ('genre') are unusable for training.
df = dataset.to_pandas()

df = df.dropna(subset=['overview', 'genre'])

# Every distinct genre name present in this run's data.
unique_genres = extract_unique_genres(df)

# Reuse a previously persisted genre -> index mapping if one exists so label
# indices stay stable across runs; otherwise build one and save it.
# NOTE(review): a stale mapping file may lack genres that appear in the new
# data — those genres are then silently dropped by encode_genres; confirm
# that is the intended behavior.
mapping_file_path = 'genre_to_int.json'

if os.path.exists(mapping_file_path):

    with open(mapping_file_path, 'r') as f:

        genre_to_int = json.load(f)

else:

    genre_to_int = {genre: index for index, genre in enumerate(unique_genres)}

    with open(mapping_file_path, 'w') as f:

        json.dump(genre_to_int, f)

# Multi-hot label vector per row (labels are also re-derived inside
# tokenize_function below, so this column is mainly for inspection).
df['encoded_genres'] = df['genre'].apply(encode_genres)

# Back to a datasets.Dataset for batched tokenization.
dataset = Dataset.from_pandas(df)

# Tokenize with cased BERT; batched=True passes dicts of column lists to
# tokenize_function, which attaches the 'labels' field.
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

tokenized_datasets = dataset.map(tokenize_function, batched=True)

# Persist the fully processed dataset for the downstream training script.
tokenized_datasets.save_to_disk('data/imdb_movies_preprocessed')
|
|