# imdb-movies/python/preprocess_movies.py
import json
import os
import pandas as pd
from datasets import load_dataset, Dataset
from transformers import AutoTokenizer
# High-Level Utility Functions

def extract_unique_genres(df):
    """Extract the sorted list of unique genres from the dataframe's 'genre' column."""
    genre_list = []
    for genres in df['genre']:
        for genre in genres.split(','):
            genre_list.append(genre.strip())
    return sorted(set(genre_list))


def encode_genres(genre_str):
    """Encode a comma-separated genre string as a multi-hot vector for multi-label
    classification. Relies on the module-level genre_to_int mapping defined below."""
    genres = [g.strip() for g in genre_str.split(',')]
    return [1 if genre in genres else 0 for genre in genre_to_int]


def tokenize_function(examples):
    """Tokenize the overview text and attach the multi-hot genre labels the model needs.
    Relies on the module-level tokenizer defined below."""
    encodings = tokenizer(examples["overview"], padding="max_length", truncation=True)
    encodings["labels"] = [encode_genres(genre_str) for genre_str in examples["genre"]]
    return encodings

# Load the dataset directly from the Hugging Face Hub
# Note: adjust the auth token to match your Hugging Face access configuration
dataset = load_dataset('gatesfoundation/imdb-movies', data_files='data/imdb_movies.csv', split='train')

# Convert the dataset to a pandas dataframe and drop rows missing an overview or genre
df = dataset.to_pandas()
df = df.dropna(subset=['overview', 'genre'])
# Extract the unique genres present in the data
unique_genres = extract_unique_genres(df)

# Load the genre_to_int mapping if it already exists; otherwise build it and save it
mapping_file_path = 'genre_to_int.json'
if os.path.exists(mapping_file_path):
    with open(mapping_file_path, 'r') as f:
        genre_to_int = json.load(f)
else:
    genre_to_int = {genre: index for index, genre in enumerate(unique_genres)}
    with open(mapping_file_path, 'w') as f:
        json.dump(genre_to_int, f)
# Encode genres as multi-hot vectors (kept on the dataframe for inspection;
# tokenize_function recomputes them as the model's labels)
df['encoded_genres'] = df['genre'].apply(encode_genres)

# Convert the pandas dataframe back to a Hugging Face dataset,
# without carrying the pandas index along as an extra column
dataset = Dataset.from_pandas(df, preserve_index=False)

# Tokenize the dataset (tokenize_function uses this module-level tokenizer)
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
tokenized_datasets = dataset.map(tokenize_function, batched=True)

# Save the tokenized dataset in Hugging Face Arrow format
tokenized_datasets.save_to_disk('data/imdb_movies_preprocessed')
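
# Sanity check (a minimal sketch added for illustration, not part of the original
# pipeline): reload the saved Arrow dataset with datasets.load_from_disk and confirm
# that each example's label vector has one slot per genre in genre_to_int.
from datasets import load_from_disk

reloaded = load_from_disk('data/imdb_movies_preprocessed')
assert len(reloaded[0]['labels']) == len(genre_to_int), "label width must match the genre count"
print(f"Preprocessed {len(reloaded)} examples with {len(genre_to_int)} genre labels each")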