Datasets: a preprocessing script that splits each document into paragraphs, then pairs roughly the first half of the paragraphs as model inputs with the remainder as targets.
import random
import re

from datasets import load_dataset

def split_into_paragraphs(text):
    """Split raw text into paragraphs at blank lines or Markdown headers."""
    # A paragraph boundary is either a blank line or the start of a line
    # beginning with '#' (a Markdown header, matched as a zero-width split).
    paragraphs = re.split(r'\n\n|(?=^#)', text, flags=re.MULTILINE)
    return [p.strip() for p in paragraphs if p.strip()]
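
# For example (an illustrative input, not from the original script):
#   split_into_paragraphs("# Title\n\nIntro text.\n\n## Section")
#   -> ['# Title', 'Intro text.', '## Section']
# Note: zero-width splits like (?=^#) require Python 3.7 or newer.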

def create_input_output_pairs(example):
    """Split a document's paragraph list into an (inputs, targets) pair."""
    paragraphs = example['paragraphs']
    n_paragraphs = len(paragraphs)
    # Cut roughly in half, jittering the cut point by one paragraph so the
    # split position varies across examples.
    n_input = random.randint(n_paragraphs // 2 - 1, n_paragraphs // 2 + 1)
    # Clamp so both sides are non-empty whenever there are at least two
    # paragraphs (the original bound could let n_input reach n_paragraphs,
    # leaving the targets empty).
    n_input = max(1, min(n_input, n_paragraphs - 1))
    return {
        'inputs': ' '.join(paragraphs[:n_input]),
        'targets': ' '.join(paragraphs[n_input:]),
    }
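
# Illustrative example: with paragraphs ['A', 'B', 'C', 'D'] the cut point is
# 1, 2, or 3, so one possible result is {'inputs': 'A B', 'targets': 'C D'}.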

def preprocess_dataset(dataset_name, text_column='text'):
    """Load a dataset and turn each document into an (inputs, targets) pair."""
    dataset = load_dataset(dataset_name)
    # Split each document into a list of paragraphs.
    dataset = dataset.map(
        lambda example: {'paragraphs': split_into_paragraphs(example[text_column])},
        remove_columns=[text_column],
    )
    # Turn each paragraph list into an input-output pair.
    preprocessed_dataset = dataset.map(
        create_input_output_pairs,
        remove_columns=['paragraphs'],
    )
    return preprocessed_dataset
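
# A minimal smoke test: a sketch for sanity-checking the pipeline on a small
# in-memory dataset so nothing is downloaded. Dataset.from_dict and DatasetDict
# are part of the datasets library; the toy document below is made up.
def smoke_test():
    from datasets import Dataset, DatasetDict
    toy = DatasetDict({
        'train': Dataset.from_dict({
            'text': ["# Title\n\nFirst paragraph.\n\nSecond paragraph.\n\nThird paragraph."]
        })
    })
    toy = toy.map(
        lambda example: {'paragraphs': split_into_paragraphs(example['text'])},
        remove_columns=['text'],
    )
    toy = toy.map(create_input_output_pairs, remove_columns=['paragraphs'])
    # One possible output (the cut point is random):
    # {'inputs': '# Title First paragraph.', 'targets': 'Second paragraph. Third paragraph.'}
    print(toy['train'][0])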

# Usage example
if __name__ == "__main__":
    # Replace 'your_dataset' with the name of a dataset on the Hugging Face
    # Hub (or a local dataset path) that has a 'text' column.
    dataset_name = 'your_dataset'
    preprocessed_dataset = preprocess_dataset(dataset_name)
    # Print a few preprocessed examples from the train split.
    print(preprocessed_dataset['train'][:5])
    # Save the preprocessed dataset to disk.
    preprocessed_dataset.save_to_disk("preprocessed_dataset")
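    # Reload to verify the round trip; load_from_disk is part of the
    # datasets library.
    from datasets import load_from_disk
    reloaded = load_from_disk("preprocessed_dataset")
    print(reloaded)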