# tiny-shakespeare/create_dataset.py
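"""Build CSV splits of tiny-shakespeare from ~1024-token chunks.

Reads input.txt, groups its lines into chunks of roughly 1024 tokens
(each chunk cut at the last sentence boundary), and writes the chunks
to train.csv and test.csv under a single 'Text' column, with roughly
the first 90% of the input lines feeding the train split.
"""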
import csv
from transformers import AutoTokenizer
# Initialize the tokenizer
tokenizer = AutoTokenizer.from_pretrained("TheBloke/Yarn-Llama-2-7B-128K-GPTQ", use_fast=True)
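# The tokenizer is only used to count tokens when sizing chunks; no model weights are loaded.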
# Read the input data
with open('input.txt', 'r') as f:
    data = f.readlines()
# Initialize variables
train_data = []
test_data = []
current_row = ""
current_token_count = 0
carry_over = ""
# Iterate over each line and add to train or test data
for i, line in enumerate(data):
    line_to_add = carry_over + line.strip()
    carry_over = ""

    # Tokenize the line to count tokens
    tokens = tokenizer(line_to_add)['input_ids']
    num_tokens = len(tokens)
    # Check if adding the line would exceed the token limit
    if current_token_count + num_tokens > 1024:
        # Find the last period followed by a space in the current row
        last_period_idx = current_row.rfind('. ')
        if last_period_idx != -1:
            # Carry over the content after the last period
            carry_over = current_row[last_period_idx+2:].strip() + "\n"
            current_row = current_row[:last_period_idx+1]

        # Roughly the first 90% of input lines feed the train split, the rest the test split
        if i < len(data) * 0.9:
            train_data.append(current_row.strip())
        else:
            test_data.append(current_row.strip())

        # Start the next chunk with the carried-over fragment, then clear carry_over
        # so the fragment is not prepended a second time on the next iteration
        current_row = carry_over
        current_token_count = len(tokenizer(current_row.strip())['input_ids'])
        carry_over = ""
    # Add the line to the current row
    current_row += line_to_add + "\n"
    current_token_count += num_tokens
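
# Flush the last chunk: the loop above only writes a chunk out when the 1024-token
# limit is crossed, so text accumulated after the final boundary would otherwise be
# dropped. Appending it to the test split (it comes from the last lines of the file)
# is a minimal sketch of a fix, assuming dropping that tail is unintended.
if current_row.strip():
    test_data.append(current_row.strip())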
# Save as train.csv and test.csv, one chunk per row under a 'Text' column
with open('train.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['Text'])
    for row in train_data:
        writer.writerow([row])
with open('test.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['Text'])
    for row in test_data:
        writer.writerow([row])
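
# Example usage of the output (not part of this script): the CSVs can be loaded with
# the Hugging Face datasets library, e.g.
#   from datasets import load_dataset
#   ds = load_dataset('csv', data_files={'train': 'train.csv', 'test': 'test.csv'})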