#!/usr/bin/env python3
# Simple script to convert StackExchange XML to Open Assistant format
# Original code by https://github.com/b-mc2
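#
# Usage: pass one or more dataset names as command-line arguments;
# with no arguments, every XML file cached under xml/ is converted.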
from bs4 import BeautifulSoup as bs
import pandas as pd
import os
import glob
import sys
import re
from html2text import html2text
from datasets import load_dataset
CACHE_DIR = "xml/"
SOURCE = "stackexchange-{0}"
MAX_ANSWERS = 10
QUESTION_SCORE_THRESHOLD = 0
ANSWER_SCORE_THRESHOLD = 0
HF_DATASET = "donfu/oa-stackexchange"
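# Target dtypes for the post attributes parsed out of the XML; the count and
# ID columns are zero-filled in xml_to_df() before being cast to int.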
xml_format_map = {
    "Id": int,
    "PostTypeId": int,
    "CreationDate": str,
    "Score": int,
    "ViewCount": int,
    "Body": str,
    "AnswerCount": int,
    "CommentCount": int,
    "ContentLicense": str,
    "AcceptedAnswerId": int,
    "ParentId": int,
}


def main():
    datasets = sys.argv[1:] if len(sys.argv) > 1 else list_cached_datasets()
    for dataset in datasets:
        process_dataset(dataset)


def list_cached_datasets():
    xml_files = glob.glob(f"{CACHE_DIR}/*.xml")
    datasets = [os.path.splitext(os.path.basename(file))[0] for file in xml_files]
    datasets.sort()
    return datasets


def process_dataset(dataset):
    xml_file = f"{CACHE_DIR}/{dataset}.xml"
    source = SOURCE.format(dataset)
    if os.path.exists(xml_file):
        df = xml_to_df(xml_file, source)
        oa = convert_to_oa(df)
        save_parquet(oa, dataset)
        # upload_hf(dataset)
    else:
        print(f"XML file {xml_file} not found, please download first. Skipping...")


def convert_to_oa(df):
    """
    Convert a dataframe to Open Assistant format with INSTRUCTION, RESPONSE,
    SOURCE and METADATA columns. Only questions with an AcceptedAnswerId are kept.
    """
    create_metadata = lambda row: {
        "tags": row["Tags_q"]
        .replace("-", " ")
        .replace("><", ", ")
        .replace("<", "")
        .replace(">", "")
        if isinstance(row["Tags_q"], str)
        else "",
        "score": row["Score_q"],
        "views": row["ViewCount_q"],
    }
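    # Example: tags "<machine-learning><scikit-learn>" become "machine learning, scikit learn"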
    questions = df[df["AcceptedAnswerId"] != 0]
    merged = pd.merge(
        questions,
        df,
        how="left",
        left_on="AcceptedAnswerId",
        right_on="Id",
        suffixes=("_q", "_a"),
    )
    merged["INSTRUCTION"] = (
        merged["Title_q"] + "\n" + merged["Body_q"].apply(to_markdown)
    )
    merged["RESPONSE"] = merged["Body_a"].apply(to_markdown)
    merged["SOURCE"] = merged["DataSource_q"]
    merged["METADATA"] = merged.apply(create_metadata, axis=1)
    return merged[["INSTRUCTION", "RESPONSE", "SOURCE", "METADATA"]]


def save_parquet(df, dataset):
    """
    Save dataframe to Parquet. See here for the specs:
    https://projects.laion.ai/Open-Assistant/docs/data/datasets#creating-a-dataset-on-hugging-face
    """
    parquet_file = f"{dataset}.parquet"
    df.to_parquet(parquet_file, row_group_size=100, engine="pyarrow", index=False)
    print("Converted data into parquet format: " + parquet_file)


def upload_hf(dataset):
    """
    Upload to Hugging Face
    """
    parquet_file = f"{dataset}.parquet"
    ds = load_dataset("parquet", data_files=parquet_file, name=dataset)
    ds.push_to_hub(HF_DATASET, max_shard_size="500MB")
    print("Uploaded to Hugging Face: " + HF_DATASET)


def xml_to_df(path: str, source: str):
    """
    Manually import an XML file into a dataframe.
    pd.read_xml() errors when XML trees are too large; this is a workaround
    that parses the file with BeautifulSoup instead. **Not tested on huge XML files**

    Parameters:
        path (str): Path to the XML file
        source (str): Value for the DataSource column

    Returns:
        df (DataFrame): A dataframe built from the XML file
    """
    with open(path, "rb") as f:
        soup = bs(f, "xml")
    posts = soup.find_all("row")
    all_posts = [post.attrs for post in posts]
    df = pd.DataFrame(all_posts)
    # Missing attributes come through as NaN; zero-fill them so they can be cast to int
    df["AnswerCount"] = df["AnswerCount"].fillna(0)
    df["ViewCount"] = df["ViewCount"].fillna(0)
    df["AcceptedAnswerId"] = df["AcceptedAnswerId"].fillna(0)
    df["ParentId"] = df["ParentId"].fillna(0)
    df["DataSource"] = source
    df = df.astype(xml_format_map)
    return df


# Regex to strip markdown links, keeping only the link text
remove_markdown_links_pattern = r"\[([^\]]+)\]\(([^\)]+)\)"
# Regex to strip any remaining bare URLs
remove_remaining_links = r"https?:\/\/[^\s]+"


# Convert HTML content to Markdown, removing all links
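# Example: '<p>See the <a href="https://pandas.pydata.org">pandas docs</a></p>'
# becomes "See the pandas docs"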
def to_markdown(text):
    text = html2text(text, bodywidth=0).strip()
    text = re.sub(remove_markdown_links_pattern, r"\1", text)
    text = re.sub(remove_remaining_links, "", text)
    if "http" in text:
        # Sanity check: no URLs should survive the stripping above
        raise ValueError("Found http in markdown: " + text)
    return text


if __name__ == "__main__":
    main()