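"""
Convert Stack Exchange Posts XML dumps into the Open Assistant instruction format.

For each dataset, the XML file in CACHE_DIR is parsed, questions with an accepted
answer are joined to that answer, HTML bodies are converted to markdown, and the
result is written to <dataset>.parquet with INSTRUCTION, RESPONSE, SOURCE and
METADATA columns. upload_hf() can then push a parquet file to the Hugging Face
Hub dataset named by HF_DATASET.

Usage: pass one or more dataset names on the command line; with no arguments,
all XML files found in CACHE_DIR are processed.
"""
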
from bs4 import BeautifulSoup as bs
import pandas as pd
import os
import glob
import sys
import re
from html2text import html2text
from datasets import load_dataset

CACHE_DIR = "xml/" |
|
SOURCE = "stackexchange-{0}" |
|
MAX_ANSWERS = 10 |
|
QUESTION_SCORE_TRESHOLD = 0 |
|
ANSWER_SCORE_TRESHOLD = 0 |
|
HF_DATASET = "donfu/oa-stackexchange" |
|
|
|
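# Expected column types for the df.astype() cast in xml_to_df(). A Posts.xml row
# looks roughly like this (the attribute set varies by post type):
#   <row Id="4" PostTypeId="1" AcceptedAnswerId="7" CreationDate="..." Score="13"
#        ViewCount="1000" Body="&lt;p&gt;...&lt;/p&gt;" Title="..." Tags="&lt;c#&gt;"
#        AnswerCount="2" CommentCount="0" ContentLicense="CC BY-SA 2.5" />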
xml_format_map = {
    "Id": int,
    "PostTypeId": int,
    "CreationDate": str,
    "Score": int,
    "ViewCount": int,
    "Body": str,
    "AnswerCount": int,
    "CommentCount": int,
    "ContentLicense": str,
    "AcceptedAnswerId": int,
    "ParentId": int,
}


def main():
    datasets = sys.argv[1:] if len(sys.argv) > 1 else list_cached_datasets()
    for dataset in datasets:
        process_dataset(dataset)


def list_cached_datasets():
    xml_files = glob.glob(f"{CACHE_DIR}/*.xml")
    datasets = [os.path.splitext(os.path.basename(file))[0] for file in xml_files]
    datasets.sort()
    return datasets


def process_dataset(dataset):
    xml_file = f"{CACHE_DIR}/{dataset}.xml"
    source = SOURCE.format(dataset)
    if os.path.exists(xml_file):
        df = xml_to_df(xml_file, source)
        oa = convert_to_oa(df)
        save_parquet(oa, dataset)
    else:
        print(f"XML file {xml_file} not found, please download first. Skipping...")


def convert_to_oa(df):
    """
    Convert dataframe to Open Assistant format with INSTRUCTION, RESPONSE, SOURCE, METADATA columns.

    Only include questions with an AcceptedAnswerId.
    """
    create_metadata = lambda row: {
        "tags": row["Tags_q"]
        .replace("-", " ")
        .replace("><", ", ")
        .replace("<", "")
        .replace(">", "")
        if isinstance(row["Tags_q"], str)
        else "",
        "score": row["Score_q"],
        "views": row["ViewCount_q"],
    }
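    # Illustrative example: Tags_q == "<unit-testing><python-3.x>" normalises to
    # "unit testing, python 3.x".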
    # Keep only questions that have an accepted answer and join each to that answer
    questions = df[df["AcceptedAnswerId"] != 0]
    merged = pd.merge(
        questions,
        df,
        how="left",
        left_on="AcceptedAnswerId",
        right_on="Id",
        suffixes=("_q", "_a"),
    )
    merged["INSTRUCTION"] = (
        merged["Title_q"] + "\n" + merged["Body_q"].apply(to_markdown)
    )
    merged["RESPONSE"] = merged["Body_a"].apply(to_markdown)
    merged["SOURCE"] = merged["DataSource_q"]
    merged["METADATA"] = merged.apply(create_metadata, axis=1)

    return merged[["INSTRUCTION", "RESPONSE", "SOURCE", "METADATA"]]


def save_parquet(df, dataset):
    """
    Save Dataframe to Parquet. See here for specs:
    https://projects.laion.ai/Open-Assistant/docs/data/datasets#creating-a-dataset-on-hugging-face
    """
    parquet_file = f"{dataset}.parquet"
    df.to_parquet(parquet_file, row_group_size=100, engine="pyarrow", index=False)
    print("Converted data into parquet format: " + parquet_file)


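# Note: upload_hf() is not wired into main(); it can be run separately once the
# generated parquet file has been reviewed.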
def upload_hf(dataset):
    """
    Upload to Hugging Face
    """
    parquet_file = f"{dataset}.parquet"
    ds = load_dataset("parquet", data_files=parquet_file, name=dataset)
    ds.push_to_hub(HF_DATASET, max_shard_size="500MB")
    print("Uploaded to Hugging Face: " + HF_DATASET)


def xml_to_df(path: str, source: str):
    """
    Collect and manually import XML into a Dataframe.

    pd.read_xml() errors when XML trees are too large; this is just a hack to
    read an XML file and parse it into a Dataframe. **Not tested on huge XML files**

    Parameters:
        path (str): Path to the cached Posts XML file
        source (str): Value stored in the DataSource column

    Returns:
        df (DataFrame): A Dataframe from the XML file
    """
    with open(path, "rb") as f:
        soup = bs(f, "xml")
        posts = soup.find_all("row")

    all_posts = [post.attrs for post in posts]

    df = pd.DataFrame(all_posts)
    # Attributes missing from a row become NaN; default them to 0 so they cast to int
    df["AnswerCount"] = df["AnswerCount"].fillna(0)
    df["ViewCount"] = df["ViewCount"].fillna(0)
    df["AcceptedAnswerId"] = df["AcceptedAnswerId"].fillna(0)
    df["ParentId"] = df["ParentId"].fillna(0)
    df["DataSource"] = source
    df = df.astype(xml_format_map)
    return df


remove_markdown_links_pattern = r"\[([^\]]+)\]\(([^\)]+)\)"
remove_remaining_links = r"https?:\/\/[^\s]+"


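# Illustrative example of the link stripping in to_markdown():
#   "[docs](https://example.com) and https://foo.example"  ->  "docs and "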
def to_markdown(text):
    text = html2text(text, bodywidth=0).strip()
    text = re.sub(remove_markdown_links_pattern, r"\1", text)
    text = re.sub(remove_remaining_links, "", text)

    if "http" in text:
        raise ValueError("Found http in markdown: " + text)
    return text


if __name__ == "__main__":
    main()