# oa-stackexchange / process.py — donfu, commit 370ce50 ("Remove links from text")
# (Hugging Face file-page header converted to a comment so the file parses.)
#!/usr/bin/env python3
# Simple script to convert StackExchange XML to Open Assistant format
# Original code by https://github.com/b-mc2
from bs4 import BeautifulSoup as bs
import pandas as pd
import os
import glob
import sys
import re
from html2text import html2text
from datasets import load_dataset
CACHE_DIR = "xml/"  # directory holding the downloaded StackExchange XML dumps
SOURCE = "stackexchange-{0}"  # template for the SOURCE column, filled with the dataset name
MAX_ANSWERS = 10  # keep at most this many answers per question in group_qa()
# NOTE(review): "TRESHOLD" is a typo for "THRESHOLD"; names kept as-is because
# they may be referenced outside this file. Currently only used by the
# (commented-out) filter_scores_above() pipeline step.
QUESTION_SCORE_TRESHOLD = 0
ANSWER_SCORE_TRESHOLD = 0
HF_DATASET = "donfu/oa-stackexchange"  # Hugging Face dataset repo used by upload_hf()
# Target dtypes for the XML row attributes; applied with DataFrame.astype()
# in xml_to_df(). Columns that may be absent in the XML (AnswerCount,
# ViewCount, AcceptedAnswerId, ParentId) are filled with 0 there before the
# int casts below.
xml_format_map = {
    "Id": int,
    "PostTypeId": int,
    "CreationDate": str,
    "Score": int,
    "ViewCount": int,
    "Body": str,
    "AnswerCount": int,
    "CommentCount": int,
    "ContentLicense": str,
    "AcceptedAnswerId": int,
    "ParentId": int,
}
def main():
    """Process the datasets named on the command line, or every cached one."""
    targets = sys.argv[1:] or list_cached_datasets()
    for name in targets:
        process_dataset(name)
def list_cached_datasets():
    """Return the sorted base names (no extension) of all cached XML dumps."""
    return sorted(
        os.path.splitext(os.path.basename(path))[0]
        for path in glob.glob(f"{CACHE_DIR}/*.xml")
    )
def process_dataset(dataset):
    """Convert one cached StackExchange XML dump into an OA-format parquet file."""
    xml_file = f"{CACHE_DIR}/{dataset}.xml"
    if not os.path.exists(xml_file):
        print(f"XML file {xml_file} not found, please download first. Skipping...")
        return
    df = xml_to_df(xml_file, SOURCE.format(dataset))
    # Disabled pipeline steps, kept for reference:
    # df = filter_only_questions_with_accepted_answers(df)
    # df = filter_scores_above(df, QUESTION_SCORE_TRESHOLD, ANSWER_SCORE_TRESHOLD)
    # df = clean_tags(df)
    # df = convert_html_to_markdown(df)
    # df = group_qa(df)
    oa = convert_to_oa(df)
    save_parquet(oa, dataset)
    # upload_hf(dataset)
def convert_to_oa(all):
    """
    Convert dataframe to Open Assistant format with INSTRUCTION, RESPONSE,
    SOURCE, METADATA columns.

    Only questions with an AcceptedAnswerId (non-zero) are kept; each is merged
    with its accepted answer and both bodies are converted to markdown.

    Parameters:
        all (DataFrame): every post (questions and answers) from one dump.
            NOTE(review): parameter name shadows the builtin `all`; kept to
            preserve the interface for keyword callers.
    Returns:
        DataFrame: one row per accepted question, OA columns only.
    """

    def create_metadata(row):
        # Turn the "<tag-one><tag-two>" slug into "tag one, tag two";
        # non-string (NaN) tags become "".
        tags = row["Tags_q"]
        if isinstance(tags, str):
            tags = (
                tags.replace("-", " ")
                .replace("><", ", ")
                .replace("<", "")
                .replace(">", "")
            )
        else:
            tags = ""
        return {"tags": tags, "score": row["Score_q"], "views": row["ViewCount_q"]}

    questions = all[all["AcceptedAnswerId"] != 0]
    # Join each question to its accepted answer row (question.AcceptedAnswerId
    # == answer.Id); shared column names get _q / _a suffixes.
    merged = pd.merge(
        questions,
        all,
        how="left",
        left_on="AcceptedAnswerId",
        right_on="Id",
        suffixes=("_q", "_a"),
    )
    merged["INSTRUCTION"] = (
        merged["Title_q"] + "\n" + merged["Body_q"].apply(to_markdown)
    )
    merged["RESPONSE"] = merged["Body_a"].apply(to_markdown)
    merged["SOURCE"] = merged["DataSource_q"]
    merged["METADATA"] = merged.apply(create_metadata, axis=1)
    return merged[["INSTRUCTION", "RESPONSE", "SOURCE", "METADATA"]]
def save_parquet(df, dataset):
    """
    Write the dataframe to "<dataset>.parquet". Spec:
    https://projects.laion.ai/Open-Assistant/docs/data/datasets#creating-a-dataset-on-hugging-face
    """
    out_path = f"{dataset}.parquet"
    df.to_parquet(out_path, row_group_size=100, engine="pyarrow", index=False)
    print("Converted data into parquet format: " + out_path)
def upload_hf(dataset):
    """
    Upload the local "<dataset>.parquet" file to the Hugging Face hub.
    """
    hf_dataset = load_dataset(
        "parquet", data_files=f"{dataset}.parquet", name=dataset
    )
    hf_dataset.push_to_hub(HF_DATASET, max_shard_size="500MB")
    print("Uploaded to Hugging Face: " + HF_DATASET)
def xml_to_df(path: str, source: str):
    """
    Collect and manually import a StackExchange Posts XML file into a DataFrame.

    pd.read_xml() errors when XML trees are too large; this parses the <row>
    elements with BeautifulSoup instead. **Not tested on huge XML files.**

    Parameters:
        path (str): path to the XML dump on disk
        source (str): value stored in the DataSource column for every row
    Returns:
        df (DataFrame): one row per post, typed per xml_format_map
    """
    with open(path, "rb") as f:
        soup = bs(f, "xml")
    posts = soup.find_all("row")
    df = pd.DataFrame([post.attrs for post in posts])
    # These attributes are optional in the XML; default missing values to 0 so
    # the int casts in xml_format_map cannot fail on NaN. Column assignment is
    # used instead of chained `df.col.fillna(..., inplace=True)`, which is
    # deprecated and silently breaks under pandas copy-on-write.
    for col in ("AnswerCount", "ViewCount", "AcceptedAnswerId", "ParentId"):
        df[col] = df[col].fillna(0)
    df["DataSource"] = source
    return df.astype(xml_format_map)
def filter_only_questions_with_accepted_answers(df):
    """
    Keep only questions that have an accepted answer, plus every answer row
    belonging to one of those questions — even answers that were not accepted.

    Parameters:
        df (DataFrame): containing "AcceptedAnswerId", "Id", and "ParentId" columns
    Returns:
        DataFrame: filtered results
    """
    has_accepted = df["AcceptedAnswerId"] != 0
    belongs_to_kept_question = df["ParentId"].isin(df.loc[has_accepted, "Id"])
    return df[has_accepted | belongs_to_kept_question]
def filter_scores_above(
    df, question_score_threshold: int = 20, answer_score_threshold: int = 20
):
    """
    Drop questions and answers whose Score is below their respective threshold.

    Questions (PostTypeId == 1) are compared against question_score_threshold,
    answers (PostTypeId == 2) against answer_score_threshold.

    Parameters:
        df (DataFrame): containing "Score" and "PostTypeId" columns
    Returns:
        DataFrame: filtered results
    """
    is_question = df.PostTypeId == 1
    is_answer = df.PostTypeId == 2
    keep = (is_question & (df["Score"] >= question_score_threshold)) | (
        is_answer & (df["Score"] >= answer_score_threshold)
    )
    return df[keep]
# Matches a markdown link "[text](url)"; group 1 captures the link text.
remove_markdown_links_pattern = r"\[([^\]]+)\]\(([^\)]+)\)"
# Matches any remaining bare http(s) URL up to the next whitespace.
remove_remaining_links = r"https?:\/\/[^\s]+"
# Replace HTML content to markdown but remove links
def to_markdown(text):
    """
    Convert an HTML fragment to markdown with all links stripped.

    Markdown links keep their text ("[text](url)" -> "text"); bare URLs are
    removed entirely.

    Parameters:
        text (str): HTML body of a post
    Returns:
        str: markdown text with no URLs
    Raises:
        ValueError: if any "http" substring survives stripping (sanity check)
    """
    text = html2text(text, bodywidth=0).strip()
    text = re.sub(remove_markdown_links_pattern, r"\1", text)
    text = re.sub(remove_remaining_links, "", text)
    if "http" in text:
        # Bug fix: the original `raise "..."` raises a TypeError in Python 3
        # (exceptions must derive from BaseException); raise a real exception.
        raise ValueError("Found http in markdown: " + text)
    return text
def convert_html_to_markdown(df, column: str = "Body"):
    """
    Parse the HTML in *column* into markdown, stored in a new "<column>Clean"
    column.

    Note: rows with a missing *column* value are dropped from df in place.

    Parameters:
        df (DataFrame): containing an HTML *column*
    Returns:
        DataFrame: the same df with the parsed column added
    """
    df.dropna(subset=[column], inplace=True)
    clean_column = f"{column}Clean"
    df[clean_column] = df[column].apply(to_markdown)
    return df
def clean_tags(df):
    """
    Convert tag slugs like "<c-sharp><dot-net>" into "c sharp, dot net".

    Parameters:
        df (DataFrame): containing a "Tags" column with slugs
    Returns:
        DataFrame: the same df with a new "TagsClean" column
    """
    cleaned = df["Tags"]
    # Order matters: dashes -> spaces, tag separators -> ", ", then strip the
    # leftover outer angle brackets.
    for old, new in (("-", " "), ("><", ", "), ("<", ""), (">", "")):
        cleaned = cleaned.str.replace(old, new)
    df["TagsClean"] = cleaned
    return df
def group_qa(df):
    """
    Group Questions and Answers

    Merge each question (PostTypeId == 1) with all of its answers
    (PostTypeId == 2), keep at most MAX_ANSWERS answers per question
    (accepted answer first, then by descending score), and collapse the
    answers into a list of dicts in an "Answers" column.
    """
    questions = df[df.PostTypeId == 1]
    answers = df[df.PostTypeId == 2]
    # Left-join answers onto their question: answer.ParentId == question.Id.
    # Shared column names get _q / _a suffixes.
    df = pd.merge(
        questions,
        answers[
            [
                "Id",
                "CreationDate",
                "Score",
                "ViewCount",
                "CommentCount",
                "ContentLicense",
                "TagsClean",
                "BodyClean",
                "ParentId",
            ]
        ],
        left_on="Id",
        right_on="ParentId",
        suffixes=("_q", "_a"),
        how="left",
    )
    # True on the one answer row the question marked as accepted.
    df["AcceptedAnswerFlag"] = df.apply(
        lambda row: row["Id_a"] == row["AcceptedAnswerId"], axis=1
    )
    df = df.rename(
        columns={
            "BodyClean_q": "Question",
            "Score_q": "QuestionScore",
            "TagsClean_q": "QuestionTags",
            "BodyClean_a": "Answer",
            "Score_a": "AnswerScore",
            "ContentLicense_q": "QuestionContentLicense",
            "ContentLicense_a": "AnswerContentLicense",
            "CreationDate_q": "CreationDate",
        }
    )
    # Per question, keep the top MAX_ANSWERS answers: accepted answer first,
    # then highest score (sort is global, head() is applied per group).
    df = (
        df.sort_values(
            by=["AcceptedAnswerFlag", "AnswerScore"], ascending=[False, False]
        )
        .groupby("Question")
        .head(MAX_ANSWERS)
        .reset_index(drop=True)
    )
    # Collapse the remaining answer rows into one list-of-dicts per question;
    # the question-level columns become the group key and are restored as
    # regular columns by reset_index(). The unnamed apply() result column is
    # labeled 0, hence the rename to "Answers".
    df = (
        df.groupby(
            [
                "Title",
                "Question",
                "QuestionScore",
                "QuestionTags",
                "QuestionContentLicense",
                "DataSource",
                "CreationDate",
            ]
        )
        .apply(
            lambda x: x[["Answer", "AnswerScore", "AcceptedAnswerFlag"]].to_dict(
                "records"
            )
        )
        .reset_index()
        .rename(columns={0: "Answers"})
    )
    return df
# Script entry point: convert the datasets named on the command line, or all
# cached XML dumps when none are given.
if __name__ == "__main__":
    main()