File size: 2,958 Bytes
a6326c7 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 |
from utils import write_jsonl_file, read_csv_file, parse
import os
import pandas as pd
# Refer https://github.com/alexa/dialoglue/blob/master/data_utils/intent_scripts/get_hwu_data.py
# The 64 per-intent CSV files of the HWU64 dataset, in canonical order.
# Each filename encodes "<domain>_<intent>.csv".
LIST_OF_FILES = [
    "alarm_query.csv",
    "alarm_remove.csv",
    "alarm_set.csv",
    "audio_volume_down.csv",
    "audio_volume_mute.csv",
    "audio_volume_up.csv",
    "calendar_query.csv",
    "calendar_remove.csv",
    "calendar_set.csv",
    "cooking_recipe.csv",
    "datetime_convert.csv",
    "datetime_query.csv",
    "email_addcontact.csv",
    "email_query.csv",
    "email_querycontact.csv",
    "email_sendemail.csv",
    "general_affirm.csv",
    "general_commandstop.csv",
    "general_confirm.csv",
    "general_dontcare.csv",
    "general_explain.csv",
    "general_joke.csv",
    "general_negate.csv",
    "general_praise.csv",
    "general_quirky.csv",
    "general_repeat.csv",
    "iot_cleaning.csv",
    "iot_coffee.csv",
    "iot_hue_lightchange.csv",
    "iot_hue_lightdim.csv",
    "iot_hue_lightoff.csv",
    "iot_hue_lighton.csv",
    "iot_hue_lightup.csv",
    "iot_wemo_off.csv",
    "iot_wemo_on.csv",
    "lists_createoradd.csv",
    "lists_query.csv",
    "lists_remove.csv",
    "music_likeness.csv",
    "music_query.csv",
    "music_settings.csv",
    "news_query.csv",
    "play_audiobook.csv",
    "play_game.csv",
    "play_music.csv",
    "play_podcasts.csv",
    "play_radio.csv",
    "qa_currency.csv",
    "qa_definition.csv",
    "qa_factoid.csv",
    "qa_maths.csv",
    "qa_stock.csv",
    "recommendation_events.csv",
    "recommendation_locations.csv",
    "recommendation_movies.csv",
    "social_post.csv",
    "social_query.csv",
    "takeaway_order.csv",
    "takeaway_query.csv",
    "transport_query.csv",
    "transport_taxi.csv",
    "transport_ticket.csv",
    "transport_traffic.csv",
    "weather_query.csv",
]
def reformat(args, split):
    """Convert one HWU64 split into the unified single-turn JSONL format.

    Args:
        args: parsed CLI namespace; must provide ``input_dir`` (root of the
            raw HWU64 download) and ``output_dir``.
        split: ``"train"`` reads from ``<input_dir>/trainset``; any other
            value reads from ``<input_dir>/testset/csv``.

    Side effects:
        Writes ``<output_dir>/<split>.jsonl`` via ``write_jsonl_file``.
    """
    # Upstream layout: train CSVs sit directly in trainset/, test CSVs in a
    # nested testset/csv/ directory.
    if split == "train":
        input_dir = os.path.join(args.input_dir, "trainset")
    else:
        input_dir = os.path.join(args.input_dir, "testset", "csv")

    dialogues = []
    for filename in LIST_OF_FILES:
        # HWU64 CSVs are semicolon-delimited.
        data = pd.read_csv(os.path.join(input_dir, filename), sep=";")
        # Iterate the three needed columns directly instead of
        # data.iloc[i][col] per field, which builds a Series for every row
        # access (the original range(len(data)) loop did three per row).
        for utterance, domain, intent in zip(
            data["answer_from_anno"], data["scenario"], data["intent"]
        ):
            dialogues.append(
                {
                    "turn": "single",
                    "locale": "en",
                    "domain": domain,
                    "dialog": [
                        {
                            "roles": ["USER"],
                            "utterance": utterance,
                            "active_intents": [f"{domain} {intent}"],
                        }
                    ],
                }
            )
    write_jsonl_file(dialogues, os.path.join(args.output_dir, f"{split}.jsonl"))
def preprocess(args):
    """Reformat both HWU64 splits (train and test) into JSONL files."""
    for split in ("train", "test"):
        reformat(args, split)
if __name__ == "__main__":
    # Parse CLI arguments (input_dir / output_dir) and run the conversion.
    preprocess(parse())
|