SajjadAyoubi committed on
Commit
a94ec60
·
1 Parent(s): df59d87

Create persian_qa.py

Files changed (1)
  1. persian_qa.py +95 -0
persian_qa.py ADDED
@@ -0,0 +1,95 @@
+ import json
+ import datasets
+ _CITATION = """\
+ @misc{PersianQA,
+     author = {Sajjad Ayoubi and Mohammad Yasin Davoodeh},
+     title = {PersianQA: a dataset for Persian Question Answering},
+     year = 2021,
+     publisher = {GitHub},
+     journal = {GitHub repository},
+     howpublished = {\\url{https://github.com/SajjjadAyobi/PersianQA}},
+ }
+ """
+ _DESCRIPTION = """\
+ Persian Question Answering (PersianQA) Dataset is a reading comprehension dataset on Persian Wikipedia.
+ The crowd-sourced dataset consists of more than 9,000 entries. Each entry is either an impossible-to-answer question or a question with one or more answers spanning the passage (the context) from which the questioner proposed the question. Much like the SQuAD2.0 dataset, the impossible or unanswerable questions can be used to build a system that "knows that it doesn't know the answer".
+ """
+ _URL = "https://raw.githubusercontent.com/sajjjadayobi/narm/master/utils/"
+ _URLS = {
+     "train": _URL + "pqa_train.json",
+     "test": _URL + "pqa_test.json",
+ }
+ class PersianQAConfig(datasets.BuilderConfig):
+     """BuilderConfig for PersianQA."""
+     def __init__(self, **kwargs):
+         """BuilderConfig for PersianQA.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(PersianQAConfig, self).__init__(**kwargs)
+ class PersianQA(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         PersianQAConfig(name="persian_qa", version=datasets.Version("1.0.0"), description="PersianQA plain text version 1"),
+     ]
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "title": datasets.Value("string"),
+                     "context": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "answers": datasets.features.Sequence(
+                         {
+                             "text": datasets.Value("string"),
+                             "answer_start": datasets.Value("int32"),
+                         }
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://github.com/sajjjadayobi/PersianQA/",
+             citation=_CITATION,
+         )
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(persian_qa): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         urls_to_download = _URLS
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         # TODO(persian_qa): Yields (key, example) tuples from the dataset
+         with open(filepath, encoding="utf-8") as f:
+             print(filepath)
+             squad = json.load(f)
+             for example in squad["data"]:
+                 title = example.get("title", "").strip()
+                 for paragraph in example["paragraphs"]:
+                     context = paragraph["context"].strip()
+                     for qa in paragraph["qas"]:
+                         question = qa["question"].strip()
+                         id_ = qa["id"]
+                         answer_starts = [answer["answer_start"] for answer in qa["answers"]]
+                         answers = [answer["text"].strip() for answer in qa["answers"]]
+                         # Features currently used are "context", "question", and "answers".
+                         # Others are extracted here for the ease of future expansions.
+                         yield id_, {
+                             "title": title,
+                             "context": context,
+                             "question": question,
+                             "id": id_,
+                             "answers": {
+                                 "answer_start": answer_starts,
+                                 "text": answers,
+                             },
+                         }
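
For reference, a minimal usage sketch follows. It assumes the script above is saved locally as persian_qa.py and that the installed `datasets` version still supports script-based loading; the downloaded pqa_train.json / pqa_test.json files are expected to follow the SQuAD-style layout (data -> paragraphs -> qas -> answers) that _generate_examples walks. The comments describing a record are illustrative, not actual dataset content.

# Minimal sketch: load the dataset through the loading script defined above.
# Assumes persian_qa.py is in the current directory and that this `datasets`
# release still supports loading from a local script (pre-3.0 versions do).
from datasets import load_dataset

dataset = load_dataset("persian_qa.py", "persian_qa")

# _split_generators exposes the "test" file as the VALIDATION split,
# so the result is a DatasetDict with "train" and "validation".
print(dataset)

example = dataset["train"][0]

# Each record follows the features declared in _info():
#   id: int32, title / context / question: string,
#   answers: {"text": [...], "answer_start": [...]}
#   (the lists are empty when a question has no answer in the context).
print(example["question"])
print(example["answers"])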