Create new file
Browse files
en-fa-translation.py +109 -0
en-fa-translation.py
ADDED
@@ -0,0 +1,109 @@
import json
import os

import datasets


_DESCRIPTION = """\
English-Persian (en-fa) parallel corpus of source/target sentence pairs for machine translation.
"""
_URL = "https://huggingface.co/datasets/Kamrani/en-fa-translation/resolve/main/"
_URLs = {
    "train": _URL + "train.json",
    "dev": _URL + "dev.json",
    "test": _URL + "test.json",
}
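
# Each split file is expected to be in JSON Lines format: one JSON object per line
# with "source" (English) and "target" (Persian) string fields, matching the keys
# read in _generate_examples below. A hypothetical record:
# {"source": "Good morning.", "target": "صبح بخیر."}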

# The dataset class name usually matches the script name, in CamelCase instead of snake_case.
class PersianTranslateDB(datasets.GeneratorBasedBuilder):
    """English-Persian (en-fa) translation dataset."""

    VERSION = datasets.Version("1.0.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need complex sub-parts in the dataset with configurable options,
    # you can create your own builder configuration class to store attributes,
    # inheriting from datasets.BuilderConfig:
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="en-fa", version=VERSION, description="en-fa translation database"),
        # datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
    ]

    DEFAULT_CONFIG_NAME = "en-fa"  # A default configuration is not mandatory; use one only if it makes sense.

    def _info(self):
        # This method specifies the datasets.DatasetInfo object, which contains
        # information and feature types for the dataset.
        features = datasets.Features(
            {
                # The features (columns) of the dataset: an English source sentence
                # and its Persian target translation.
                "source": datasets.Value("string"),
                "target": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the dataset page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            # If there's a common (input, target) tuple among the features, uncomment the
            # line below; it is used when as_supervised=True in builder.as_dataset.
            # supervised_keys=("source", "target"),
        )

    def _split_generators(self, dl_manager):
        # This method downloads/extracts the data and defines the splits, depending on the
        # configuration. If several configurations are possible (listed in BUILDER_CONFIGS),
        # the configuration selected by the user is in self.config.name.

        # dl_manager is a datasets.download.DownloadManager that can be used to download and
        # extract URLs. It accepts any type or nested list/dict and gives back the same
        # structure with each URL replaced by a path to a local file. By default, archives are
        # extracted and the path to the cached folder where they were extracted is returned
        # instead of the archive. Passing the _URLs dict here returns a dict of local paths,
        # one per split.
        data_files = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "filepath": data_files["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_files["dev"],
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_files["test"],
                    "split": "test",
                },
            ),
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, filepath, split):
        # This method reads the file prepared in _split_generators and yields
        # (key, example) tuples. The `key` exists for legacy reasons (tfds) and is not
        # important in itself, but it must be unique for each example.
        with open(filepath, encoding="utf-8") as f:
            # Each line of the file is one JSON record with "source" and "target" keys.
            for key, row in enumerate(f):
                data = json.loads(row)
                yield key, {
                    "source": data["source"],
                    "target": data["target"],
                }
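
Once this script is pushed alongside train.json, dev.json, and test.json, the dataset can be loaded through the standard datasets API. A minimal usage sketch, assuming the Hub repository id Kamrani/en-fa-translation from _URL above (recent datasets versions may also require trust_remote_code=True to run a loading script):

import datasets

# Runs the loading script above and builds all three splits.
ds = datasets.load_dataset("Kamrani/en-fa-translation", "en-fa")

# Each split exposes "source" and "target" string columns.
print(ds["train"][0])  # e.g. {"source": "...", "target": "..."}
print(ds["validation"].num_rows, ds["test"].num_rows)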