sobir-hf committed on
Commit
cda116a
·
1 Parent(s): 7b4dd76

Update tajik-text-segmentation.py

Browse files
Files changed (1) hide show
  1. tajik-text-segmentation.py +60 -51
tajik-text-segmentation.py CHANGED
@@ -1,73 +1,87 @@
1
- """Custom Annotations Dataset."""
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
- import os
4
  from .annotations_parser import load_yedda_annotations
5
- import datasets
6
-
7
- logger = datasets.logging.get_logger(__name__)
8
 
 
9
 
10
 
11
- _CITATION = """
12
- @misc{tajik-text-segmentation-2023,
13
- title={Tajik sentence-wise text segmentation dataset},
14
- author={Sobir Bobiev},
15
- year={2023},
16
- howpublished={\\url{https://huggingface.co/datasets/sobir-hf/tajik-text-segmentation}},
17
  }
18
  """
19
 
20
- _DESCRIPTION = """
21
- Tajik sentence-wise text segmentation dataset consisting of annotated text files.
22
  """
23
 
24
- class CustomAnnotationsConfig(datasets.BuilderConfig):
25
- """BuilderConfig for Custom Annotations."""
26
 
27
- def __init__(self, **kwargs):
28
- """BuilderConfig for Custom Annotations.
29
- Args:
30
- **kwargs: keyword arguments forwarded to super.
31
- """
32
- super(CustomAnnotationsConfig, self).__init__(**kwargs)
33
 
34
 
35
- class CustomAnnotations(datasets.GeneratorBasedBuilder):
36
- """Custom Annotations: Dataset with annotated text files."""
37
 
38
- BUILDER_CONFIGS = [
39
- CustomAnnotationsConfig(
40
- name="plain_text",
41
- version=datasets.Version("1.0.0", ""),
42
- description="Plain text",
43
- ),
44
- ]
45
 
46
  def _info(self):
 
 
 
 
 
 
 
 
 
 
47
  return datasets.DatasetInfo(
 
48
  description=_DESCRIPTION,
49
- features=datasets.Features(
50
- {
51
- "file": datasets.Value("string"),
52
- "text": datasets.Value("string"),
53
- "annotated_text": datasets.Value("string"),
54
- "number_of_labels": datasets.Value("int32"),
55
- }
56
- ),
57
- supervised_keys=None,
58
- homepage="your dataset homepage",
59
  citation=_CITATION,
60
  )
61
 
62
  def _split_generators(self, dl_manager):
63
- directory_path = os.path.abspath('annotations')
64
  return [
65
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"directory_path": directory_path}),
 
 
 
 
 
 
66
  ]
67
 
68
  def _generate_examples(self, directory_path):
69
- """This function returns the examples in the raw (text) form."""
70
- logger.info("generating examples from = %s", directory_path)
71
 
72
  annotations = load_yedda_annotations(directory_path)
73
 
@@ -81,12 +95,7 @@ class CustomAnnotations(datasets.GeneratorBasedBuilder):
81
  "file": file,
82
  "text": text,
83
  "annotated_text": annotated_text,
 
 
84
  "number_of_labels": number_of_labels,
85
  }
86
-
87
- if __name__ == '__main__':
88
- # You can test the data generation by running this script.
89
- # It will create a dataset in a subdirectory `./datasets/`.
90
- from datasets import load_dataset
91
- dataset = load_dataset('./tajik-text-segmentation.py')
92
- print(dataset)
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
 
 
15
  from .annotations_parser import load_yedda_annotations
 
 
 
16
 
17
+ import datasets
18
 
19
 
20
+ # Find for instance the citation on arxiv or on the dataset repo/website
21
+ _CITATION = """\
22
+ @misc{tajik-text-segmentation,
23
+ title = {Tajik text segmentation dataset},
24
+ author={Sobir Bobiev},
25
+ year={2023}
26
  }
27
  """
28
 
29
+ _DESCRIPTION = """\
30
+ This dataset contains tajik texts with sentences annotated. Can be useful for sentence boundary detection, segmenting text and many NLP tasks.
31
  """
32
 
33
+ # TODO: Add a link to an official homepage for the dataset here
34
+ _HOMEPAGE = ""
35
 
36
+ # TODO: Add the licence for the dataset here if you can find it
37
+ _LICENSE = ""
 
 
 
 
38
 
39
 
40
+ class TajikTextSegmentation(datasets.GeneratorBasedBuilder):
41
+ """A dataset of sentence-wise text segmentation in Tajik language."""
42
 
43
+ VERSION = datasets.Version("1.1.0")
 
 
 
 
 
 
44
 
45
  def _info(self):
46
+ features = datasets.Features(
47
+ {
48
+ "file": datasets.Value("string"),
49
+ "text": datasets.Value("string"),
50
+ "annotated_text": datasets.Value("string"),
51
+ "number_of_labels": datasets.Value("int32"),
52
+ "positions": [[datasets.Value("int32")]],
53
+ "labels": [datasets.Value("string")]
54
+ }
55
+ )
56
  return datasets.DatasetInfo(
57
+ # This is the description that will appear on the datasets page.
58
  description=_DESCRIPTION,
59
+ # This defines the different columns of the dataset and their types
60
+ features=features, # Here we define them above because they are different between the two configurations
61
+ # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
62
+ # specify them. They'll be used if as_supervised=True in builder.as_dataset.
63
+ # supervised_keys=("sentence", "label"),
64
+ # Homepage of the dataset for documentation
65
+ homepage=_HOMEPAGE,
66
+ # License for the dataset if available
67
+ license=_LICENSE,
68
+ # Citation for the dataset
69
  citation=_CITATION,
70
  )
71
 
72
  def _split_generators(self, dl_manager):
 
73
  return [
74
+ datasets.SplitGenerator(
75
+ name=datasets.Split.TRAIN,
76
+ # These kwargs will be passed to _generate_examples
77
+ gen_kwargs={
78
+ "directory_path": './annotations',
79
+ },
80
+ ),
81
  ]
82
 
83
  def _generate_examples(self, directory_path):
84
+ """This function returns the examples."""
 
85
 
86
  annotations = load_yedda_annotations(directory_path)
87
 
 
95
  "file": file,
96
  "text": text,
97
  "annotated_text": annotated_text,
98
+ "positions": file_annotation['positions'],
99
+ "labels": file_annotation['labels'],
100
  "number_of_labels": number_of_labels,
101
  }