Kenneth Enevoldsen committed
Commit 1b6610f · unverified · 1 Parent(s): 5747be6

format and ensure that tests pass

data/ncc_books/create.py CHANGED
@@ -218,11 +218,11 @@ def filter_with_changelog(
     return dataset


-    source_filter = lambda ds: doc_type_searchword in ds["doc_type"]
-    length_filter = lambda ds: len(word_tokenize(ds["text"])) >= 10
-    too_long_filter = lambda ds: len(word_tokenize(ds["text"])) > 1e5
-    alpha_filter = lambda ds: alpha_ratio(ds["text"]) >= 0.7
-    stop_word_filter = lambda ds: count_min_target(word_tokenize(ds["text"]), spacy_sw, 2)
+    source_filter = lambda ds: doc_type_searchword in ds["doc_type"]  # noqa
+    length_filter = lambda ds: len(word_tokenize(ds["text"])) >= 10  # noqa
+    too_long_filter = lambda ds: len(word_tokenize(ds["text"])) > 1e5  # noqa
+    alpha_filter = lambda ds: alpha_ratio(ds["text"]) >= 0.7  # noqa
+    stop_word_filter = lambda ds: count_min_target(word_tokenize(ds["text"]), spacy_sw, 2)  # noqa

     samples_pr_source: dict = defaultdict(lambda: defaultdict(int))
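The filters in this hunk rely on helpers that sit elsewhere in the script and are not part of the diff: an NLTK-style `word_tokenize`, `alpha_ratio`, `count_min_target`, and the stop-word set `spacy_sw`. As a rough sketch of what they plausibly compute (the bodies below are assumptions for illustration, not the repository's actual code):

    # Sketch only: plausible stand-ins for the helpers referenced above.
    def alpha_ratio(text: str) -> float:
        """Fraction of whitespace-separated tokens that contain a letter."""
        tokens = text.split()
        if not tokens:
            return 0.0
        return sum(any(c.isalpha() for c in tok) for tok in tokens) / len(tokens)

    def count_min_target(tokens: list[str], target: set[str], n: int) -> bool:
        """True once at least `n` tokens from `target` (e.g. stop words) are seen."""
        hits = 0
        for tok in tokens:
            if tok.lower() in target:
                hits += 1
                if hits >= n:
                    return True
        return False

Under these assumed definitions, `alpha_filter` keeps documents where at least 70% of tokens are alphabetic, and `stop_word_filter` keeps documents containing at least two stop words, a cheap proxy for running prose.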
data/ncc_books/ncc_books.md CHANGED
@@ -3,7 +3,7 @@ pretty_name: Norwegian Colossal Corpus (books)
 language:
 - da
 license: cc0-1.0
-license_name: CC0 1.0
+license_name: CC-0
 task_categories:
 - text-generation
 - fill-mask
data/ncc_maalfrid/create.py CHANGED
@@ -5,14 +5,13 @@
 # ]
 # ///

+import inspect
 import logging
 import re
-import inspect
-
-from pathlib import Path
-from datetime import datetime
 from collections import defaultdict
 from collections.abc import Callable
+from datetime import datetime
+from pathlib import Path

 import pandas as pd
 from datasets import Dataset, load_dataset
@@ -219,11 +218,11 @@ def filter_with_changelog(
     return dataset


-    source_filter = lambda ds: doc_type_searchword in ds["doc_type"]
-    length_filter = lambda ds: len(word_tokenize(ds["text"])) >= 10
-    too_long_filter = lambda ds: len(word_tokenize(ds["text"])) > 1e5
-    alpha_filter = lambda ds: alpha_ratio(ds["text"]) >= 0.7
-    stop_word_filter = lambda ds: count_min_target(word_tokenize(ds["text"]), spacy_sw, 2)
+    source_filter = lambda ds: doc_type_searchword in ds["doc_type"]  # noqa
+    length_filter = lambda ds: len(word_tokenize(ds["text"])) >= 10  # noqa
+    too_long_filter = lambda ds: len(word_tokenize(ds["text"])) > 1e5  # noqa
+    alpha_filter = lambda ds: alpha_ratio(ds["text"]) >= 0.7  # noqa
+    stop_word_filter = lambda ds: count_min_target(word_tokenize(ds["text"]), spacy_sw, 2)  # noqa

     samples_pr_source: dict = defaultdict(lambda: defaultdict(int))
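Both hunks here mirror ncc_books: the imports are regrouped into standard isort order (stdlib imports together, alphabetized), and the lambda assignments gain a bare `# noqa`, which suppresses every lint rule on that line; the likely triggers are E731 ("do not assign a lambda expression, use a def") and line length. For illustration only, a def-based equivalent that would satisfy E731 without any suppression:

    # Illustration, not the commit's choice: def-based filters need no noqa.
    from nltk.tokenize import word_tokenize  # assuming NLTK, as the name suggests

    def length_filter(ds: dict) -> bool:
        return len(word_tokenize(ds["text"])) >= 10

    def too_long_filter(ds: dict) -> bool:
        return len(word_tokenize(ds["text"])) > 1e5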
data/ncc_newspaper/create.py CHANGED
@@ -220,11 +220,11 @@ def filter_with_changelog(


     # filters
-    source_filter = lambda ds: re.sub("ncc_", "", source) in ds["doc_type"]
-    length_filter = lambda ds: len(word_tokenize(ds["text"])) >= 10
-    too_long_filter = lambda ds: len(word_tokenize(ds["text"])) > 1e5
-    alpha_filter = lambda ds: alpha_ratio(ds["text"]) >= 0.7
-    stop_word_filter = lambda ds: count_min_target(word_tokenize(ds["text"]), spacy_sw, 2)
+    source_filter = lambda ds: re.sub("ncc_", "", source) in ds["doc_type"]  # noqa
+    length_filter = lambda ds: len(word_tokenize(ds["text"])) >= 10  # noqa
+    too_long_filter = lambda ds: len(word_tokenize(ds["text"])) > 1e5  # noqa
+    alpha_filter = lambda ds: alpha_ratio(ds["text"]) >= 0.7  # noqa
+    stop_word_filter = lambda ds: count_min_target(word_tokenize(ds["text"]), spacy_sw, 2)  # noqa

     samples_pr_source: dict = defaultdict(lambda: defaultdict(int))
data/ncc_newspaper/ncc_newspaper.md CHANGED
@@ -3,12 +3,14 @@ pretty_name: Norwegian Colossal Corpus (newspaper)
 language:
 - da
 license: cc0-1.0
-license_name: CC0 1.0
+license_name: CC-0
 task_categories:
 - text-generation
 - fill-mask
 task_ids:
 - language-modeling
+domains:
+- News
 ---

 # Dataset Card for Norwegian Colossal Corpus (newspaper)
data/ncc_parliament/create.py CHANGED
@@ -5,14 +5,13 @@
 # ]
 # ///

+import inspect
 import logging
 import re
-import inspect
-
-from pathlib import Path
-from datetime import datetime
 from collections import defaultdict
 from collections.abc import Callable
+from datetime import datetime
+from pathlib import Path

 import pandas as pd
 from datasets import Dataset, load_dataset
@@ -219,11 +218,11 @@ def filter_with_changelog(
     return dataset


-    source_filter = lambda ds: doc_type_searchword in ds["doc_type"]
-    length_filter = lambda ds: len(word_tokenize(ds["text"])) >= 10
-    too_long_filter = lambda ds: len(word_tokenize(ds["text"])) > 1e5
-    alpha_filter = lambda ds: alpha_ratio(ds["text"]) >= 0.7
-    stop_word_filter = lambda ds: count_min_target(word_tokenize(ds["text"]), spacy_sw, 2)
+    source_filter = lambda ds: doc_type_searchword in ds["doc_type"]  # noqa
+    length_filter = lambda ds: len(word_tokenize(ds["text"])) >= 10  # noqa
+    too_long_filter = lambda ds: len(word_tokenize(ds["text"])) > 1e5  # noqa
+    alpha_filter = lambda ds: alpha_ratio(ds["text"]) >= 0.7  # noqa
+    stop_word_filter = lambda ds: count_min_target(word_tokenize(ds["text"]), spacy_sw, 2)  # noqa

     samples_pr_source: dict = defaultdict(lambda: defaultdict(int))
src/tests/test_dataset_schema.py CHANGED
@@ -2,7 +2,6 @@ import pytest
 from datasets import load_dataset

 from dynaword.dataset_structure import SampleSchema
-from dynaword.datasheet import DEFAULT_SECTION_TAGS, DataSheet
 from dynaword.paths import repo_path

 from .conftest import DATASET_NAMES
@@ -19,7 +18,6 @@ def test_sample_schema(dataset_name: str):
     SampleSchema(**sample)


-
 @pytest.mark.parametrize("dataset_name", DATASET_NAMES)
 def test_dataset_folder_structure(dataset_name: str):
     """tests that the dataset folder structure is as follows.
src/tests/test_datasheets.py CHANGED
@@ -11,7 +11,9 @@ def test_datasheet_load(dataset_name: str):
     """tests that the dataset frontmatter and markdown follows the correct format."""

     readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"
-    ds_sheet = DataSheet.load_from_path(readme)  # will fail if format is not correct
+    ds_sheet = DataSheet.load_from_path(  # noqa: F841
+        readme
+    )  # will fail if format is not correct


 @pytest.mark.parametrize("dataset_name", DATASET_NAMES)
@@ -20,7 +22,6 @@ def test_datasheet_content_tags(dataset_name: str):
     ds_sheet = DataSheet.load_from_path(readme)

     # ensure tags:
-    body = ds_sheet.body
     tags = [v.value for v in DEFAULT_SECTION_TAGS]
     for tag in tags:
         ds_sheet.get_tag_idx(tag)
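`F841` is the pyflakes/ruff code for a local variable that is assigned but never used: the `ds_sheet` binding exists only for its side effect, since `DataSheet.load_from_path` raises when the datasheet format is wrong. An equivalent formulation that needs no suppression, shown purely as an alternative:

    # Alternative sketch: validate by side effect, leaving no unused variable.
    DataSheet.load_from_path(readme)  # raises if the format is not correct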
src/tests/test_duplicates.py CHANGED
@@ -15,7 +15,9 @@ def test_no_within_data_duplicates(dataset_name: str):
     assert len(set(ds["text"])) == len(ds)


-@pytest.mark.skip("This tests takes too long to run")  # there seems to be some duplicate across
+@pytest.mark.skip(
+    "This tests takes too long to run"
+)  # there seems to be some duplicate across
 def test_no_data_duplicates():
     ds = load_dataset(str(repo_path.resolve()), split="train")
     ds = cast(Dataset, ds)
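The skip marker is only reflowed for line length here; `pytest.mark.skip` accepts the explanation as its optional `reason`. A purely illustrative variant that folds the trailing comment into the reason string:

    # Illustrative variant: same skip, rationale carried in the reason itself.
    @pytest.mark.skip(
        reason="Takes too long to run; there seem to be duplicates across datasets"
    )
    def test_no_data_duplicates(): ...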
test_results.log CHANGED
@@ -1,13 +1,16 @@
 ============================= test session starts ==============================
-platform linux -- Python 3.12.3, pytest-8.3.4, pluggy-1.5.0
-rootdir: /work/githubs/tmp/danish-dynaword
+platform darwin -- Python 3.12.0, pytest-8.3.4, pluggy-1.5.0
+rootdir: /Users/au561649/Github/danish-dynaword
 configfile: pyproject.toml
-collected 124 items
+collected 276 items

-src/tests/test_dataset_schema.py ....................................... [ 31%]
-................................................... [ 72%]
-src/tests/test_duplicates.py sssssssssssssssssssssssssssssss [ 97%]
+src/tests/test_dataset_schema.py ....................................... [ 14%]
+............................. [ 24%]
+src/tests/test_datasheets.py ........................................... [ 40%]
+........................................................................ [ 66%]
+....................................................... [ 86%]
+src/tests/test_duplicates.py ..................................s [ 98%]
 src/tests/test_load.py .. [ 99%]
 src/tests/test_unique_ids.py . [100%]

-======================= 93 passed, 31 skipped in 20.62s ========================
+======================= 275 passed, 1 skipped in 54.24s ========================