File size: 1,756 Bytes
b3ddbe2
e831b8a
1987fa7
4bb171f
8c38168
4bb171f
 
 
 
bb616b4
1987fa7
 
4bb171f
 
 
1987fa7
 
4bb171f
 
bb616b4
b3ddbe2
 
 
 
4bb171f
32843dd
b3ddbe2
bb616b4
4bb171f
 
b3ddbe2
 
4bb171f
 
 
bb616b4
4bb171f
 
bb616b4
4bb171f
bb616b4
4bb171f
 
 
0719213
4bb171f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import json

from datasets import DatasetBuilder, DatasetInfo, GeneratorBasedBuilder, SplitGenerator
from datasets.features import Features, Value

class NbnnLanguageDetection(GeneratorBasedBuilder):
    """Loader for the NbAiLab Bokmål/Nynorsk language-detection dataset.

    Downloads three JSONL split files from the Hugging Face Hub; each line
    is a JSON object with a ``text`` field and its ``language`` label.

    Note: this must subclass ``GeneratorBasedBuilder`` (not the abstract
    ``DatasetBuilder``) so that ``_generate_examples`` is actually invoked
    during ``_prepare_split``.
    """

    VERSION = "0.1.0"

    # Hub locations of the three split files, keyed by split name.
    _URLS = {
        'train': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/train.jsonl',
        'dev': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/dev.jsonl',
        'test': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/test.jsonl',
    }

    def _info(self):
        """Declare the dataset schema: string ``text`` mapped to string ``language``."""
        return DatasetInfo(
            features=Features({
                'text': Value('string'),
                'language': Value('string'),
            }),
            supervised_keys=('text', 'language'),
        )

    def _split_generators(self, dl_manager):
        """Download each split file and return one ``SplitGenerator`` per split.

        Args:
            dl_manager: the download manager supplied by the datasets library.
        """
        downloaded_files = dl_manager.download(self._URLS)
        return [
            SplitGenerator(name=split, gen_kwargs={'filepath': downloaded_files[split]})
            for split in self._URLS
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one JSONL split file.

        Keys are zero-based line numbers, unique within a split as the
        datasets library requires. Files on the Hub are UTF-8, so the
        encoding is pinned explicitly rather than left locale-dependent.
        """
        with open(filepath, 'r', encoding='utf-8') as f:
            for key, line in enumerate(f):
                record = json.loads(line)
                yield key, {
                    'text': record['text'],
                    'language': record['language'],
                }