# HuggingFace `datasets` loading script for the PhoATIS (syllable-level) test split.
from curses.ascii import isalpha
import os
import csv
import re
from typing import Sequence
import json
import ast
import datasets
# Human-readable summary shown on the dataset hub page.
_DESCRIPTION = """\
Example dataset toxic
"""
# URL of the intent label file (one label string per line) for the PhoATIS test split.
_LABEL = "https://raw.githubusercontent.com/VinAIResearch/JointIDSF/main/PhoATIS/syllable-level/test/label"
# URL of the input utterance file (one syllable-level Vietnamese sentence per line).
_LINES = "https://raw.githubusercontent.com/VinAIResearch/JointIDSF/main/PhoATIS/syllable-level/test/seq.in"
class Config(datasets.BuilderConfig):
    """BuilderConfig carrying the utterance-file and label-file URLs."""
    def __init__(self, data_url, label_url, **kwargs):
        """Store the two source URLs and forward everything else to the base class.
        Args:
            data_url: `string`, url to the dataset (word or raw level)
            label_url: `string`, url to the matching per-line label file
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        self.label_url = label_url
class Guess(datasets.GeneratorBasedBuilder):
    """Intent-classification builder over the PhoATIS test split.

    Each example pairs a fixed Vietnamese prompt with 16 candidate
    "class-name: utterance" strings and the integer index of the true intent.
    """
    VERSION = datasets.Version("0.1.0")
    BUILDER_CONFIGS = [
        Config(
            name="all",
            data_url=_LINES,
            label_url=_LABEL,
            description="data",
        )
    ]
    def _info(self):
        """Declare the feature schema: prompt text, candidate strings, target index."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "classes": datasets.Sequence(datasets.Value("string")),
                    "target": datasets.Value("int8")
                }
            ),
            # No canonical (input, target) tuple is exposed for as_supervised=True.
            supervised_keys=None,
        )
    def _split_generators(self, dl_manager):
        """Download the utterance and label files and expose a single TEST split."""
        data_file = dl_manager.download(self.config.data_url)
        label_file = dl_manager.download(self.config.label_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_file": data_file, "label_file": label_file},
            ),
        ]
    def _generate_examples(self, data_file, label_file):
        """Yield (idx, example) dicts from the parallel utterance/label files.

        Unknown labels are skipped best-effort (printed, not raised), matching
        the original behavior.
        """
        # Intent name -> index into _TEXT. BUG FIX: "flight_no" was mapped to 19
        # in the original, which cannot index the 16-entry candidate list; the
        # intended value in the 0..15 sequence is 9.
        _CLASS = {
            "abbreviation": 0,
            "aircraft": 1,
            "airfare": 2,
            "airline": 3,
            "airport": 4,
            "capacity": 5,
            "city": 6,
            "distance": 7,
            "flight": 8,
            "flight_no": 9,
            "flight_time": 10,
            "ground_fare": 11,
            "ground_service": 12,
            "meal": 13,
            "quantity": 14,
            "restriction": 15
        }
        # Vietnamese display names for the 16 intents, ordered to match _CLASS indices.
        _TEXT = ["mã", "loại máy bay", "giá vé", "hãng hàng không",
        "sân bay", "sức chứa máy bay", "địa điểm", "khoảng cách", "chuyến bay",
        "số hiệu bay", "thời gian bay", "giá dịch vụ", "dịch vụ", "suất ăn", "số lượng", "hạn chế"]
        # The files contain Vietnamese UTF-8 text; make the encoding explicit so
        # decoding does not depend on the platform default.
        with open(data_file, 'r', encoding='utf-8') as f:
            lines = f.read().splitlines()
        with open(label_file, 'r', encoding='utf-8') as f:
            labels = f.read().splitlines()
        for idx, (line, label) in enumerate(zip(lines, labels)):
            if label not in _CLASS:
                # Best-effort skip of unknown intents (kept from the original).
                print(line, label)
                continue
            _classes = [f'{_cl}: {line}' for _cl in _TEXT]
            yield idx, {
                "text" : f'Cho xin thông tin ',
                "classes" : _classes,
                "target" : _CLASS[label]
            }