import random
from contextlib import contextmanager
from copy import deepcopy
import re
from functools import partial
def _split_tokenizer(x):
return x.split()
def _spacy_tokenize(x, spacy):
return [tok.text for tok in spacy.tokenizer(x)]
_patterns = [r'\'',
r'\"',
r'\.',
r'<br \/>',
r',',
r'\(',
r'\)',
r'\!',
r'\?',
r'\;',
r'\:',
r'\s+']
_replacements = [' \' ',
'',
' . ',
' ',
' , ',
' ( ',
' ) ',
' ! ',
' ? ',
' ',
' ',
' ']
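# Compile each pattern once and pair it with its replacement string.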
_patterns_dict = list((re.compile(p), r) for p, r in zip(_patterns, _replacements))
def _basic_english_normalize(line):
r"""
Basic normalization for a line of text.
Normalization includes
- lowercasing
- complete some basic text normalization for English words as follows:
add spaces before and after '\''
remove '\"',
add spaces before and after '.'
replace '<br \/>'with single space
add spaces before and after ','
add spaces before and after '('
add spaces before and after ')'
add spaces before and after '!'
add spaces before and after '?'
replace ';' with single space
replace ':' with single space
replace multiple spaces with single space
Returns a list of tokens after splitting on whitespace.
"""
line = line.lower()
for pattern_re, replaced_str in _patterns_dict:
line = pattern_re.sub(replaced_str, line)
return line.split()
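# Illustrative example (not part of the module): the normalizer lowercases,
# pads punctuation with spaces, and splits on whitespace, e.g.
#   _basic_english_normalize("Hello, world! It's a test.")
#   -> ['hello', ',', 'world', '!', 'it', "'", 's', 'a', 'test', '.']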
def get_tokenizer(tokenizer, language='en'):
r"""
Generate tokenizer function for a string sentence.
Arguments:
tokenizer: the name of tokenizer function. If None, it returns split()
function, which splits the string sentence by space.
If basic_english, it returns _basic_english_normalize() function,
which normalize the string first and split by space. If a callable
function, it will return the function. If a tokenizer library
(e.g. spacy, moses, toktok, revtok, subword), it returns the
corresponding library.
language: Default en
Examples:
>>> import torchtext
>>> from torchtext.data import get_tokenizer
>>> tokenizer = get_tokenizer("basic_english")
>>> tokens = tokenizer("You can now install TorchText using pip!")
>>> tokens
>>> ['you', 'can', 'now', 'install', 'torchtext', 'using', 'pip', '!']
"""
# default tokenizer is string.split(), added as a module function for serialization
if tokenizer is None:
return _split_tokenizer
if tokenizer == "basic_english":
if language != 'en':
raise ValueError("Basic normalization is only available for Enlish(en)")
return _basic_english_normalize
# simply return if a function is passed
if callable(tokenizer):
return tokenizer
if tokenizer == "spacy":
try:
import spacy
spacy = spacy.load(language)
return partial(_spacy_tokenize, spacy=spacy)
except ImportError:
print("Please install SpaCy. "
"See the docs at https://spacy.io for more information.")
raise
except AttributeError:
print("Please install SpaCy and the SpaCy {} tokenizer. "
"See the docs at https://spacy.io for more "
"information.".format(language))
raise
elif tokenizer == "moses":
try:
from sacremoses import MosesTokenizer
moses_tokenizer = MosesTokenizer()
return moses_tokenizer.tokenize
except ImportError:
print("Please install SacreMoses. "
"See the docs at https://github.com/alvations/sacremoses "
"for more information.")
raise
elif tokenizer == "toktok":
try:
from nltk.tokenize.toktok import ToktokTokenizer
toktok = ToktokTokenizer()
return toktok.tokenize
except ImportError:
print("Please install NLTK. "
"See the docs at https://nltk.org for more information.")
raise
elif tokenizer == 'revtok':
try:
import revtok
return revtok.tokenize
except ImportError:
print("Please install revtok.")
raise
elif tokenizer == 'subword':
try:
import revtok
return partial(revtok.tokenize, decap=True)
except ImportError:
print("Please install revtok.")
raise
raise ValueError("Requested tokenizer {}, valid choices are a "
"callable that takes a single string as input, "
"\"revtok\" for the revtok reversible tokenizer, "
"\"subword\" for the revtok caps-aware tokenizer, "
"\"spacy\" for the SpaCy English tokenizer, or "
"\"moses\" for the NLTK port of the Moses tokenization "
"script.".format(tokenizer))
def is_tokenizer_serializable(tokenizer, language):
"""Extend with other tokenizers which are found to not be serializable
"""
if tokenizer == 'spacy':
return False
return True
def interleave_keys(a, b):
"""Interleave bits from two sort keys to form a joint sort key.
Examples that are similar in both of the provided keys will have similar
values for the key defined by this function. Useful for tasks with two
text fields like machine translation or natural language inference.
"""
def interleave(args):
return ''.join([x for t in zip(*args) for x in t])
return int(''.join(interleave(format(x, '016b') for x in (a, b))), base=2)
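# Worked example (illustrative): with 16-bit keys, interleave_keys(1, 2)
# interleaves '0000000000000001' and '0000000000000010' into a 32-bit string
# ending in '0110', i.e. the integer 6, so pairs that are close in both keys
# produce joint keys that are close to each other.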
def get_torch_version():
import torch
v = torch.__version__
version_substrings = v.split('.')
major, minor = version_substrings[0], version_substrings[1]
return int(major), int(minor)
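# For example (illustrative), torch.__version__ == '1.5.1' makes
# get_torch_version() return (1, 5); the patch level and any local version
# suffix are ignored.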
def dtype_to_attr(dtype):
# convert torch.dtype to dtype string id
# e.g. torch.int32 -> "int32"
# used for serialization
_, dtype = str(dtype).split('.')
return dtype
# TODO: Write more tests!
def ngrams_iterator(token_list, ngrams):
"""Return an iterator that yields the given tokens and their ngrams.
Arguments:
token_list: A list of tokens
ngrams: the number of ngrams.
Examples:
>>> token_list = ['here', 'we', 'are']
>>> list(ngrams_iterator(token_list, 2))
>>> ['here', 'here we', 'we', 'we are', 'are']
"""
def _get_ngrams(n):
return zip(*[token_list[i:] for i in range(n)])
for x in token_list:
yield x
for n in range(2, ngrams + 1):
for x in _get_ngrams(n):
yield ' '.join(x)
class RandomShuffler(object):
"""Use random functions while keeping track of the random state to make it
reproducible and deterministic."""
def __init__(self, random_state=None):
self._random_state = random_state
if self._random_state is None:
self._random_state = random.getstate()
@contextmanager
def use_internal_state(self):
"""Use a specific RNG state."""
old_state = random.getstate()
random.setstate(self._random_state)
yield
self._random_state = random.getstate()
random.setstate(old_state)
@property
def random_state(self):
return deepcopy(self._random_state)
@random_state.setter
def random_state(self, s):
self._random_state = s
def __call__(self, data):
"""Shuffle and return a new list."""
with self.use_internal_state():
return random.sample(data, len(data))
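# Usage sketch (illustrative, not part of the original module): two shufflers
# created from the same captured state produce the same permutation, which is
# what makes the shuffle reproducible.
#   state = random.getstate()
#   assert RandomShuffler(state)(list(range(10))) == RandomShuffler(state)(list(range(10)))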