sentence1 (string, lengths 52–3.87M) | sentence2 (string, lengths 1–47.2k) | label (1 class) |
---|---|---|
def create_token_indices(self, tokens):
"""If `apply_encoding_options` is inadequate, one can retrieve tokens from `self.token_counts`, filter with
a desired strategy and regenerate `token_index` using this method. The token index is subsequently used
when `encode_texts` or `decode_texts` methods are called.
"""
start_index = len(self.special_token)
indices = list(range(len(tokens) + start_index))
# prepend because the special tokens come in the beginning
tokens_with_special = self.special_token + list(tokens)
self._token2idx = dict(list(zip(tokens_with_special, indices)))
self._idx2token = dict(list(zip(indices, tokens_with_special))) | If `apply_encoding_options` is inadequate, one can retrieve tokens from `self.token_counts`, filter with
a desired strategy and regenerate `token_index` using this method. The token index is subsequently used
when `encode_texts` or `decode_texts` methods are called. | entailment |
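A minimal sketch of the manual-filtering workflow described in the docstring above. It assumes `tokenizer` is an already-built instance of a concrete Tokenizer subclass from this library and that `token_counts` is exposed as shown; the names are placeholders rather than a verified API.

```python
# Assumed: `tokenizer` exists and `build_vocab` has already been run on it.
# Keep only tokens seen at least 5 times and longer than 2 characters,
# then regenerate the token index used by encode_texts/decode_texts.
filtered = [token for token, count in tokenizer.token_counts.items()
            if count >= 5 and len(token) > 2]
tokenizer.create_token_indices(filtered)
```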
def apply_encoding_options(self, min_token_count=1, limit_top_tokens=None):
"""Applies the given settings for subsequent calls to `encode_texts` and `decode_texts`. This allows you to
play with different settings without having to re-run tokenization on the entire corpus.
Args:
min_token_count: The minimum token count (frequency) required for a token to be included during encoding. All tokens
below this frequency will be encoded to `0`, which corresponds to the unknown token. (Default value = 1)
limit_top_tokens: The maximum number of tokens to keep, based on their frequency. Only the most common `limit_top_tokens`
tokens will be kept. Set to None to keep everything. (Default value: None)
"""
if not self.has_vocab:
raise ValueError("You need to build the vocabulary using `build_vocab` "
"before using `apply_encoding_options`")
if min_token_count < 1:
raise ValueError("`min_token_count` should atleast be 1")
# Remove tokens with freq < min_token_count
token_counts = list(self._token_counts.items())
token_counts = [x for x in token_counts if x[1] >= min_token_count]
# Clip to max_tokens.
if limit_top_tokens is not None:
token_counts.sort(key=lambda x: x[1], reverse=True)
filtered_tokens = list(zip(*token_counts))[0]
filtered_tokens = filtered_tokens[:limit_top_tokens]
else:
filtered_tokens = list(zip(*token_counts))[0]
# Generate indices based on filtered tokens.
self.create_token_indices(filtered_tokens) | Applies the given settings for subsequent calls to `encode_texts` and `decode_texts`. This allows you to
play with different settings without having to re-run tokenization on the entire corpus.
Args:
min_token_count: The minimum token count (frequency) required for a token to be included during encoding. All tokens
below this frequency will be encoded to `0`, which corresponds to the unknown token. (Default value = 1)
limit_top_tokens: The maximum number of tokens to keep, based on their frequency. Only the most common `limit_top_tokens`
tokens will be kept. Set to None to keep everything. (Default value: None) | entailment |
def encode_texts(self, texts, unknown_token="<UNK>", verbose=1, **kwargs):
"""Encodes the given texts using internal vocabulary with optionally applied encoding options. See
`apply_encoding_options` to set various options.
Args:
texts: The list of text items to encode.
unknown_token: The token used to replace words that are out of vocabulary. If None, those words are omitted.
verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
**kwargs: The kwargs for `token_generator`.
Returns:
The encoded texts.
"""
if not self.has_vocab:
raise ValueError(
"You need to build the vocabulary using `build_vocab` before using `encode_texts`")
if unknown_token and unknown_token not in self.special_token:
raise ValueError(
"Your special token (" + unknown_token + ") to replace unknown words is not in the list of special token: " + self.special_token)
progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
encoded_texts = []
for token_data in self.token_generator(texts, **kwargs):
indices, token = token_data[:-1], token_data[-1]
token_idx = self._token2idx.get(token)
if token_idx is None and unknown_token:
token_idx = self.special_token.index(unknown_token)
if token_idx is not None:
utils._append(encoded_texts, indices, token_idx)
# Update progressbar per document level.
progbar.update(indices[0])
# All done. Finalize progressbar.
progbar.update(len(texts))
return encoded_texts | Encodes the given texts using internal vocabulary with optionally applied encoding options. See
`apply_encoding_options` to set various options.
Args:
texts: The list of text items to encode.
unknown_token: The token used to replace words that are out of vocabulary. If None, those words are omitted.
verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
**kwargs: The kwargs for `token_generator`.
Returns:
The encoded texts. | entailment |
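A hedged end-to-end sketch of the encode/decode cycle built only from the methods shown above. The `WordTokenizer` class and its import path are assumptions standing in for whichever concrete Tokenizer subclass the library provides.

```python
# Placeholder import; substitute the concrete Tokenizer subclass you use.
from texcla.processing import WordTokenizer  # hypothetical path

texts = ["The cat sat on the mat.", "Dogs are great companions."]

tokenizer = WordTokenizer()
tokenizer.build_vocab(texts)                         # build vocabulary and stats
tokenizer.apply_encoding_options(min_token_count=1)  # optional re-filtering

encoded = tokenizer.encode_texts(texts)              # nested lists of token ids
decoded = tokenizer.decode_texts(encoded)            # ids mapped back to tokens
```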
def decode_texts(self, encoded_texts, unknown_token="<UNK>", inplace=True):
"""Decodes the texts using internal vocabulary. The list structure is maintained.
Args:
encoded_texts: The list of texts to decode.
unknown_token: The placeholder value for unknown token. (Default value: "<UNK>")
inplace: True to make changes inplace. (Default value: True)
Returns:
The decoded texts.
"""
if len(self._token2idx) == 0:
raise ValueError(
"You need to build vocabulary using `build_vocab` before using `decode_texts`")
if not isinstance(encoded_texts, list):
# assume it's a numpy array
encoded_texts = encoded_texts.tolist()
if not inplace:
encoded_texts = deepcopy(encoded_texts)
utils._recursive_apply(encoded_texts,
lambda token_id: self._idx2token.get(token_id) or unknown_token)
return encoded_texts | Decodes the texts using internal vocabulary. The list structure is maintained.
Args:
encoded_texts: The list of texts to decode.
unknown_token: The placeholder value for unknown token. (Default value: "<UNK>")
inplace: True to make changes inplace. (Default value: True)
Returns:
The decoded texts. | entailment |
def build_vocab(self, texts, verbose=1, **kwargs):
"""Builds the internal vocabulary and computes various statistics.
Args:
texts: The list of text items to encode.
verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
**kwargs: The kwargs for `token_generator`.
"""
if self.has_vocab:
logger.warning(
"Tokenizer already has existing vocabulary. Overriding and building new vocabulary.")
progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
count_tracker = utils._CountTracker()
self._token_counts.clear()
self._num_texts = len(texts)
for token_data in self.token_generator(texts, **kwargs):
indices, token = token_data[:-1], token_data[-1]
count_tracker.update(indices)
self._token_counts[token] += 1
# Update progressbar per document level.
progbar.update(indices[0])
# Generate token2idx and idx2token.
self.create_token_indices(self._token_counts.keys())
# All done. Finalize progressbar update and count tracker.
count_tracker.finalize()
self._counts = count_tracker.counts
progbar.update(len(texts)) | Builds the internal vocabulary and computes various statistics.
Args:
texts: The list of text items to encode.
verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
**kwargs: The kwargs for `token_generator`. | entailment |
def pad_sequences(self, sequences, fixed_sentences_seq_length=None, fixed_token_seq_length=None,
padding='pre', truncating='post', padding_token="<PAD>"):
"""Pads each sequence to the same fixed length (length of the longest sequence or provided override).
Args:
sequences: list of list (samples, words) or list of list of list (samples, sentences, words)
fixed_sentences_seq_length: The fixed sentence sequence length to use. If None, the largest sentence length is used.
fixed_token_seq_length: The fixed token sequence length to use. If None, the largest word length is used.
padding: 'pre' or 'post', pad either before or after each sequence.
truncating: 'pre' or 'post', remove values from sequences larger than fixed_sentences_seq_length or fixed_token_seq_length
either in the beginning or in the end of the sentence or word sequence respectively.
padding_token: The token to add for padding.
Returns:
Numpy array of (samples, max_sentences, max_tokens) or (samples, max_tokens) depending on the sequence input.
Raises:
ValueError: in case of invalid values for `truncating` or `padding`.
"""
if padding_token not in self.special_token:
raise ValueError('The padding token "' + padding_token +
'" is not in the special tokens of the tokenizer.')
value = self.special_token.index(padding_token)
# Determine if input is (samples, max_sentences, max_tokens) or not.
if isinstance(sequences[0][0], list):
x = utils._pad_sent_sequences(sequences, fixed_sentences_seq_length,
fixed_token_seq_length, padding, truncating, value)
else:
x = utils._pad_token_sequences(
sequences, fixed_token_seq_length, padding, truncating, value)
return np.array(x, dtype='int32') | Pads each sequence to the same fixed length (length of the longest sequence or provided override).
Args:
sequences: list of list (samples, words) or list of list of list (samples, sentences, words)
fixed_sentences_seq_length: The fixed sentence sequence length to use. If None, the largest sentence length is used.
fixed_token_seq_length: The fixed token sequence length to use. If None, the largest word length is used.
padding: 'pre' or 'post', pad either before or after each sequence.
truncating: 'pre' or 'post', remove values from sequences larger than fixed_sentences_seq_length or fixed_token_seq_length
either in the beginning or in the end of the sentence or word sequence respectively.
padding_token: The token to add for padding.
Returns:
Numpy array of (samples, max_sentences, max_tokens) or (samples, max_tokens) depending on the sequence input.
Raises:
ValueError: in case of invalid values for `truncating` or `padding`. | entailment |
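Continuing the sketch above, a hedged example of padding word-level sequences to a fixed length; `tokenizer` and `texts` are the assumed objects from the earlier example.

```python
encoded = tokenizer.encode_texts(texts)
# Pad/truncate every sample to exactly 8 tokens; "<PAD>" must be one of the
# tokenizer's special tokens, as checked in the method above.
X = tokenizer.pad_sequences(encoded, fixed_token_seq_length=8,
                            padding='pre', truncating='post')
print(X.shape)  # (num_samples, 8)
```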
def get_stats(self, i):
"""Gets the standard statistics for aux_index `i`. For example, if `token_generator` generates
`(text_idx, sentence_idx, word)`, then `get_stats(0)` will return various statistics about sentence lengths
across texts. Similarly, `get_stats(1)` will return statistics of token lengths across sentences.
This information can be used to pad or truncate inputs.
"""
# OrderedDict to always show same order if printed.
result = OrderedDict()
result['min'] = np.min(self._counts[i])
result['max'] = np.max(self._counts[i])
result['std'] = np.std(self._counts[i])
result['mean'] = np.mean(self._counts[i])
return result | Gets the standard statistics for aux_index `i`. For example, if `token_generator` generates
`(text_idx, sentence_idx, word)`, then `get_stats(0)` will return various statistics about sentence lengths
across texts. Similarly, `get_stats(1)` will return statistics of token lengths across sentences.
This information can be used to pad or truncate inputs. | entailment |
def build_embedding_weights(word_index, embeddings_index):
"""Builds an embedding matrix for all words in vocab using embeddings_index
"""
logger.info('Loading embeddings for all words in the corpus')
embedding_dim = list(embeddings_index.values())[0].shape[-1]
# Initialize every row (including special tokens such as UNK and PAD) to zero;
# rows for words present in `embeddings_index` are overwritten below.
embedding_weights = np.zeros((len(word_index), embedding_dim))
for word, i in word_index.items():
word_vector = embeddings_index.get(word)
if word_vector is not None:
embedding_weights[i] = word_vector
return embedding_weights | Builds an embedding matrix for all words in vocab using embeddings_index | entailment |
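A small self-contained sketch of how the zero-initialised weight matrix is filled, using a toy word index and embeddings index. It assumes `build_embedding_weights` (and its module-level `logger`) is imported from this library's embeddings module.

```python
import numpy as np
# build_embedding_weights is assumed to be imported from the library module above.

# Toy vocabulary and a tiny 3-dimensional embeddings index. Words missing from
# `embeddings_index` (including the special tokens) keep their all-zero rows.
word_index = {'<PAD>': 0, '<UNK>': 1, 'cat': 2, 'dog': 3}
embeddings_index = {'cat': np.array([0.1, 0.2, 0.3]),
                    'dog': np.array([0.4, 0.5, 0.6])}

weights = build_embedding_weights(word_index, embeddings_index)
print(weights.shape)  # (4, 3)
print(weights[0])     # [0. 0. 0.]  <PAD> row stays zero
```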
def build_fasttext_wiki_embedding_obj(embedding_type):
"""FastText pre-trained word vectors for 294 languages, with 300 dimensions, trained on Wikipedia. It's recommended to use the same tokenizer for your data that was used to construct the embeddings. It's implemented as 'FasttextWikiTokenizer'. More information: https://fasttext.cc/docs/en/pretrained-vectors.html.
Args:
embedding_type: A string in the format `fasttext.wiki.$LANG_CODE`, e.g. `fasttext.wiki.de` or `fasttext.wiki.es`
Returns:
Object with the URL and filename used later on for downloading the file.
"""
lang = embedding_type.split('.')[2]
return {
'file': 'wiki.{}.vec'.format(lang),
'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-wiki/wiki.{}.vec'.format(lang),
'extract': False,
} | FastText pre-trained word vectors for 294 languages, with 300 dimensions, trained on Wikipedia. It's recommended to use the same tokenizer for your data that was used to construct the embeddings; this is implemented as 'FasttextWikiTokenizer'. More information: https://fasttext.cc/docs/en/pretrained-vectors.html.
Args:
embedding_type: A string in the format `fasttext.wiki.$LANG_CODE`, e.g. `fasttext.wiki.de` or `fasttext.wiki.es`
Returns:
Object with the URL and filename used later on for downloading the file. | entailment |
def build_fasttext_cc_embedding_obj(embedding_type):
"""FastText pre-trained word vectors for 157 languages, with 300 dimensions, trained on Common Crawl and Wikipedia. Released in 2018, it succeesed the 2017 FastText Wikipedia embeddings. It's recommended to use the same tokenizer for your data that was used to construct the embeddings. This information and more can be find on their Website: https://fasttext.cc/docs/en/crawl-vectors.html.
Args:
embedding_type: A string in the format `fasttext.cc.$LANG_CODE`, e.g. `fasttext.cc.de` or `fasttext.cc.es`
Returns:
Object with the URL and filename used later on for downloading the file.
"""
lang = embedding_type.split('.')[2]
return {
'file': 'cc.{}.300.vec.gz'.format(lang),
'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.{}.300.vec.gz'.format(lang),
'extract': False
} | FastText pre-trained word vectors for 157 languages, with 300 dimensions, trained on Common Crawl and Wikipedia. Released in 2018, they superseded the 2017 FastText Wikipedia embeddings. It's recommended to use the same tokenizer for your data that was used to construct the embeddings. This information and more can be found on their website: https://fasttext.cc/docs/en/crawl-vectors.html.
Args:
embedding_type: A string in the format `fasttext.cc.$LANG_CODE`, e.g. `fasttext.cc.de` or `fasttext.cc.es`
Returns:
Object with the URL and filename used later on for downloading the file. | entailment |
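Both helpers above are pure functions of the embedding-type string, so they can be exercised directly; the expected values follow from the format strings in the code (the import of the helper from this module is assumed).

```python
obj = build_fasttext_cc_embedding_obj('fasttext.cc.de')
print(obj['file'])  # cc.de.300.vec.gz
print(obj['url'])   # https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.de.300.vec.gz
```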
def get_embeddings_index(embedding_type='glove.42B.300d', embedding_dims=None, embedding_path=None, cache=True):
"""Retrieves embeddings index from embedding name or path. Will automatically download and cache as needed.
Args:
embedding_type: The embedding type to load.
embedding_dims: The dimensionality of the embedding vectors, forwarded to the index builder.
embedding_path: Path to a local embedding to use instead of the embedding type. Ignores `embedding_type` if specified.
cache: Whether to keep the loaded embeddings index in the in-memory cache for subsequent calls. (Default value: True)
Returns:
The embeddings indexed by word.
"""
if embedding_path is not None:
embedding_type = embedding_path # identify embedding by path
embeddings_index = _EMBEDDINGS_CACHE.get(embedding_type)
if embeddings_index is not None:
return embeddings_index
if embedding_path is None:
embedding_type_obj = get_embedding_type(embedding_type)
# some very rough wrangling of zip files with the keras util `get_file`
# a special problem: when multiple files are in one zip file
extract = embedding_type_obj.get('extract', True)
file_path = get_file(
embedding_type_obj['file'], origin=embedding_type_obj['url'], extract=extract, cache_subdir='embeddings', file_hash=embedding_type_obj.get('file_hash'))
if 'file_in_zip' in embedding_type_obj:
zip_folder = file_path.split('.zip')[0]
with ZipFile(file_path, 'r') as zf:
zf.extractall(zip_folder)
file_path = os.path.join(
zip_folder, embedding_type_obj['file_in_zip'])
else:
if extract:
if file_path.endswith('.zip'):
file_path = file_path.split('.zip')[0]
# if file_path.endswith('.gz'):
# file_path = file_path.split('.gz')[0]
else:
file_path = embedding_path
embeddings_index = _build_embeddings_index(file_path, embedding_dims)
if cache:
_EMBEDDINGS_CACHE[embedding_type] = embeddings_index
return embeddings_index | Retrieves embeddings index from embedding name or path. Will automatically download and cache as needed.
Args:
embedding_type: The embedding type to load.
embedding_dims: The dimensionality of the embedding vectors, forwarded to the index builder.
embedding_path: Path to a local embedding to use instead of the embedding type. Ignores `embedding_type` if specified.
cache: Whether to keep the loaded embeddings index in the in-memory cache for subsequent calls. (Default value: True)
Returns:
The embeddings indexed by word. | entailment |
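A hedged usage sketch for the resolver above. The registered type name is the function's own default; the local-path variant is shown commented out, and the dict-like lookup at the end is an assumption about the returned index.

```python
# By registered type: downloads on first use, then serves from the cache.
embeddings = get_embeddings_index('glove.42B.300d')

# Or from a local file, in which case `embedding_type` is ignored:
# embeddings = get_embeddings_index(embedding_path='/path/to/vectors.vec',
#                                   embedding_dims=300)

vector = embeddings.get('house')  # None if the word is absent (dict-like index assumed)
```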
def token_generator(self, texts, **kwargs):
"""Yields tokens from texts as `(text_idx, character)`
"""
for text_idx, text in enumerate(texts):
if self.lower:
text = text.lower()
for char in text:
yield text_idx, char | Yields tokens from texts as `(text_idx, character)` | entailment |
def token_generator(self, texts, **kwargs):
"""Yields tokens from texts as `(text_idx, sent_idx, character)`
Args:
texts: The list of texts.
**kwargs: Supported args include:
n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
batch_size: The number of texts to accumulate into a common working set before processing.
(Default value: 1000)
"""
# Perf optimization. Only process what is necessary.
n_threads, batch_size = utils._parse_spacy_kwargs(**kwargs)
nlp = spacy.load(self.lang)
kwargs = {
'batch_size': batch_size,
'n_threads': n_threads,
'disable': ['ner']
}
# Perf optimization: Lower the entire text instead of individual tokens.
texts_gen = utils._apply_generator(
texts, lambda x: x.lower()) if self.lower else texts
for text_idx, doc in enumerate(nlp.pipe(texts_gen, **kwargs)):
for sent_idx, sent in enumerate(doc.sents):
for word in sent:
for char in word:
yield text_idx, sent_idx, char | Yields tokens from texts as `(text_idx, sent_idx, character)`
Args:
texts: The list of texts.
**kwargs: Supported args include:
n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
batch_size: The number of texts to accumulate into a common working set before processing.
(Default value: 1000) | entailment |
def equal_distribution_folds(y, folds=2):
"""Creates `folds` number of indices that has roughly balanced multi-label distribution.
Args:
y: The multi-label outputs.
folds: The number of folds to create.
Returns:
`folds` number of indices that have roughly equal multi-label distributions.
"""
n, classes = y.shape
# Compute sample distribution over classes
dist = y.sum(axis=0).astype('float')
dist /= dist.sum()
index_list = []
fold_dist = np.zeros((folds, classes), dtype='float')
for _ in range(folds):
index_list.append([])
for i in range(n):
if i < folds:
target_fold = i
else:
normed_folds = fold_dist.T / fold_dist.sum(axis=1)
how_off = normed_folds.T - dist
target_fold = np.argmin(
np.dot((y[i] - .5).reshape(1, -1), how_off.T))
fold_dist[target_fold] += y[i]
index_list[target_fold].append(i)
logger.debug("Fold distributions:")
logger.debug(fold_dist)
return index_list | Creates `folds` number of indices that have roughly balanced multi-label distributions.
Args:
y: The multi-label outputs.
folds: The number of folds to create.
Returns:
`folds` number of indices that have roughly equal multi-label distributions. | entailment |
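A small self-contained example of the fold-balancing routine with a toy multi-label matrix; it assumes `equal_distribution_folds` (and its module-level `logger`) is importable from this library.

```python
import numpy as np
# equal_distribution_folds is assumed to be imported from the library module above.

# 6 samples, 3 labels; each class appears in exactly 3 samples.
y = np.array([[1, 0, 0],
              [1, 1, 0],
              [0, 1, 0],
              [0, 1, 1],
              [1, 0, 1],
              [0, 0, 1]])

fold_indices = equal_distribution_folds(y, folds=2)
train_idx, val_idx = fold_indices
print(y[train_idx].sum(axis=0), y[val_idx].sum(axis=0))  # roughly equal per class
```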
def build_model(self, token_encoder_model, sentence_encoder_model,
trainable_embeddings=True, output_activation='softmax'):
"""Builds a model that first encodes all words within sentences using `token_encoder_model`, followed by
`sentence_encoder_model`.
Args:
token_encoder_model: An instance of `SequenceEncoderBase` for encoding tokens within sentences. This model
will be applied across all sentences to create a sentence encoding.
sentence_encoder_model: An instance of `SequenceEncoderBase` operating on sentence encoding generated by
`token_encoder_model`. This encoding is then fed into a final `Dense` layer for classification.
trainable_embeddings: Whether or not to fine tune embeddings.
output_activation: The output activation to use. (Default value: 'softmax')
Use:
- `softmax` for binary or multi-class.
- `sigmoid` for multi-label classification.
- `linear` for regression output.
Returns:
The model output tensor.
"""
if not isinstance(token_encoder_model, SequenceEncoderBase):
raise ValueError("`token_encoder_model` should be an instance of `{}`".format(
SequenceEncoderBase))
if not isinstance(sentence_encoder_model, SequenceEncoderBase):
raise ValueError("`sentence_encoder_model` should be an instance of `{}`".format(
SequenceEncoderBase))
if not sentence_encoder_model.allows_dynamic_length() and self.max_sents is None:
raise ValueError("Sentence encoder model '{}' requires padding. "
"You need to provide `max_sents`")
if self.embeddings_index is None:
# `token_index` already reserves index 0 for the unknown token, so no size offset is needed.
embedding_layer = Embedding(len(self.token_index),
self.embedding_dims,
input_length=self.max_tokens,
mask_zero=token_encoder_model.allows_dynamic_length(),
trainable=trainable_embeddings)
else:
embedding_layer = Embedding(len(self.token_index),
self.embedding_dims,
weights=[build_embedding_weights(
self.token_index, self.embeddings_index)],
input_length=self.max_tokens,
mask_zero=token_encoder_model.allows_dynamic_length(),
trainable=trainable_embeddings)
word_input = Input(shape=(self.max_tokens,), dtype='int32')
x = embedding_layer(word_input)
word_encoding = token_encoder_model(x)
token_encoder_model = Model(
word_input, word_encoding, name='word_encoder')
doc_input = Input(
shape=(self.max_sents, self.max_tokens), dtype='int32')
sent_encoding = TimeDistributed(token_encoder_model)(doc_input)
x = sentence_encoder_model(sent_encoding)
x = Dense(self.num_classes, activation=output_activation)(x)
return Model(doc_input, x) | Builds a model that first encodes all words within sentences using `token_encoder_model`, followed by
`sentence_encoder_model`.
Args:
token_encoder_model: An instance of `SequenceEncoderBase` for encoding tokens within sentences. This model
will be applied across all sentences to create a sentence encoding.
sentence_encoder_model: An instance of `SequenceEncoderBase` operating on sentence encoding generated by
`token_encoder_model`. This encoding is then fed into a final `Dense` layer for classification.
trainable_embeddings: Whether or not to fine tune embeddings.
output_activation: The output activation to use. (Default value: 'softmax')
Use:
- `softmax` for binary or multi-class.
- `sigmoid` for multi-label classification.
- `linear` for regression output.
Returns:
The model output tensor. | entailment |
def process_save(X, y, tokenizer, proc_data_path, max_len=400, train=False, ngrams=None, limit_top_tokens=None):
"""Process text and save as Dataset
"""
if train and limit_top_tokens is not None:
tokenizer.apply_encoding_options(limit_top_tokens=limit_top_tokens)
X_encoded = tokenizer.encode_texts(X)
if ngrams is not None:
X_encoded = tokenizer.add_ngrams(X_encoded, n=ngrams, train=train)
X_padded = tokenizer.pad_sequences(
X_encoded, fixed_token_seq_length=max_len)
if train:
ds = Dataset(X_padded,
y, tokenizer=tokenizer)
else:
ds = Dataset(X_padded, y)
ds.save(proc_data_path) | Process text and save as Dataset | entailment |
def setup_data(X, y, tokenizer, proc_data_path, **kwargs):
"""Setup data
Args:
X: text data,
y: data labels,
tokenizer: A Tokenizer instance
proc_data_path: Path for the processed data
"""
# only build vocabulary once (e.g. training data)
train = not tokenizer.has_vocab
if train:
tokenizer.build_vocab(X)
process_save(X, y, tokenizer, proc_data_path,
train=train, **kwargs)
return tokenizer | Setup data
Args:
X: text data,
y: data labels,
tokenizer: A Tokenizer instance
proc_data_path: Path for the processed data | entailment |
def split_data(X, y, ratio=(0.8, 0.1, 0.1)):
"""Splits data into a training, validation, and test set.
Args:
X: text data
y: data labels
ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1)
Returns:
split data: X_train, X_val, X_test, y_train, y_val, y_test
"""
assert(sum(ratio) == 1 and len(ratio) == 3)
X_train, X_rest, y_train, y_rest = train_test_split(
X, y, train_size=ratio[0])
# `X_rest` holds ratio[1] + ratio[2] of the data, so rescale the validation share.
X_val, X_test, y_val, y_test = train_test_split(
X_rest, y_rest, train_size=ratio[1] / (ratio[1] + ratio[2]))
return X_train, X_val, X_test, y_train, y_val, y_test | Splits data into a training, validation, and test set.
Args:
X: text data
y: data labels
ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1)
Returns:
split data: X_train, X_val, X_test, y_train, y_val, y_test | entailment |
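A quick check of the split sizes using the corrected rescaling above (the validation split is taken from the 20% remainder, so its share is rescaled to 0.5); `split_data` is the function defined above.

```python
X = ['text {}'.format(i) for i in range(100)]
y = [i % 2 for i in range(100)]

X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y, ratio=(0.8, 0.1, 0.1))
print(len(X_train), len(X_val), len(X_test))  # 80 10 10
```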
def setup_data_split(X, y, tokenizer, proc_data_dir, **kwargs):
"""Setup data while splitting into a training, validation, and test set.
Args:
X: text data,
y: data labels,
tokenizer: A Tokenizer instance
proc_data_dir: Directory for the split and processed data
"""
X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y)
# only build vocabulary on training data
tokenizer.build_vocab(X_train)
process_save(X_train, y_train, tokenizer, path.join(
proc_data_dir, 'train.bin'), train=True, **kwargs)
process_save(X_val, y_val, tokenizer, path.join(
proc_data_dir, 'val.bin'), **kwargs)
process_save(X_test, y_test, tokenizer, path.join(
proc_data_dir, 'test.bin'), **kwargs) | Setup data while splitting into a training, validation, and test set.
Args:
X: text data,
y: data labels,
tokenizer: A Tokenizer instance
proc_data_dir: Directory for the split and processed data | entailment |
def load_data_split(proc_data_dir):
"""Loads a split dataset
Args:
proc_data_dir: Directory with the split and processed data
Returns:
(Training Data, Validation Data, Test Data)
"""
ds_train = Dataset.load(path.join(proc_data_dir, 'train.bin'))
ds_val = Dataset.load(path.join(proc_data_dir, 'val.bin'))
ds_test = Dataset.load(path.join(proc_data_dir, 'test.bin'))
return ds_train, ds_val, ds_test | Loads a split dataset
Args:
proc_data_dir: Directory with the split and processed data
Returns:
(Training Data, Validation Data, Test Data) | entailment |
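A hedged end-to-end sketch tying the split, setup, and load helpers together; `WordTokenizer`, the `X`/`y` variables, and the output directory are placeholders.

```python
import os

tokenizer = WordTokenizer()          # assumed concrete Tokenizer subclass
proc_data_dir = 'data/processed'     # hypothetical output directory
os.makedirs(proc_data_dir, exist_ok=True)

# Split, build the vocabulary on the training portion, tokenize, and persist.
setup_data_split(X, y, tokenizer, proc_data_dir, max_len=400)

# Later (or in another process): reload the processed datasets.
ds_train, ds_val, ds_test = load_data_split(proc_data_dir)
```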
def build_model(self, token_encoder_model, trainable_embeddings=True, output_activation='softmax'):
"""Builds a model using the given `text_model`
Args:
token_encoder_model: An instance of `SequenceEncoderBase` for encoding all the tokens within a document.
This encoding is then fed into a final `Dense` layer for classification.
trainable_embeddings: Whether or not to fine tune embeddings.
output_activation: The output activation to use. (Default value: 'softmax')
Use:
- `softmax` for binary or multi-class.
- `sigmoid` for multi-label classification.
- `linear` for regression output.
Returns:
The model output tensor.
"""
if not isinstance(token_encoder_model, SequenceEncoderBase):
raise ValueError("`token_encoder_model` should be an instance of `{}`".format(
SequenceEncoderBase))
if not token_encoder_model.allows_dynamic_length() and self.max_tokens is None:
raise ValueError("The provided `token_encoder_model` does not allow variable length mini-batches. "
"You need to provide `max_tokens`")
if self.embeddings_index is None:
# `token_index` already reserves index 0 for the unknown token, so no size offset is needed.
embedding_layer = Embedding(len(self.token_index),
self.embedding_dims,
input_length=self.max_tokens,
mask_zero=token_encoder_model.allows_dynamic_length(),
trainable=trainable_embeddings)
else:
embedding_layer = Embedding(len(self.token_index),
self.embedding_dims,
weights=[build_embedding_weights(
self.token_index, self.embeddings_index)],
input_length=self.max_tokens,
mask_zero=token_encoder_model.allows_dynamic_length(),
trainable=trainable_embeddings)
sequence_input = Input(shape=(self.max_tokens,), dtype='int32')
x = embedding_layer(sequence_input)
x = token_encoder_model(x)
x = Dense(self.num_classes, activation=output_activation)(x)
return Model(sequence_input, x) | Builds a model using the given `token_encoder_model`
Args:
token_encoder_model: An instance of `SequenceEncoderBase` for encoding all the tokens within a document.
This encoding is then fed into a final `Dense` layer for classification.
trainable_embeddings: Whether or not to fine tune embeddings.
output_activation: The output activation to use. (Default value: 'softmax')
Use:
- `softmax` for binary or multi-class.
- `sigmoid` for multi-label classification.
- `linear` for regression output.
Returns:
The model output tensor. | entailment |
def _softmax(x, dim):
"""Computes softmax along a specified dim. Keras currently lacks this feature.
"""
if K.backend() == 'tensorflow':
import tensorflow as tf
return tf.nn.softmax(x, dim)
elif K.backend() == 'cntk':
import cntk
return cntk.softmax(x, dim)
elif K.backend() == 'theano':
# Theano cannot softmax along an arbitrary dim.
# So, we will shuffle `dim` to -1 and un-shuffle after softmax.
perm = np.arange(K.ndim(x))
perm[dim], perm[-1] = perm[-1], perm[dim]
x_perm = K.permute_dimensions(x, perm)
output = K.softmax(x_perm)
# Permute back
perm[dim], perm[-1] = perm[-1], perm[dim]
output = K.permute_dimensions(output, perm)
return output
else:
raise ValueError("Backend '{}' not supported".format(K.backend())) | Computes softmax along a specified dim. Keras currently lacks this feature. | entailment |
def _apply_options(self, token):
"""Applies various filtering and processing options on token.
Returns:
The processed token. None if filtered.
"""
# Apply word token filtering.
if token.is_punct and self.remove_punct:
return None
if token.is_stop and self.remove_stop_words:
return None
if token.is_digit and self.remove_digits:
return None
if token.is_oov and self.exclude_oov:
return None
if token.pos_ in self.exclude_pos_tags:
return None
if token.ent_type_ in self.exclude_entities:
return None
# Lemmatized ones are already lowered.
if self.lemmatize:
return token.lemma_
if self.lower:
return token.lower_
return token.orth_ | Applies various filtering and processing options on token.
Returns:
The processed token. None if filtered. | entailment |
def token_generator(self, texts, **kwargs):
"""Yields tokens from texts as `(text_idx, word)`
Args:
texts: The list of texts.
**kwargs: Supported args include:
n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
batch_size: The number of texts to accumulate into a common working set before processing.
(Default value: 1000)
"""
# Perf optimization. Only process what is necessary.
n_threads, batch_size = utils._parse_spacy_kwargs(**kwargs)
nlp = spacy.load(self.lang)
disabled = ['parser']
if len(self.exclude_entities) > 0:
disabled.append('ner')
kwargs = {
'batch_size': batch_size,
'n_threads': n_threads,
'disable': disabled
}
for text_idx, doc in enumerate(nlp.pipe(texts, **kwargs)):
for word in doc:
processed_word = self._apply_options(word)
if processed_word is not None:
yield text_idx, processed_word | Yields tokens from texts as `(text_idx, word)`
Args:
texts: The list of texts.
**kwargs: Supported args include:
n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
batch_size: The number of texts to accumulate into a common working set before processing.
(Default value: 1000) | entailment |
def _append(lst, indices, value):
"""Adds `value` to `lst` list indexed by `indices`. Will create sub lists as required.
"""
for i, idx in enumerate(indices):
# We need to loop because sometimes indices can increment by more than 1 due to missing tokens.
# Example: Sentence with no words after filtering words.
while len(lst) <= idx:
# Update max counts whenever a new sublist is created.
# There is no need to worry about indices beyond `i` since they will end up creating new lists as well.
lst.append([])
lst = lst[idx]
# Append the token value to the innermost list.
lst.append(value) | Adds `value` to `lst` list indexed by `indices`. Will create sub lists as required. | entailment |
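A small trace of how `_append` grows the nested output used by `encode_texts`; the printed result follows directly from the loop above (the helper is assumed to be imported from this module).

```python
encoded = []
_append(encoded, (0,), 11)   # first token of document 0
_append(encoded, (0,), 12)   # second token of document 0
_append(encoded, (2,), 13)   # document 1 produced no tokens, so an empty
                             # sub-list is created for it on the way to doc 2
print(encoded)               # [[11, 12], [], [13]]
```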
def _parse_spacy_kwargs(**kwargs):
"""Supported args include:
Args:
n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
batch_size: The number of texts to accumulate into a common working set before processing.
(Default value: 1000)
"""
n_threads = kwargs.get('n_threads') or kwargs.get('num_threads')
batch_size = kwargs.get('batch_size')
if n_threads is None or n_threads == -1:
n_threads = cpu_count() - 1
if batch_size is None or batch_size == -1:
batch_size = 1000
return n_threads, batch_size | Supported args include:
Args:
n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
batch_size: The number of texts to accumulate into a common working set before processing.
(Default value: 1000) | entailment |
def update(self, indices):
"""Updates counts based on indices. The algorithm tracks the index change at i and
updates global counts for all indices beyond i with the local counts tracked so far.
"""
# Initialize various lists for the first time based on length of indices.
if self._prev_indices is None:
self._prev_indices = indices
# +1 to track token counts in the last index.
self._local_counts = np.full(len(indices) + 1, 1)
self._local_counts[-1] = 0
self.counts = [[] for _ in range(len(self._local_counts))]
has_reset = False
for i in range(len(indices)):
# index value changed. Push all local values beyond i to count and reset those local_counts.
# For example, if document index changed, push counts on sentences and tokens and reset their local_counts
# to indicate that we are tracking those for new document. We need to do this at all document hierarchies.
if indices[i] > self._prev_indices[i]:
self._local_counts[i] += 1
has_reset = True
for j in range(i + 1, len(self.counts)):
self.counts[j].append(self._local_counts[j])
self._local_counts[j] = 1
# If none of the aux indices changed, update token count.
if not has_reset:
self._local_counts[-1] += 1
self._prev_indices = indices[:] | Updates counts based on indices. The algorithm tracks the index change at i and
updates global counts for all indices beyond i with the local counts tracked so far. | entailment |
def finalize(self):
"""This will add the very last document to counts. We also get rid of counts[0] since that
represents the document level, which doesn't come under anything else. We also convert all count
values to numpy arrays so that stats can be computed easily.
"""
for i in range(1, len(self._local_counts)):
self.counts[i].append(self._local_counts[i])
self.counts.pop(0)
for i in range(len(self.counts)):
self.counts[i] = np.array(self.counts[i]) | This will add the very last document to counts. We also get rid of counts[0] since that
represents the document level, which doesn't come under anything else. We also convert all count
values to numpy arrays so that stats can be computed easily. | entailment |
def read_folder(directory):
"""read text files in directory and returns them as array
Args:
directory: where the text files are
Returns:
Array of text
"""
res = []
for filename in os.listdir(directory):
with io.open(os.path.join(directory, filename), encoding="utf-8") as f:
content = f.read()
res.append(content)
return res | Reads text files in a directory and returns them as an array
Args:
directory: where the text files are
Returns:
Array of text | entailment |
def read_pos_neg_data(path, folder, limit):
"""returns array with positive and negative examples"""
training_pos_path = os.path.join(path, folder, 'pos')
training_neg_path = os.path.join(path, folder, 'neg')
X_pos = read_folder(training_pos_path)
X_neg = read_folder(training_neg_path)
if limit is None:
X = X_pos + X_neg
else:
X = X_pos[:limit] + X_neg[:limit]
y = [1] * int(len(X) / 2) + [0] * int(len(X) / 2)
return X, y | returns array with positive and negative examples | entailment |
def imdb(limit=None, shuffle=True):
"""Downloads (and caches) IMDB Moview Reviews. 25k training data, 25k test data
Args:
limit: get only first N items for each class
Returns:
[X_train, X_test, y_train, y_test]
"""
movie_review_url = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
# download and extract, thus remove the suffix '.tar.gz'
path = keras.utils.get_file(
'aclImdb.tar.gz', movie_review_url, extract=True)[:-7]
X_train, y_train = read_pos_neg_data(path, 'train', limit)
X_test, y_test = read_pos_neg_data(path, 'test', limit)
if shuffle:
X_train, y_train = sklearn.utils.shuffle(X_train, y_train)
X_test, y_test = sklearn.utils.shuffle(X_test, y_test)
return X_train, X_test, y_train, y_test | Downloads (and caches) IMDB Movie Reviews. 25k training data, 25k test data
Args:
limit: get only first N items for each class
Returns:
[X_train, X_test, y_train, y_test] | entailment |
def to_absolute(self, x, y):
"""
Converts coordinates provided with reference to the center \
of the canvas (0, 0) to absolute coordinates which are used \
by the canvas object in which (0, 0) is located in the top \
left of the object.
:param x: x value in pixels
:param y: y value in pixels
:return: the absolute (x, y) coordinates as a tuple
"""
return x + self.size/2, y + self.size/2 | Converts coordinates provided with reference to the center \
of the canvas (0, 0) to absolute coordinates which are used \
by the canvas object in which (0, 0) is located in the top \
left of the object.
:param x: x value in pixels
:param y: y value in pixels
:return: the absolute (x, y) coordinates as a tuple | entailment |
def set_value(self, number: (float, int)):
"""
Sets the value of the graphic
:param number: the number (must be between 0 and \
'max_value' or the scale will peg the limits)
:return: None
"""
self.canvas.delete('all')
self.canvas.create_image(0, 0, image=self.image, anchor='nw')
number = number if number <= self.max_value else self.max_value
number = 0.0 if number < 0.0 else number
radius = 0.9 * self.size/2.0
angle_in_radians = (2.0 * cmath.pi / 3.0) \
+ number / self.max_value * (5.0 * cmath.pi / 3.0)
center = cmath.rect(0, 0)
outer = cmath.rect(radius, angle_in_radians)
if self.needle_thickness == 0:
line_width = int(5 * self.size / 200)
line_width = 1 if line_width < 1 else line_width
else:
line_width = self.needle_thickness
self.canvas.create_line(
*self.to_absolute(center.real, center.imag),
*self.to_absolute(outer.real, outer.imag),
width=line_width,
fill=self.needle_color
)
self.readout['text'] = '{}{}'.format(number, self.unit) | Sets the value of the graphic
:param number: the number (must be between 0 and \
'max_value' or the scale will peg the limits)
:return: None | entailment |
def _draw_background(self, divisions=10):
"""
Draws the background of the dial
:param divisions: the number of divisions
between 'ticks' shown on the dial
:return: None
"""
self.canvas.create_arc(2, 2, self.size-2, self.size-2,
style=tk.PIESLICE, start=-60, extent=30,
fill='red')
self.canvas.create_arc(2, 2, self.size-2, self.size-2,
style=tk.PIESLICE, start=-30, extent=60,
fill='yellow')
self.canvas.create_arc(2, 2, self.size-2, self.size-2,
style=tk.PIESLICE, start=30, extent=210,
fill='green')
# find the distance between the center and the inner tick radius
inner_tick_radius = int(self.size * 0.4)
outer_tick_radius = int(self.size * 0.5)
for tick in range(divisions):
angle_in_radians = (2.0 * cmath.pi / 3.0) \
+ tick/divisions * (5.0 * cmath.pi / 3.0)
inner_point = cmath.rect(inner_tick_radius, angle_in_radians)
outer_point = cmath.rect(outer_tick_radius, angle_in_radians)
self.canvas.create_line(
*self.to_absolute(inner_point.real, inner_point.imag),
*self.to_absolute(outer_point.real, outer_point.imag),
width=1
) | Draws the background of the dial
:param divisions: the number of divisions
between 'ticks' shown on the dial
:return: None | entailment |
def draw_axes(self):
"""
Removes all existing series and re-draws the axes.
:return: None
"""
self.canvas.delete('all')
rect = 50, 50, self.w - 50, self.h - 50
self.canvas.create_rectangle(rect, outline="black")
for x in self.frange(0, self.x_max - self.x_min + 1, self.x_tick):
value = Decimal(self.x_min + x)
if self.x_min <= value <= self.x_max:
x_step = (self.px_x * x) / self.x_tick
coord = 50 + x_step, self.h - 50, 50 + x_step, self.h - 45
self.canvas.create_line(coord, fill="black")
coord = 50 + x_step, self.h - 40
label = round(Decimal(self.x_min + x), 1)
self.canvas.create_text(coord, fill="black", text=label)
for y in self.frange(0, self.y_max - self.y_min + 1, self.y_tick):
value = Decimal(self.y_max - y)
if self.y_min <= value <= self.y_max:
y_step = (self.px_y * y) / self.y_tick
coord = 45, 50 + y_step, 50, 50 + y_step
self.canvas.create_line(coord, fill="black")
coord = 35, 50 + y_step
label = round(value, 1)
self.canvas.create_text(coord, fill="black", text=label) | Removes all existing series and re-draws the axes.
:return: None | entailment |
def plot_point(self, x, y, visible=True, color='black', size=5):
"""
Places a single point on the grid
:param x: the x coordinate
:param y: the y coordinate
:param visible: True if the individual point should be visible
:param color: the color of the point
:param size: the point size in pixels
:return: The absolute coordinates as a tuple
"""
xp = (self.px_x * (x - self.x_min)) / self.x_tick
yp = (self.px_y * (self.y_max - y)) / self.y_tick
coord = 50 + xp, 50 + yp
if visible:
# divide down to an appropriate size
size = int(size/2) if int(size/2) > 1 else 1
x, y = coord
self.canvas.create_oval(
x-size, y-size,
x+size, y+size,
fill=color
)
return coord | Places a single point on the grid
:param x: the x coordinate
:param y: the y coordinate
:param visible: True if the individual point should be visible
:param color: the color of the point
:param size: the point size in pixels
:return: The absolute coordinates as a tuple | entailment |
def plot_line(self, points: list, color='black', point_visibility=False):
"""
Plot a line of points
:param points: a list of tuples, each tuple containing an (x, y) point
:param color: the color of the line
:param point_visibility: True if the points \
should be individually visible
:return: None
"""
last_point = ()
for point in points:
this_point = self.plot_point(point[0], point[1],
color=color, visible=point_visibility)
if last_point:
self.canvas.create_line(last_point + this_point, fill=color)
last_point = this_point | Plot a line of points
:param points: a list of tuples, each tuple containing an (x, y) point
:param color: the color of the line
:param point_visibility: True if the points \
should be individually visible
:return: None | entailment |
def frange(start, stop, step, digits_to_round=3):
"""
Works like range for doubles
:param start: starting value
:param stop: ending value
:param step: the increment_value
:param digits_to_round: the digits to which to round \
(makes floating-point numbers much easier to work with)
:return: generator
"""
while start < stop:
yield round(start, digits_to_round)
start += step | Works like range for doubles
:param start: starting value
:param stop: ending value
:param step: the increment_value
:param digits_to_round: the digits to which to round \
(makes floating-point numbers much easier to work with)
:return: generator | entailment |
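A short example of the float-range generator above, treated here as a standalone helper (inside the widget it is accessed via `self` to walk tick positions).

```python
ticks = list(frange(0, 1, 0.25))
print(ticks)  # [0, 0.25, 0.5, 0.75]
```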
def _load_new(self, img_data: str):
"""
Load a new image.
:param img_data: the image data as a base64 string
:return: None
"""
self._image = tk.PhotoImage(data=img_data)
self._image = self._image.subsample(int(200 / self._size),
int(200 / self._size))
self._canvas.delete('all')
self._canvas.create_image(0, 0, image=self._image, anchor='nw')
if self._user_click_callback is not None:
self._user_click_callback(self._on) | Load a new image.
:param img_data: the image data as a base64 string
:return: None | entailment |
def to_grey(self, on: bool=False):
"""
Change the LED to grey.
:param on: Unused, here for API consistency with the other states
:return: None
"""
self._on = False
self._load_new(led_grey) | Change the LED to grey.
:param on: Unused, here for API consistency with the other states
:return: None | entailment |
def to_green(self, on: bool=False):
"""
Change the LED to green (on or off).
:param on: True or False
:return: None
"""
self._on = on
if on:
self._load_new(led_green_on)
if self._toggle_on_click:
self._canvas.bind('<Button-1>', lambda x: self.to_green(False))
else:
self._load_new(led_green)
if self._toggle_on_click:
self._canvas.bind('<Button-1>', lambda x: self.to_green(True)) | Change the LED to green (on or off).
:param on: True or False
:return: None | entailment |
def to_red(self, on: bool=False):
"""
Change the LED to red (on or off)
:param on: True or False
:return: None
"""
self._on = on
if on:
self._load_new(led_red_on)
if self._toggle_on_click:
self._canvas.bind('<Button-1>', lambda x: self.to_red(False))
else:
self._load_new(led_red)
if self._toggle_on_click:
self._canvas.bind('<Button-1>', lambda x: self.to_red(True)) | Change the LED to red (on or off)
:param on: True or False
:return: None | entailment |
def to_yellow(self, on: bool=False):
"""
Change the LED to yellow (on or off)
:param on: True or False
:return: None
"""
self._on = on
if on:
self._load_new(led_yellow_on)
if self._toggle_on_click:
self._canvas.bind('<Button-1>',
lambda x: self.to_yellow(False))
else:
self._load_new(led_yellow)
if self._toggle_on_click:
self._canvas.bind('<Button-1>',
lambda x: self.to_yellow(True)) | Change the LED to yellow (on or off)
:param on: True or False
:return: None | entailment |
def _redraw(self):
"""
Forgets the current layout and redraws with the most recent information
:return: None
"""
for row in self._rows:
for widget in row:
widget.grid_forget()
offset = 0 if not self.headers else 1
for i, row in enumerate(self._rows):
for j, widget in enumerate(row):
widget.grid(row=i+offset, column=j) | Forgets the current layout and redraws with the most recent information
:return: None | entailment |
def remove_row(self, row_number: int=-1):
"""
Removes a specified row of data
:param row_number: the row to remove (defaults to the last row)
:return: None
"""
if len(self._rows) == 0:
return
row = self._rows.pop(row_number)
for widget in row:
widget.destroy() | Removes a specified row of data
:param row_number: the row to remove (defaults to the last row)
:return: None | entailment |
def add_row(self, data: list):
"""
Add a row of data to the current widget
:param data: a row of data
:return: None
"""
# validation
if self.headers:
if len(self.headers) != len(data):
raise ValueError
if len(data) != self.num_of_columns:
raise ValueError
offset = 0 if not self.headers else 1
row = list()
for i, element in enumerate(data):
label = ttk.Label(self, text=str(element), relief=tk.GROOVE,
padding=self.padding)
label.grid(row=len(self._rows) + offset, column=i, sticky='E,W')
row.append(label)
self._rows.append(row) | Add a row of data to the current widget
:param data: a row of data
:return: None | entailment |
def add_row(self, data: list=None):
"""
Add a row of data to the current widget, add a <Tab> \
binding to the last element of the last row, and set \
the focus at the beginning of the next row.
:param data: a row of data
:return: None
"""
# validation
if self.headers and data:
if len(self.headers) != len(data):
raise ValueError
offset = 0 if not self.headers else 1
row = list()
if data:
for i, element in enumerate(data):
contents = '' if element is None else str(element)
entry = ttk.Entry(self)
entry.insert(0, contents)
entry.grid(row=len(self._rows) + offset,
column=i,
sticky='E,W')
row.append(entry)
else:
for i in range(self.num_of_columns):
entry = ttk.Entry(self)
entry.grid(row=len(self._rows) + offset,
column=i,
sticky='E,W')
row.append(entry)
self._rows.append(row)
# clear all bindings
for row in self._rows:
for widget in row:
widget.unbind('<Tab>')
def add(e):
self.add_row()
last_entry = self._rows[-1][-1]
last_entry.bind('<Tab>', add)
e = self._rows[-1][0]
e.focus_set()
self._redraw() | Add a row of data to the current widget, add a <Tab> \
binding to the last element of the last row, and set \
the focus at the beginning of the next row.
:param data: a row of data
:return: None | entailment |
def _read_as_dict(self):
"""
Read the data contained in all entries as a list of
dictionaries with the headers as the dictionary keys
:return: list of dicts containing all tabular data
"""
data = list()
for row in self._rows:
row_data = OrderedDict()
for i, header in enumerate(self.headers):
row_data[header.cget('text')] = row[i].get()
data.append(row_data)
return data | Read the data contained in all entries as a list of
dictionaries with the headers as the dictionary keys
:return: list of dicts containing all tabular data | entailment |
def _read_as_table(self):
"""
Read the data contained in all entries as a list of
lists containing all of the data
:return: list of lists containing all tabular data
"""
rows = list()
for row in self._rows:
rows.append([row[i].get() for i in range(self.num_of_columns)])
return rows | Read the data contained in all entries as a list of
lists containing all of the data
:return: list of lists containing all tabular data | entailment |
def add_row(self, data: list):
"""
Add a row of buttons each with their own callbacks to the
current widget. Each element in `data` will consist of a
label and a command.
:param data: a list of tuples of the form ('label', <callback>)
:return: None
"""
# validation
if self.headers and data:
if len(self.headers) != len(data):
raise ValueError
offset = 0 if not self.headers else 1
row = list()
for i, e in enumerate(data):
if not isinstance(e, tuple):
raise ValueError('all elements must be a tuple '
'consisting of ("label", <command>)')
label, command = e
button = tk.Button(self, text=str(label), relief=tk.RAISED,
command=command,
padx=self.padding,
pady=self.padding)
button.grid(row=len(self._rows) + offset, column=i, sticky='ew')
row.append(button)
self._rows.append(row) | Add a row of buttons each with their own callbacks to the
current widget. Each element in `data` will consist of a
label and a command.
:param data: a list of tuples of the form ('label', <callback>)
:return: None | entailment |
def add_row(self, key: str, default: str=None,
unit_label: str=None, enable: bool=None):
"""
Add a single row and re-draw as necessary
:param key: the name and dict accessor
:param default: the default value
:param unit_label: the label that should be \
applied at the right of the entry
:param enable: the 'enabled' state (defaults to True)
:return:
"""
self.keys.append(ttk.Label(self, text=key))
self.defaults.append(default)
self.unit_labels.append(
ttk.Label(self, text=unit_label if unit_label else '')
)
self.enables.append(enable)
self.values.append(ttk.Entry(self))
row_offset = 1 if self.title is not None else 0
for i in range(len(self.keys)):
self.keys[i].grid_forget()
self.keys[i].grid(row=row_offset, column=0, sticky='e')
self.values[i].grid(row=row_offset, column=1)
if self.unit_labels[i]:
self.unit_labels[i].grid(row=row_offset, column=3, sticky='w')
if self.defaults[i]:
self.values[i].config(state=tk.NORMAL)
self.values[i].delete(0, tk.END)
self.values[i].insert(0, self.defaults[i])
if self.enables[i] in [True, None]:
self.values[i].config(state=tk.NORMAL)
elif self.enables[i] is False:
self.values[i].config(state=tk.DISABLED)
row_offset += 1
# strip <Return> and <Tab> bindings, add callbacks to all entries
self.values[i].unbind('<Return>')
self.values[i].unbind('<Tab>')
if self.callback is not None:
def callback(event):
self.callback()
self.values[i].bind('<Return>', callback)
self.values[i].bind('<Tab>', callback) | Add a single row and re-draw as necessary
:param key: the name and dict accessor
:param default: the default value
:param unit_label: the label that should be \
applied at the right of the entry
:param enable: the 'enabled' state (defaults to True)
:return: | entailment |
def reset(self):
"""
Clears all entries.
:return: None
"""
for i in range(len(self.values)):
self.values[i].delete(0, tk.END)
if self.defaults[i] is not None:
self.values[i].insert(0, self.defaults[i]) | Clears all entries.
:return: None | entailment |
def change_enables(self, enables_list: list):
"""
Enable/disable inputs.
:param enables_list: list containing enables for each key
:return: None
"""
for i, entry in enumerate(self.values):
if enables_list[i]:
entry.config(state=tk.NORMAL)
else:
entry.config(state=tk.DISABLED) | Enable/disable inputs.
:param enables_list: list containing enables for each key
:return: None | entailment |
def load(self, data: dict):
"""
Load values into the key/values via dict.
:param data: dict containing the key/values that should be inserted
:return: None
"""
for i, label in enumerate(self.keys):
key = label.cget('text')
if key in data.keys():
entry_was_enabled = True if \
self.values[i].cget('state') == 'normal' else False
if not entry_was_enabled:
self.values[i].config(state='normal')
self.values[i].delete(0, tk.END)
self.values[i].insert(0, str(data[key]))
if not entry_was_enabled:
self.values[i].config(state='disabled') | Load values into the key/values via dict.
:param data: dict containing the key/values that should be inserted
:return: None | entailment |
def get(self):
"""
Retrieve the GUI elements for program use.
:return: a dictionary containing all \
of the data from the key/value entries
"""
data = dict()
for label, entry in zip(self.keys, self.values):
data[label.cget('text')] = entry.get()
return data | Retrieve the GUI elements for program use.
:return: a dictionary containing all \
of the data from the key/value entries | entailment |
def _pressed(self, evt):
"""
Clicked somewhere in the calendar.
"""
x, y, widget = evt.x, evt.y, evt.widget
item = widget.identify_row(y)
column = widget.identify_column(x)
if not column or not (item in self._items):
# clicked in the weekdays row or just outside the columns
return
item_values = widget.item(item)['values']
if not len(item_values): # row is empty for this month
return
text = item_values[int(column[1]) - 1]
if not text: # date is empty
return
bbox = widget.bbox(item, column)
if not bbox: # calendar not visible yet
return
# update and then show selection
text = '%02d' % text
self._selection = (text, item, column)
self._show_selection(text, bbox)
if self.callback is not None:
self.callback() | Clicked somewhere in the calendar. | entailment |
def add(self, string: (str, list)):
"""
Clear the contents of the entry field and
insert the contents of string.
:param string: a str (single entry) or a list of str (one per entry) containing the text to display
:return:
"""
if len(self._entries) == 1:
self._entries[0].delete(0, 'end')
self._entries[0].insert(0, string)
else:
if len(string) != len(self._entries):
raise ValueError('the "string" list must be '
'equal to the number of entries')
for i, e in enumerate(self._entries):
self._entries[i].delete(0, 'end')
self._entries[i].insert(0, string[i]) | Clear the contents of the entry field and
insert the contents of string.
:param string: a str (single entry) or a list of str (one per entry) containing the text to display
:return: | entailment |
def remove(self):
"""
Deletes itself.
:return: None
"""
for e in self._entries:
e.grid_forget()
e.destroy()
self._remove_btn.grid_forget()
self._remove_btn.destroy()
self.deleted = True
if self._remove_callback:
self._remove_callback() | Deletes itself.
:return: None | entailment |
def get(self):
"""
Returns the value for the slot.
:return: the entry value
"""
values = [e.get() for e in self._entries]
if len(self._entries) == 1:
return values[0]
else:
return values | Returns the value for the slot.
:return: the entry value | entailment |
def _redraw(self):
"""
Clears the current layout and re-draws all elements in self._slots
:return:
"""
if self._blank_label:
self._blank_label.grid_forget()
self._blank_label.destroy()
self._blank_label = None
for slot in self._slots:
slot.grid_forget()
self._slots = [slot for slot in self._slots if not slot.deleted]
max_per_col = 8
for i, slot in enumerate(self._slots):
slot.grid(row=i % max_per_col,
column=int(i / max_per_col), sticky='ew') | Clears the current layout and re-draws all elements in self._slots
:return: | entailment |
def add(self, string: (str, list)):
"""
Add a new slot to the multi-frame containing the string.
:param string: a string to insert
:return: None
"""
slot = _SlotFrame(self,
remove_callback=self._redraw,
entries=self._slot_columns)
slot.add(string)
self._slots.append(slot)
self._redraw() | Add a new slot to the multi-frame containing the string.
:param string: a string to insert
:return: None | entailment |
def clear(self):
"""
Clear out the multi-frame
:return:
"""
for slot in self._slots:
slot.grid_forget()
slot.destroy()
self._slots = [] | Clear out the multi-frame
:return: | entailment |
def clear(self):
"""
Clear the segment.
:return: None
"""
for _, frame in self._segments.items():
frame.configure(background=self._bg_color) | Clear the segment.
:return: None | entailment |
def set_value(self, value: str):
"""
Sets the value of the 7-segment display
:param value: the desired value
:return: None
"""
self.clear()
if '.' in value:
self._segments['period'].configure(background=self._color)
if value in ['0', '0.']:
self._segments['a'].configure(background=self._color)
self._segments['b'].configure(background=self._color)
self._segments['c'].configure(background=self._color)
self._segments['d'].configure(background=self._color)
self._segments['e'].configure(background=self._color)
self._segments['f'].configure(background=self._color)
elif value in ['1', '1.']:
self._segments['b'].configure(background=self._color)
self._segments['c'].configure(background=self._color)
elif value in ['2', '2.']:
self._segments['a'].configure(background=self._color)
self._segments['b'].configure(background=self._color)
self._segments['g'].configure(background=self._color)
self._segments['e'].configure(background=self._color)
self._segments['d'].configure(background=self._color)
elif value in ['3', '3.']:
self._segments['a'].configure(background=self._color)
self._segments['b'].configure(background=self._color)
self._segments['g'].configure(background=self._color)
self._segments['c'].configure(background=self._color)
self._segments['d'].configure(background=self._color)
elif value in ['4', '4.']:
self._segments['f'].configure(background=self._color)
self._segments['g'].configure(background=self._color)
self._segments['b'].configure(background=self._color)
self._segments['c'].configure(background=self._color)
elif value in ['5', '5.']:
self._segments['a'].configure(background=self._color)
self._segments['f'].configure(background=self._color)
self._segments['g'].configure(background=self._color)
self._segments['c'].configure(background=self._color)
self._segments['d'].configure(background=self._color)
elif value in ['6', '6.']:
self._segments['f'].configure(background=self._color)
self._segments['g'].configure(background=self._color)
self._segments['c'].configure(background=self._color)
self._segments['d'].configure(background=self._color)
self._segments['e'].configure(background=self._color)
elif value in ['7', '7.']:
self._segments['a'].configure(background=self._color)
self._segments['b'].configure(background=self._color)
self._segments['c'].configure(background=self._color)
elif value in ['8', '8.']:
self._segments['a'].configure(background=self._color)
self._segments['b'].configure(background=self._color)
self._segments['c'].configure(background=self._color)
self._segments['d'].configure(background=self._color)
self._segments['e'].configure(background=self._color)
self._segments['f'].configure(background=self._color)
self._segments['g'].configure(background=self._color)
elif value in ['9', '9.']:
self._segments['a'].configure(background=self._color)
self._segments['b'].configure(background=self._color)
self._segments['c'].configure(background=self._color)
self._segments['f'].configure(background=self._color)
self._segments['g'].configure(background=self._color)
elif value in ['-']:
self._segments['g'].configure(background=self._color)
else:
raise ValueError('unsupported character: {}'.format(value)) | Sets the value of the 7-segment display
:param value: the desired value
:return: None | entailment |
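The elif chain above hand-maps each character onto the segments that should light. A minimal standalone sketch of the same mapping expressed as a lookup table (SEGMENT_MAP and segments_for are illustrative names, not part of the widget above):

SEGMENT_MAP = {
    '0': 'abcdef', '1': 'bc', '2': 'abgde', '3': 'abgcd', '4': 'fgbc',
    '5': 'afgcd', '6': 'fgcde', '7': 'abc', '8': 'abcdefg', '9': 'abcfg',
    '-': 'g',
}

def segments_for(value: str) -> set:
    """Return the set of segment names to light for a single character."""
    lit = set()
    if '.' in value:
        lit.add('period')
        value = value.replace('.', '')
    if value not in SEGMENT_MAP:
        raise ValueError('unsupported character: {}'.format(value))
    lit.update(SEGMENT_MAP[value])
    return lit

print(sorted(segments_for('2.')))  # ['a', 'b', 'd', 'e', 'g', 'period']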
def _group(self, value: str):
"""
Takes a string and groups it appropriately with any
period or other appropriate punctuation so that it is
displayed correctly.
:param value: a string containing an integer or float
:return: None
"""
reversed_v = value[::-1]
parts = []
has_period = False
for c in reversed_v:
if has_period:
parts.append(c + '.')
has_period = False
elif c == '.':
has_period = True
else:
parts.append(c)
parts = parts[:len(self._digits)]
return parts | Takes a string and groups it appropriately with any
period or other appropriate punctuation so that it is
displayed correctly.
:param value: a string containing an integer or float
:return: None | entailment |
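Because the traversal runs over the reversed string, each '.' ends up attached to the digit it follows in the original value. A standalone copy of the grouping logic, runnable outside the widget (the four-digit limit stands in for self._digits):

def group_digits(value: str, num_digits: int = 4) -> list:
    """Walk the value right-to-left, folding each '.' into the digit before it."""
    parts, has_period = [], False
    for c in value[::-1]:
        if has_period:
            parts.append(c + '.')
            has_period = False
        elif c == '.':
            has_period = True
        else:
            parts.append(c)
    return parts[:num_digits]

print(group_digits('12.34'))  # ['4', '3', '2.', '1'] -- reversed, with '.' folded into '2'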
def set_value(self, value: str):
"""
Sets the displayed digits based on the value string.
:param value: a string containing an integer or float value
:return: None
"""
[digit.clear() for digit in self._digits]
grouped = self._group(value) # return the parts, reversed
digits = self._digits[::-1] # reverse the digits
# fill from right to left
has_period = False
for i, digit_value in enumerate(grouped):
try:
if has_period:
digits[i].set_value(digit_value + '.')
has_period = False
elif grouped[i] == '.':
has_period = True
else:
digits[i].set_value(digit_value)
except IndexError:
raise ValueError('the value "{}" contains too '
'many digits'.format(value)) | Sets the displayed digits based on the value string.
:param value: a string containing an integer or float value
:return: None | entailment |
def add_callback(self, callback: callable):
"""
Add a callback on change
:param callback: callable function
:return: None
"""
def internal_callback(*args):
try:
callback()
except TypeError:
callback(self.get())
self._var.trace('w', internal_callback) | Add a callback on change
:param callback: callable function
:return: None | entailment |
def set(self, value: int):
"""
Set the current value
:param value:
:return: None
"""
max_value = int(''.join(['1' for _ in range(self._bit_width)]), 2)
if value > max_value:
raise ValueError('the value {} is larger than '
'the maximum value {}'.format(value, max_value))
self._value = value
self._text_update() | Set the current value
:param value:
:return: None | entailment |
def get_bit(self, position: int):
"""
Returns the bit value at position
:param position: integer between 0 and <width>, inclusive
:return: the value at position as a integer
"""
if position > (self._bit_width - 1):
raise ValueError('position greater than the bit width')
if self._value & (1 << position):
return 1
else:
return 0 | Returns the bit value at position
:param position: integer between 0 and <width>, inclusive
:return: the value at position as a integer | entailment |
def toggle_bit(self, position: int):
"""
Toggles the value at position
:param position: integer between 0 and 7, inclusive
:return: None
"""
if position > (self._bit_width - 1):
raise ValueError('position greater than the bit width')
self._value ^= (1 << position)
self._text_update() | Toggles the value at position
:param position: integer between 0 and 7, inclusive
:return: None | entailment |
def set_bit(self, position: int):
"""
Sets the value at position
:param position: integer between 0 and 7, inclusive
:return: None
"""
if position > (self._bit_width - 1):
raise ValueError('position greater than the bit width')
self._value |= (1 << position)
self._text_update() | Sets the value at position
:param position: integer between 0 and 7, inclusive
:return: None | entailment |
def clear_bit(self, position: int):
"""
Clears the value at position
:param position: integer between 0 and 7, inclusive
:return: None
"""
if position > (self._bit_width - 1):
raise ValueError('position greater than the bit width')
self._value &= ~(1 << position)
self._text_update() | Clears the value at position
:param position: integer between 0 and 7, inclusive
:return: None | entailment |
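The four accessors above are thin wrappers around standard masking, so the behaviour is easy to check on a plain integer; a minimal sketch (the 8-bit width is an assumption):

width = 8
value = 0b00000000

value |= (1 << 3)                   # set bit 3    -> 0b00001000
value ^= (1 << 0)                   # toggle bit 0 -> 0b00001001
value &= ~(1 << 3)                  # clear bit 3  -> 0b00000001
bit = 1 if value & (1 << 0) else 0  # get bit 0    -> 1

max_value = (1 << width) - 1        # same limit set() computes via int('1' * width, 2)
print(format(value, '08b'), bit, max_value)  # 00000001 1 255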
def _populate(self):
""" Populate this list by calling populate(), but only once. """
if not self._populated:
logging.debug("Populating lazy list %d (%s)" % (id(self), self.__class__.__name__))
try:
self.populate()
self._populated = True
except Exception, e:
logging.debug("Currently unable to populate lazy list: %s" % e) | Populate this list by calling populate(), but only once. | entailment |
def _register_admin(admin_site, model, admin_class):
""" Register model in the admin, ignoring any previously registered models.
Alternatively it could be used in the future to replace a previously
registered model.
"""
try:
admin_site.register(model, admin_class)
except admin.sites.AlreadyRegistered:
pass | Register model in the admin, ignoring any previously registered models.
Alternatively it could be used in the future to replace a previously
registered model. | entailment |
def core_choice_fields(metadata_class):
""" If the 'optional' core fields (_site and _language) are required,
list them here.
"""
fields = []
if metadata_class._meta.use_sites:
fields.append('_site')
if metadata_class._meta.use_i18n:
fields.append('_language')
return fields | If the 'optional' core fields (_site and _language) are required,
list them here. | entailment |
def _monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site):
""" Monkey patch the inline onto the given admin_class instance. """
if model in metadata_class._meta.seo_models:
# *Not* adding to the class attribute "inlines", as this will affect
# all instances from this class. Explicitly adding to instance attribute.
admin_class_instance.__dict__['inlines'] = admin_class_instance.inlines + [inline_class]
# Because we've missed the registration, we need to perform actions
# that were done then (on admin class instantiation)
inline_instance = inline_class(admin_class_instance.model, admin_site)
admin_class_instance.inline_instances.append(inline_instance) | Monkey patch the inline onto the given admin_class instance. | entailment |
def _with_inline(func, admin_site, metadata_class, inline_class):
""" Decorator for register function that adds an appropriate inline."""
def register(model_or_iterable, admin_class=None, **options):
# Call the (bound) function we were given.
# We have to assume it will be bound to admin_site
func(model_or_iterable, admin_class, **options)
_monkey_inline(model_or_iterable, admin_site._registry[model_or_iterable], metadata_class, inline_class, admin_site)
return register | Decorator for register function that adds an appropriate inline. | entailment |
def auto_register_inlines(admin_site, metadata_class):
""" This is a questionable function that automatically adds our metadata
inline to all relevant models in the site.
"""
inline_class = get_inline(metadata_class)
for model, admin_class_instance in admin_site._registry.items():
_monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site)
# Monkey patch the register method to automatically add an inline for this site.
# _with_inline() is a decorator that wraps the register function with the same injection code
# used above (_monkey_inline).
admin_site.register = _with_inline(admin_site.register, admin_site, metadata_class, inline_class) | This is a questionable function that automatically adds our metadata
inline to all relevant models in the site. | entailment |
def get_linked_metadata(obj, name=None, context=None, site=None, language=None):
""" Gets metadata linked from the given object. """
# XXX Check that 'modelinstance' and 'model' metadata are installed in backends
# I believe that get_model() would return None if not
Metadata = _get_metadata_model(name)
InstanceMetadata = Metadata._meta.get_model('modelinstance')
ModelMetadata = Metadata._meta.get_model('model')
content_type = ContentType.objects.get_for_model(obj)
instances = []
if InstanceMetadata is not None:
try:
instance_md = InstanceMetadata.objects.get(_content_type=content_type, _object_id=obj.pk)
except InstanceMetadata.DoesNotExist:
instance_md = InstanceMetadata(_content_object=obj)
instances.append(instance_md)
if ModelMetadata is not None:
try:
model_md = ModelMetadata.objects.get(_content_type=content_type)
except ModelMetadata.DoesNotExist:
model_md = ModelMetadata(_content_type=content_type)
instances.append(model_md)
return FormattedMetadata(Metadata, instances, '', site, language) | Gets metadata linked from the given object. | entailment |
def populate_metadata(model, MetadataClass):
""" For a given model and metadata class, ensure there is metadata for every instance.
"""
content_type = ContentType.objects.get_for_model(model)
for instance in model.objects.all():
create_metadata_instance(MetadataClass, instance) | For a given model and metadata class, ensure there is metadata for every instance. | entailment |
def __instances(self):
""" Cache instances, allowing generators to be used and reused.
This fills a cache as the generator gets emptied, eventually
reading exclusively from the cache.
"""
for instance in self.__instances_cache:
yield instance
for instance in self.__instances_original:
self.__instances_cache.append(instance)
yield instance | Cache instances, allowing generators to be used and reused.
This fills a cache as the generator gets emptied, eventually
reading exclusively from the cache. | entailment |
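The pattern here -- serve from a cache first, then drain the original generator while filling that cache -- can be sketched on its own; ReplayableIterable is an illustrative name, not the class above:

class ReplayableIterable:
    """Wrap an iterable so it can be iterated repeatedly, caching items
    as the underlying iterator is consumed."""
    def __init__(self, iterable):
        self._source = iter(iterable)
        self._cache = []

    def __iter__(self):
        for item in self._cache:       # serve anything already cached
            yield item
        for item in self._source:      # then drain the source, caching as we go
            self._cache.append(item)
            yield item

squares = ReplayableIterable(n * n for n in range(4))
print(list(squares))  # [0, 1, 4, 9] -- consumes the generator
print(list(squares))  # [0, 1, 4, 9] -- served entirely from the cache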
def _resolve_value(self, name):
""" Returns an appropriate value for the given name.
This simply asks each of the instances for a value.
"""
for instance in self.__instances():
value = instance._resolve_value(name)
if value:
return value
# Otherwise, return an appropriate default value (populate_from)
# TODO: This is duplicated in meta_models. Move this to a common home.
if name in self.__metadata._meta.elements:
populate_from = self.__metadata._meta.elements[name].populate_from
if callable(populate_from):
return populate_from(None)
elif isinstance(populate_from, Literal):
return populate_from.value
elif populate_from is not NotSet:
return self._resolve_value(populate_from) | Returns an appropriate value for the given name.
This simply asks each of the instances for a value. | entailment |
def _get_formatted_data(cls, path, context=None, site=None, language=None):
""" Return an object to conveniently access the appropriate values. """
return FormattedMetadata(cls(), cls._get_instances(path, context, site, language), path, site, language) | Return an object to conveniently access the appropriate values. | entailment |
def _get_instances(cls, path, context=None, site=None, language=None):
""" A sequence of instances to discover metadata.
Each instance from each backend is looked up when possible/necessary.
This is a generator to eliminate unnecessary queries.
"""
backend_context = {'view_context': context }
for model in cls._meta.models.values():
for instance in model.objects.get_instances(path, site, language, backend_context) or []:
if hasattr(instance, '_process_context'):
instance._process_context(backend_context)
yield instance | A sequence of instances to discover metadata.
Each instance from each backend is looked up when possible/necessary.
This is a generator to eliminate unnecessary queries. | entailment |
def _resolve(value, model_instance=None, context=None):
""" Resolves any template references in the given value.
"""
if isinstance(value, basestring) and "{" in value:
if context is None:
context = Context()
if model_instance is not None:
context[model_instance._meta.module_name] = model_instance
value = Template(value).render(context)
return value | Resolves any template references in the given value. | entailment |
def validate(options):
""" Validates the application of this backend to a given metadata
"""
try:
if options.backends.index('modelinstance') > options.backends.index('model'):
raise Exception("Metadata backend 'modelinstance' must come before 'model' backend")
except ValueError:
raise Exception("Metadata backend 'modelinstance' must be installed in order to use 'model' backend") | Validates the application of this backend to a given metadata | entailment |
def _register_elements(self, elements):
""" Takes elements from the metadata class and creates a base model for all backend models .
"""
self.elements = elements
for key, obj in elements.items():
obj.contribute_to_class(self.metadata, key)
# Create the common Django fields
fields = {}
for key, obj in elements.items():
if obj.editable:
field = obj.get_field()
if not field.help_text:
if key in self.bulk_help_text:
field.help_text = self.bulk_help_text[key]
fields[key] = field
# 0. Abstract base model with common fields
base_meta = type('Meta', (), self.original_meta)
class BaseMeta(base_meta):
abstract = True
app_label = 'seo'
fields['Meta'] = BaseMeta
# Do we need this?
fields['__module__'] = __name__ #attrs['__module__']
self.MetadataBaseModel = type('%sBase' % self.name, (models.Model,), fields) | Takes elements from the metadata class and creates a base model for all backend models. | entailment
def _add_backend(self, backend):
""" Builds a subclass model for the given backend """
md_type = backend.verbose_name
base = backend().get_model(self)
# TODO: Rename this field
new_md_attrs = {'_metadata': self.metadata, '__module__': __name__ }
new_md_meta = {}
new_md_meta['verbose_name'] = '%s (%s)' % (self.verbose_name, md_type)
new_md_meta['verbose_name_plural'] = '%s (%s)' % (self.verbose_name_plural, md_type)
new_md_meta['unique_together'] = base._meta.unique_together
new_md_attrs['Meta'] = type("Meta", (), new_md_meta)
new_md_attrs['_metadata_type'] = backend.name
model = type("%s%s"%(self.name,"".join(md_type.split())), (base, self.MetadataBaseModel), new_md_attrs.copy())
self.models[backend.name] = model
# This is a little dangerous, but because we set __module__ to __name__, the model needs to be accessible here
globals()[model.__name__] = model | Builds a subclass model for the given backend | entailment |
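The subclass here is assembled at runtime with the three-argument form of type(); a small illustration of that technique using plain classes instead of Django models (all names hypothetical):

class Base(object):
    def describe(self):
        return '%s (%s)' % (self.verbose_name, self.backend_name)

attrs = {
    'verbose_name': 'Metadata',
    'backend_name': 'path',
    '__module__': __name__,  # lets the generated class be looked up from this module
}
PathMetadata = type('PathMetadata', (Base,), attrs)
globals()[PathMetadata.__name__] = PathMetadata  # mirror the module-level registration above

print(PathMetadata().describe())  # Metadata (path)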
def _set_seo_models(self, value):
""" Gets the actual models to be used. """
seo_models = []
for model_name in value:
if "." in model_name:
app_label, model_name = model_name.split(".", 1)
model = models.get_model(app_label, model_name)
if model:
seo_models.append(model)
else:
app = models.get_app(model_name)
if app:
seo_models.extend(models.get_models(app))
self.seo_models = seo_models | Gets the actual models to be used. | entailment |
def validate(self):
""" Discover certain illegal configurations """
if not self.editable:
assert self.populate_from is not NotSet, u"If field (%s) is not editable, you must set populate_from" % self.name | Discover certain illegal configurations | entailment |
def populate_all_metadata():
""" Create metadata instances for all models in seo_models if empty.
Once you have created a single metadata instance, this will not run.
This is because it is a potentially slow operation that need only be
done once. If you want to ensure that everything is populated, run the
populate_metadata management command.
"""
for Metadata in registry.values():
InstanceMetadata = Metadata._meta.get_model('modelinstance')
if InstanceMetadata is not None:
for model in Metadata._meta.seo_models:
populate_metadata(model, InstanceMetadata) | Create metadata instances for all models in seo_models if empty.
Once you have created a single metadata instance, this will not run.
This is because it is a potentially slow operation that need only be
done once. If you want to ensure that everything is populated, run the
populate_metadata management command. | entailment |
def populate(self):
""" Populate this list with all views that take no arguments.
"""
from django.conf import settings
from django.core import urlresolvers
self.append(("", ""))
urlconf = settings.ROOT_URLCONF
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
# Collect base level views
for key, value in resolver.reverse_dict.items():
if isinstance(key, basestring):
args = value[0][0][1]
url = "/" + value[0][0][0]
self.append((key, " ".join(key.split("_"))))
# Collect namespaces (TODO: merge these two sections into one)
for namespace, url in resolver.namespace_dict.items():
for key, value in url[1].reverse_dict.items():
if isinstance(key, basestring):
args = value[0][0][1]
full_key = '%s:%s' % (namespace, key)
self.append((full_key, "%s: %s" % (namespace, " ".join(key.split("_")))))
self.sort() | Populate this list with all views that take no arguments. | entailment |
def block_splitter(data, block_size):
"""
Creates a generator by slicing ``data`` into chunks of ``block_size``.
>>> data = range(10)
>>> list(block_splitter(data, 2))
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
If ``data`` cannot be evenly divided by ``block_size``, the last block will
simply be the remainder of the data. Example:
>>> data = range(10)
>>> list(block_splitter(data, 3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
If the ``block_size`` is greater than the total length of ``data``, a
single block will be generated:
>>> data = range(3)
>>> list(block_splitter(data, 4))
[[0, 1, 2]]
:param data:
Any iterable. If ``data`` is a generator, it will be exhausted,
obviously.
:param int block_size:
Desired (maximum) block size.
"""
buf = []
for i, datum in enumerate(data):
buf.append(datum)
if len(buf) == block_size:
yield buf
buf = []
# If there's anything leftover (a partial block),
# yield it as well.
if buf:
yield buf | Creates a generator by slicing ``data`` into chunks of ``block_size``.
>>> data = range(10)
>>> list(block_splitter(data, 2))
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
If ``data`` cannot be evenly divided by ``block_size``, the last block will
simply be the remainder of the data. Example:
>>> data = range(10)
>>> list(block_splitter(data, 3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
If the ``block_size`` is greater than the total length of ``data``, a
single block will be generated:
>>> data = range(3)
>>> list(block_splitter(data, 4))
[[0, 1, 2]]
:param data:
Any iterable. If ``data`` is a generator, it will be exhausted,
obviously.
:param int block_size:
Desired (maximum) block size. | entailment |
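An equivalent way to express the same chunking is to pull fixed-size slices with itertools.islice instead of counting into a buffer; a sketch, not the library's own implementation:

from itertools import islice

def block_splitter_islice(data, block_size):
    """Yield lists of at most block_size items from any iterable."""
    it = iter(data)
    while True:
        block = list(islice(it, block_size))
        if not block:
            return
        yield block

print(list(block_splitter_islice(range(10), 3)))  # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]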
def round_geom(geom, precision=None):
"""Round coordinates of a geometric object to given precision."""
if geom['type'] == 'Point':
x, y = geom['coordinates']
xp, yp = [x], [y]
if precision is not None:
xp = [round(v, precision) for v in xp]
yp = [round(v, precision) for v in yp]
new_coords = tuple(zip(xp, yp))[0]
if geom['type'] in ['LineString', 'MultiPoint']:
xp, yp = zip(*geom['coordinates'])
if precision is not None:
xp = [round(v, precision) for v in xp]
yp = [round(v, precision) for v in yp]
new_coords = tuple(zip(xp, yp))
elif geom['type'] in ['Polygon', 'MultiLineString']:
new_coords = []
for piece in geom['coordinates']:
xp, yp = zip(*piece)
if precision is not None:
xp = [round(v, precision) for v in xp]
yp = [round(v, precision) for v in yp]
new_coords.append(tuple(zip(xp, yp)))
elif geom['type'] == 'MultiPolygon':
parts = geom['coordinates']
new_coords = []
for part in parts:
inner_coords = []
for ring in part:
xp, yp = zip(*ring)
if precision is not None:
xp = [round(v, precision) for v in xp]
yp = [round(v, precision) for v in yp]
inner_coords.append(tuple(zip(xp, yp)))
new_coords.append(inner_coords)
return {'type': geom['type'], 'coordinates': new_coords} | Round coordinates of a geometric object to given precision. | entailment |
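Every branch above applies the same per-ring rounding, so the idea can also be written as one recursive pass over the nested coordinate lists; an alternative sketch, not the module's code:

def round_coords(coords, precision):
    """Recursively round nested coordinate sequences to `precision` digits."""
    if isinstance(coords, (int, float)):
        return round(coords, precision)
    return [round_coords(c, precision) for c in coords]

geom = {'type': 'LineString',
        'coordinates': [[0.123456, 1.654321], [2.111111, 3.999999]]}
rounded = {'type': geom['type'],
           'coordinates': round_coords(geom['coordinates'], 3)}
print(rounded)  # {'type': 'LineString', 'coordinates': [[0.123, 1.654], [2.111, 4.0]]}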
def flatten_multi_dim(sequence):
"""Flatten a multi-dimensional array-like to a single dimensional sequence
(as a generator).
"""
for x in sequence:
if (isinstance(x, collections.Iterable)
and not isinstance(x, six.string_types)):
for y in flatten_multi_dim(x):
yield y
else:
yield x | Flatten a multi-dimensional array-like to a single dimensional sequence
(as a generator). | entailment |
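The same flattening can be written without the six dependency; note that collections.Iterable (used above) was removed in Python 3.10, so a modern sketch uses collections.abc:

from collections.abc import Iterable

def flatten(sequence):
    """Yield every scalar from an arbitrarily nested iterable, left to right."""
    for x in sequence:
        if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
            yield from flatten(x)
        else:
            yield x

print(list(flatten([[1, [2, 3]], (4,), 'ab'])))  # [1, 2, 3, 4, 'ab']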
def cli(input, verbose, quiet, output_format, precision, indent):
"""Convert text read from the first positional argument, stdin, or
a file to GeoJSON and write to stdout."""
verbosity = verbose - quiet
configure_logging(verbosity)
logger = logging.getLogger('geomet')
# Handle the case of file, stream, or string input.
try:
src = click.open_file(input).readlines()
except IOError:
src = [input]
stdout = click.get_text_stream('stdout')
# Read-write loop.
try:
for line in src:
text = line.strip()
logger.debug("Input: %r", text)
output = translate(
text,
output_format=output_format,
indent=indent,
precision=precision
)
logger.debug("Output: %r", output)
stdout.write(output)
stdout.write('\n')
sys.exit(0)
except Exception:
logger.exception("Failed. Exception caught")
sys.exit(1) | Convert text read from the first positional argument, stdin, or
a file to GeoJSON and write to stdout. | entailment |
def _get_geom_type(type_bytes):
"""Get the GeoJSON geometry type label from a WKB type byte string.
:param type_bytes:
4 byte string in big endian byte order containing a WKB type number.
It may also contain a "has SRID" flag in the high byte (the first type,
since this is big endian byte order), indicated as 0x20. If the SRID
flag is not set, the high byte will always be null (0x00).
:returns:
3-tuple of GeoJSON geometry type label, the bytes representing the
geometry type, and a separate "has SRID" flag. If the input
`type_bytes` contains an SRID flag, it will be removed.
>>> # Z Point, with SRID flag
>>> _get_geom_type(b'\\x20\\x00\\x03\\xe9') == (
... 'Point', b'\\x00\\x00\\x03\\xe9', True)
True
>>> # 2D MultiLineString, without SRID flag
>>> _get_geom_type(b'\\x00\\x00\\x00\\x05') == (
... 'MultiLineString', b'\\x00\\x00\\x00\\x05', False)
True
"""
# slice off the high byte, which may contain the SRID flag
high_byte = type_bytes[0]
if six.PY3:
high_byte = bytes([high_byte])
has_srid = high_byte == b'\x20'
if has_srid:
# replace the high byte with a null byte
type_bytes = as_bin_str(b'\x00' + type_bytes[1:])
else:
type_bytes = as_bin_str(type_bytes)
# look up the geometry type
geom_type = _BINARY_TO_GEOM_TYPE.get(type_bytes)
return geom_type, type_bytes, has_srid | Get the GeoJSON geometry type label from a WKB type byte string.
:param type_bytes:
4 byte string in big endian byte order containing a WKB type number.
It may also contain a "has SRID" flag in the high byte (the first byte,
since this is big endian byte order), indicated as 0x20. If the SRID
flag is not set, the high byte will always be null (0x00).
:returns:
3-tuple of GeoJSON geometry type label, the bytes representing the
geometry type, and a separate "has SRID" flag. If the input
`type_bytes` contains an SRID flag, it will be removed.
>>> # Z Point, with SRID flag
>>> _get_geom_type(b'\\x20\\x00\\x03\\xe9') == (
... 'Point', b'\\x00\\x00\\x03\\xe9', True)
True
>>> # 2D MultiLineString, without SRID flag
>>> _get_geom_type(b'\\x00\\x00\\x00\\x05') == (
... 'MultiLineString', b'\\x00\\x00\\x00\\x05', False)
True | entailment |
def dumps(obj, big_endian=True):
"""
Dump a GeoJSON-like `dict` to a WKB string.
.. note::
The dimensions of the generated WKB will be inferred from the first
vertex in the GeoJSON `coordinates`. It will be assumed that all
vertices are uniform. There are 4 types:
- 2D (X, Y): 2-dimensional geometry
- Z (X, Y, Z): 3-dimensional geometry
- M (X, Y, M): 2-dimensional geometry with a "Measure"
- ZM (X, Y, Z, M): 3-dimensional geometry with a "Measure"
If the first vertex contains 2 values, we assume a 2D geometry.
If the first vertex contains 3 values, this is slightly ambiguous and
so the most common case is chosen: Z.
If the first vertex contains 4 values, we assume a ZM geometry.
The WKT/WKB standards provide a way of differentiating normal (2D), Z,
M, and ZM geometries (http://en.wikipedia.org/wiki/Well-known_text),
but the GeoJSON spec does not. Therefore, for the sake of interface
simplicity, we assume that geometry that looks 3D contains XYZ
components, instead of XYM.
If the coordinates list has no coordinate values (this includes nested
lists, for example, `[[[[],[]], []]]`), the geometry is considered to be
empty. Geometries, with the exception of points, have a reasonable
"empty" representation in WKB; however, without knowing the number of
coordinate values per vertex, the type is ambiguous, and thus we don't
know if the geometry type is 2D, Z, M, or ZM. Therefore in this case
we expect a `ValueError` to be raised.
:param dict obj:
GeoJson-like `dict` object.
:param bool big_endian:
Defaults to `True`. If `True`, data values in the generated WKB will
be represented using big endian byte order. Else, little endian.
TODO: remove this
:param str dims:
Indicates the WKB representation desired when converting the given
GeoJSON `dict` ``obj``. The accepted values are:
* '2D': 2-dimensional geometry (X, Y)
* 'Z': 3-dimensional geometry (X, Y, Z)
* 'M': 3-dimensional geometry (X, Y, M)
* 'ZM': 4-dimensional geometry (X, Y, Z, M)
:returns:
A WKB binary string representation of the ``obj``.
"""
geom_type = obj['type']
meta = obj.get('meta', {})
exporter = _dumps_registry.get(geom_type)
if exporter is None:
_unsupported_geom_type(geom_type)
# Check for empty geometries. GeometryCollections have a slightly different
# JSON/dict structure, but that's handled.
coords_or_geoms = obj.get('coordinates', obj.get('geometries'))
if len(list(flatten_multi_dim(coords_or_geoms))) == 0:
raise ValueError(
'Empty geometries cannot be represented in WKB. Reason: The '
'dimensionality of the WKB would be ambiguous.'
)
return exporter(obj, big_endian, meta) | Dump a GeoJSON-like `dict` to a WKB string.
.. note::
The dimensions of the generated WKB will be inferred from the first
vertex in the GeoJSON `coordinates`. It will be assumed that all
vertices are uniform. There are 4 types:
- 2D (X, Y): 2-dimensional geometry
- Z (X, Y, Z): 3-dimensional geometry
- M (X, Y, M): 2-dimensional geometry with a "Measure"
- ZM (X, Y, Z, M): 3-dimensional geometry with a "Measure"
If the first vertex contains 2 values, we assume a 2D geometry.
If the first vertex contains 3 values, this is slightly ambiguous and
so the most common case is chosen: Z.
If the first vertex contains 4 values, we assume a ZM geometry.
The WKT/WKB standards provide a way of differentiating normal (2D), Z,
M, and ZM geometries (http://en.wikipedia.org/wiki/Well-known_text),
but the GeoJSON spec does not. Therefore, for the sake of interface
simplicity, we assume that geometry that looks 3D contains XYZ
components, instead of XYM.
If the coordinates list has no coordinate values (this includes nested
lists, for example, `[[[[],[]], []]]`), the geometry is considered to be
empty. Geometries, with the exception of points, have a reasonable
"empty" representation in WKB; however, without knowing the number of
coordinate values per vertex, the type is ambiguous, and thus we don't
know if the geometry type is 2D, Z, M, or ZM. Therefore in this case
we expect a `ValueError` to be raised.
:param dict obj:
GeoJson-like `dict` object.
:param bool big_endian:
Defaults to `True`. If `True`, data values in the generated WKB will
be represented using big endian byte order. Else, little endian.
TODO: remove this
:param str dims:
Indicates the WKB representation desired when converting the given
GeoJSON `dict` ``obj``. The accepted values are:
* '2D': 2-dimensional geometry (X, Y)
* 'Z': 3-dimensional geometry (X, Y, Z)
* 'M': 3-dimensional geometry (X, Y, M)
* 'ZM': 4-dimensional geometry (X, Y, Z, M)
:returns:
A WKB binary string representation of the ``obj``. | entailment
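Assuming this is geomet's wkb module as published on PyPI, typical usage is a GeoJSON-like dict in and WKB bytes out; a small round-trip sketch:

from geomet import wkb  # assumes the geomet package is installed

point = {'type': 'Point', 'coordinates': [1.0, 2.0]}
data = wkb.dumps(point, big_endian=False)
print(data.hex())       # 0101000000000000000000f03f0000000000000040
print(wkb.loads(data))  # round-trips back to an equivalent GeoJSON dict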
def loads(string):
"""
Construct a GeoJSON `dict` from WKB (`string`).
The resulting GeoJSON `dict` will include the SRID as an integer in the
`meta` object. This was an arbitrary decision made by `geomet`, the
discussion of which took place here:
https://github.com/geomet/geomet/issues/28.
In order to be consistent with other libraries [1] and (deprecated)
specifications [2], also include the same information in a `crs`
object. This isn't ideal, but the `crs` member is no longer part of
the GeoJSON standard, according to RFC7946 [3]. However, it's still
useful to include this information in GeoJSON payloads because it
supports conversion to EWKT/EWKB (which are canonical formats used by
PostGIS and the like).
Example:
{'type': 'Point',
'coordinates': [0.0, 1.0],
'meta': {'srid': 4326},
'crs': {'type': 'name', 'properties': {'name': 'EPSG4326'}}}
NOTE(larsbutler): I'm not sure if it's valid to just prefix EPSG
(European Petroleum Survey Group) to an SRID like this, but we'll
stick with it for now until it becomes a problem.
NOTE(larsbutler): Ideally, we should use URNs instead of this
notation, according to the new GeoJSON spec [4]. However, in
order to be consistent with [1], we'll stick with this approach
for now.
References:
[1] - https://github.com/bryanjos/geo/issues/76
[2] - http://geojson.org/geojson-spec.html#coordinate-reference-system-objects
[3] - https://tools.ietf.org/html/rfc7946#appendix-B.1
[4] - https://tools.ietf.org/html/rfc7946#section-4
""" # noqa
string = iter(string)
# endianness = string[0:1]
endianness = as_bin_str(take(1, string))
if endianness == BIG_ENDIAN:
big_endian = True
elif endianness == LITTLE_ENDIAN:
big_endian = False
else:
raise ValueError("Invalid endian byte: '0x%s'. Expected 0x00 or 0x01"
% binascii.hexlify(endianness.encode()).decode())
endian_token = '>' if big_endian else '<'
# type_bytes = string[1:5]
type_bytes = as_bin_str(take(4, string))
if not big_endian:
# To identify the type, order the type bytes in big endian:
type_bytes = type_bytes[::-1]
geom_type, type_bytes, has_srid = _get_geom_type(type_bytes)
srid = None
if has_srid:
srid_field = as_bin_str(take(4, string))
[srid] = struct.unpack('%si' % endian_token, srid_field)
# data_bytes = string[5:] # FIXME: This won't work for GeometryCollections
data_bytes = string
importer = _loads_registry.get(geom_type)
if importer is None:
_unsupported_geom_type(geom_type)
data_bytes = iter(data_bytes)
result = importer(big_endian, type_bytes, data_bytes)
if has_srid:
# As mentioned in the docstring above, include both approaches to
# indicating the SRID.
result['meta'] = {'srid': int(srid)}
result['crs'] = {
'type': 'name',
'properties': {'name': 'EPSG%s' % srid},
}
return result | Construct a GeoJSON `dict` from WKB (`string`).
The resulting GeoJSON `dict` will include the SRID as an integer in the
`meta` object. This was an arbitrary decision made by `geomet`, the
discussion of which took place here:
https://github.com/geomet/geomet/issues/28.
In order to be consistent with other libraries [1] and (deprecated)
specifications [2], also include the same information in a `crs`
object. This isn't ideal, but the `crs` member is no longer part of
the GeoJSON standard, according to RFC7946 [3]. However, it's still
useful to include this information in GeoJSON payloads because it
supports conversion to EWKT/EWKB (which are canonical formats used by
PostGIS and the like).
Example:
{'type': 'Point',
'coordinates': [0.0, 1.0],
'meta': {'srid': 4326},
'crs': {'type': 'name', 'properties': {'name': 'EPSG4326'}}}
NOTE(larsbutler): I'm not sure if it's valid to just prefix EPSG
(European Petroleum Survey Group) to an SRID like this, but we'll
stick with it for now until it becomes a problem.
NOTE(larsbutler): Ideally, we should use URNs instead of this
notation, according to the new GeoJSON spec [4]. However, in
order to be consistent with [1], we'll stick with this approach
for now.
References:
[1] - https://github.com/bryanjos/geo/issues/76
[2] - http://geojson.org/geojson-spec.html#coordinate-reference-system-objects
[3] - https://tools.ietf.org/html/rfc7946#appendix-B.1
[4] - https://tools.ietf.org/html/rfc7946#section-4 | entailment |
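The SRID handling described in the docstring can be seen with a round trip through the meta key (again assuming the published geomet package; EPSG 4326 is just an example):

from geomet import wkb  # assumes the geomet package is installed

point = {'type': 'Point', 'coordinates': [0.0, 1.0], 'meta': {'srid': 4326}}
ewkb = wkb.dumps(point)       # the SRID flag is set in the type header
restored = wkb.loads(ewkb)
print(restored['meta'])       # {'srid': 4326}
print(restored['crs'])        # {'type': 'name', 'properties': {'name': 'EPSG4326'}}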
def _header_bytefmt_byteorder(geom_type, num_dims, big_endian, meta=None):
"""
Utility function to get the WKB header (endian byte + type header), byte
format string, and byte order string.
"""
dim = _INT_TO_DIM_LABEL.get(num_dims)
if dim is None:
pass # TODO: raise
type_byte_str = _WKB[dim][geom_type]
srid = meta.get('srid')
if srid is not None:
# Add the srid flag
type_byte_str = SRID_FLAG + type_byte_str[1:]
if big_endian:
header = BIG_ENDIAN
byte_fmt = b'>'
byte_order = '>'
else:
header = LITTLE_ENDIAN
byte_fmt = b'<'
byte_order = '<'
# reverse the byte ordering for little endian
type_byte_str = type_byte_str[::-1]
header += type_byte_str
if srid is not None:
srid = int(srid)
if big_endian:
srid_header = struct.pack('>i', srid)
else:
srid_header = struct.pack('<i', srid)
header += srid_header
byte_fmt += b'd' * num_dims
return header, byte_fmt, byte_order | Utility function to get the WKB header (endian byte + type header), byte
format string, and byte order string. | entailment |