max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
cadl/dataset_utils.py | muxgt/pycadl | 398 | 11135305 | """Utils for creating datasets.
"""
"""
Copyright 2017 <NAME>. See also NOTICE.md.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pickle
import numpy as np
import tensorflow as tf
from . import dft
from .utils import download_and_extract_zip, download_and_extract_tar
def create_input_pipeline(files,
batch_size,
n_epochs,
shape,
crop_shape=None,
crop_factor=1.0,
n_threads=2):
"""Creates a pipefile from a list of image files.
Includes batch generator/central crop/resizing options.
The resulting generator will dequeue the images batch_size at a time until
it throws tf.errors.OutOfRangeError when there are no more images left in
the queue.
Parameters
----------
files : list
List of paths to image files.
batch_size : int
Number of image files to load at a time.
n_epochs : int
Number of epochs to run before raising tf.errors.OutOfRangeError
shape : list
[height, width, channels]
crop_shape : list
[height, width] to crop image to.
crop_factor : float
Percentage of image to take starting from center.
n_threads : int, optional
Number of threads to use for batch shuffling
    Returns
    -------
    batch : tf.Tensor
        A batch of `batch_size` cropped and resized images, dequeued from the
        shuffling queue.
"""
# We first create a "producer" queue. It creates a production line which
# will queue up the file names and allow another queue to deque the file
# names all using a tf queue runner.
# Put simply, this is the entry point of the computational graph.
# It will generate the list of file names.
# We also specify it's capacity beforehand.
producer = tf.train.string_input_producer(
files, capacity=len(files), num_epochs=n_epochs)
    # We need something which can open the files and read their contents.
reader = tf.WholeFileReader()
# We pass the filenames to this object which can read the file's contents.
    # This will create another queue runner which dequeues the previous queue.
keys, vals = reader.read(producer)
    # We then decode its contents, since we know it is a JPEG image.
imgs = tf.image.decode_jpeg(
vals, channels=3 if len(shape) > 2 and shape[2] == 3 else 0)
# We have to explicitly define the shape of the tensor.
# This is because the decode_jpeg operation is still a node in the graph
# and doesn't yet know the shape of the image. Future operations however
# need explicit knowledge of the image's shape in order to be created.
imgs.set_shape(shape)
    # Next we'll resize and centrally crop the image to `crop_shape`,
    # optionally zooming in first according to `crop_factor`.
    # This operation requires explicit knowledge of the image's shape,
    # and only applies when a `crop_shape` is given.
    if crop_shape is not None:
        if shape[0] > shape[1]:
            rsz_shape = [
                int(shape[0] / shape[1] * crop_shape[0] / crop_factor),
                int(crop_shape[1] / crop_factor)
            ]
        else:
            rsz_shape = [
                int(crop_shape[0] / crop_factor),
                int(shape[1] / shape[0] * crop_shape[1] / crop_factor)
            ]
        rszs = tf.image.resize_images(imgs, rsz_shape)
        crops = tf.image.resize_image_with_crop_or_pad(rszs, crop_shape[0],
                                                       crop_shape[1])
    else:
        crops = imgs
# Now we'll create a batch generator that will also shuffle our examples.
# We tell it how many it should have in its buffer when it randomly
# permutes the order.
min_after_dequeue = len(files) // 100
# The capacity should be larger than min_after_dequeue, and determines how
# many examples are prefetched. TF docs recommend setting this value to:
# min_after_dequeue + (num_threads + a small safety margin) * batch_size
capacity = min_after_dequeue + (n_threads + 1) * batch_size
# Randomize the order and output batches of batch_size.
batch = tf.train.shuffle_batch(
[crops],
enqueue_many=False,
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
num_threads=n_threads)
# alternatively, we could use shuffle_batch_join to use multiple reader
# instances, or set shuffle_batch's n_threads to higher than 1.
return batch
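# A minimal usage sketch for `create_input_pipeline` (assumes TF1-style graph
# execution; the file list, image shape and batch size below are illustrative):
#   batch = create_input_pipeline(files, batch_size=8, n_epochs=1,
#                                 shape=[218, 178, 3], crop_shape=[64, 64])
#   sess = tf.Session()
#   sess.run(tf.group(tf.global_variables_initializer(),
#                     tf.local_variables_initializer()))
#   coord = tf.train.Coordinator()
#   threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#   try:
#       while True:
#           imgs = sess.run(batch)
#   except tf.errors.OutOfRangeError:
#       pass
#   coord.request_stop()
#   coord.join(threads)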
def gtzan_music_speech_download(dst='gtzan_music_speech'):
"""Download the GTZAN music and speech dataset.
Parameters
----------
dst : str, optional
        Location to put the GTZAN music and speech dataset.
"""
path = 'http://opihi.cs.uvic.ca/sound/music_speech.tar.gz'
download_and_extract_tar(path, dst)
def gtzan_music_speech_load(dst='gtzan_music_speech'):
"""Load the GTZAN Music and Speech dataset.
Downloads the dataset if it does not exist into the dst directory.
Parameters
----------
dst : str, optional
Location of GTZAN Music and Speech dataset.
Returns
-------
Xs, ys : np.ndarray, np.ndarray
Array of data, Array of labels
"""
from scipy.io import wavfile
if not os.path.exists(dst):
gtzan_music_speech_download(dst)
music_dir = os.path.join(os.path.join(dst, 'music_speech'), 'music_wav')
music = [
os.path.join(music_dir, file_i) for file_i in os.listdir(music_dir)
if file_i.endswith('.wav')
]
speech_dir = os.path.join(os.path.join(dst, 'music_speech'), 'speech_wav')
speech = [
os.path.join(speech_dir, file_i) for file_i in os.listdir(speech_dir)
if file_i.endswith('.wav')
]
Xs = []
ys = []
for i in music:
sr, s = wavfile.read(i)
s = s / 16384.0 - 1.0
re, im = dft.dft_np(s)
mag, phs = dft.ztoc(re, im)
Xs.append((mag, phs))
ys.append(0)
for i in speech:
sr, s = wavfile.read(i)
s = s / 16384.0 - 1.0
re, im = dft.dft_np(s)
mag, phs = dft.ztoc(re, im)
Xs.append((mag, phs))
ys.append(1)
Xs = np.array(Xs)
Xs = np.transpose(Xs, [0, 2, 3, 1])
ys = np.array(ys)
return Xs, ys
def cifar10_download(dst='cifar10'):
"""Download the CIFAR10 dataset.
Parameters
----------
dst : str, optional
Directory to download into.
"""
path = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
download_and_extract_tar(path, dst)
def tiny_imagenet_load(dst='tiny_imagenet'):
"""Loads the paths to every file in the Tiny Imagenet Dataset.
Downloads the dataset if it does not exist into the dst directory.
Parameters
----------
dst : str, optional
Location of Tiny ImageNet dataset.
Returns
-------
all_files : list
List of paths to every file in the Tiny ImageNet Dataset
"""
if not os.path.exists(dst):
tiny_imagenet_download(dst)
all_files = []
all_labels = []
words = {}
with open(
os.path.join(os.path.join(dst, 'tiny-imagenet-200'), 'words.txt'),
'r') as fp:
for line in fp:
s = line.split('\t', maxsplit=1)
words.update({s[0]: s[1].strip()})
for ds_type in ['train', 'val', 'test']:
path = os.path.join(dst, 'tiny-imagenet-200')
path = os.path.join(path, ds_type)
for root, dirs, files in os.walk(path):
for f in files:
if f.endswith('JPEG'):
if ds_type == 'train':
try:
label = words[root.split('/')[-2]]
except:
print(root, f)
raise
else:
label = ''
all_files.append(os.path.join(root, f))
all_labels.append(label)
return all_files, all_labels
def tiny_imagenet_download(dst='tiny_imagenet'):
"""Download the Tiny ImageNet dataset.
Parameters
----------
dst : str, optional
Directory to download into.
"""
path = 'http://cs231n.stanford.edu/tiny-imagenet-200.zip'
download_and_extract_zip(path, dst)
def cifar10_load(dst='cifar10'):
"""Load the CIFAR10 dataset.
Downloads the dataset if it does not exist into the dst directory.
Parameters
----------
dst : str, optional
Location of CIFAR10 dataset.
Returns
-------
Xs, ys : np.ndarray, np.ndarray
Array of data, Array of labels
"""
if not os.path.exists(dst):
cifar10_download(dst)
Xs = None
ys = None
for f in range(1, 6):
cf = pickle.load(
open('%s/cifar-10-batches-py/data_batch_%d' % (dst, f), 'rb'),
encoding='LATIN')
if Xs is not None:
Xs = np.r_[Xs, cf['data']]
ys = np.r_[ys, np.array(cf['labels'])]
else:
Xs = cf['data']
ys = cf['labels']
Xs = np.swapaxes(np.swapaxes(Xs.reshape(-1, 3, 32, 32), 1, 3), 1, 2)
return Xs, ys
def dense_to_one_hot(labels, n_classes=2):
"""Convert class labels from scalars to one-hot vectors.
Parameters
----------
labels : array
Input labels to convert to one-hot representation.
n_classes : int, optional
        Number of possible classes for the one-hot encoding.
Returns
-------
one_hot : array
One hot representation of input.
"""
return np.eye(n_classes).astype(np.float32)[labels]
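# Worked example for `dense_to_one_hot` (illustrative values):
#   dense_to_one_hot(np.array([0, 2, 1]), n_classes=3)
#   -> array([[1., 0., 0.],
#             [0., 0., 1.],
#             [0., 1., 0.]], dtype=float32)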
class DatasetSplit(object):
"""Utility class for batching data and handling multiple splits.
Attributes
----------
current_batch_idx : int
Description
images : np.ndarray
Xs of the dataset. Not necessarily images.
labels : np.ndarray
ys of the dataset.
n_classes : int
Number of possible labels
num_examples : int
Number of total observations
"""
def __init__(self, images, labels):
"""Initialize a DatasetSplit object.
Parameters
----------
images : np.ndarray
Xs/inputs
labels : np.ndarray
ys/outputs
"""
self.images = np.array(images).astype(np.float32)
if labels is not None:
self.labels = np.array(labels).astype(np.int32)
self.n_classes = len(np.unique(labels))
else:
self.labels = None
self.num_examples = len(self.images)
def next_batch(self, batch_size=100):
"""Batch generator with randomization.
Parameters
----------
batch_size : int, optional
Size of each minibatch.
Yields
------
Xs, ys : np.ndarray, np.ndarray
Next batch of inputs and labels (if no labels, then None).
"""
# Shuffle each epoch
current_permutation = np.random.permutation(range(len(self.images)))
epoch_images = self.images[current_permutation, ...]
if self.labels is not None:
epoch_labels = self.labels[current_permutation, ...]
# Then iterate over the epoch
self.current_batch_idx = 0
while self.current_batch_idx < len(self.images):
end_idx = min(self.current_batch_idx + batch_size, len(self.images))
this_batch = {
'images':
epoch_images[self.current_batch_idx:end_idx],
'labels':
epoch_labels[self.current_batch_idx:end_idx]
if self.labels is not None else None
}
self.current_batch_idx += batch_size
yield this_batch['images'], this_batch['labels']
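# Usage sketch for `DatasetSplit.next_batch` (shapes are illustrative):
#   split = DatasetSplit(images=Xs, labels=ys)
#   for batch_X, batch_y in split.next_batch(batch_size=64):
#       ...  # batch_y is None when the split has no labels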
class Dataset(object):
"""Create a dataset from data and their labels.
Allows easy use of train/valid/test splits; Batch generator.
Attributes
----------
all_idxs : list
All indexes across all splits.
all_inputs : list
All inputs across all splits.
all_labels : list
All labels across all splits.
n_classes : int
Number of labels.
split : list
Percentage split of train, valid, test sets.
test_idxs : list
Indexes of the test split.
train_idxs : list
Indexes of the train split.
valid_idxs : list
Indexes of the valid split.
"""
def __init__(self, Xs, ys=None, split=[1.0, 0.0, 0.0], one_hot=False, n_classes=1):
"""Initialize a Dataset object.
Parameters
----------
Xs : np.ndarray
Images/inputs to a network
ys : np.ndarray
Labels/outputs to a network
split : list, optional
Percentage of train, valid, and test sets.
one_hot : bool, optional
Whether or not to use one-hot encoding of labels (ys).
n_classes : int, optional
Number of classes represented in ys (used for one hot embedding).
"""
self.all_idxs = []
self.all_labels = []
self.all_inputs = []
self.train_idxs = []
self.valid_idxs = []
self.test_idxs = []
self.n_classes = n_classes
self.split = split
# Now mix all the labels that are currently stored as blocks
self.all_inputs = Xs
n_idxs = len(self.all_inputs)
idxs = range(n_idxs)
rand_idxs = np.random.permutation(idxs)
self.all_inputs = self.all_inputs[rand_idxs, ...]
if ys is not None:
self.all_labels = ys if not one_hot else dense_to_one_hot(ys, n_classes=n_classes)
self.all_labels = self.all_labels[rand_idxs, ...]
else:
self.all_labels = None
# Get splits
self.train_idxs = idxs[:round(split[0] * n_idxs)]
self.valid_idxs = idxs[len(self.train_idxs):
len(self.train_idxs) + round(split[1] * n_idxs)]
self.test_idxs = idxs[(len(self.valid_idxs) + len(self.train_idxs)):
(len(self.valid_idxs) + len(self.train_idxs)
) + round(split[2] * n_idxs)]
@property
def X(self):
"""Inputs/Xs/Images.
Returns
-------
all_inputs : np.ndarray
Original Inputs/Xs.
"""
return self.all_inputs
@property
def Y(self):
"""Outputs/ys/Labels.
Returns
-------
all_labels : np.ndarray
Original Outputs/ys.
"""
return self.all_labels
@property
def train(self):
"""Train split.
Returns
-------
split : DatasetSplit
Split of the train dataset.
"""
if len(self.train_idxs):
inputs = self.all_inputs[self.train_idxs, ...]
if self.all_labels is not None:
labels = self.all_labels[self.train_idxs, ...]
else:
labels = None
else:
inputs, labels = [], []
return DatasetSplit(inputs, labels)
@property
def valid(self):
"""Validation split.
Returns
-------
split : DatasetSplit
Split of the validation dataset.
"""
if len(self.valid_idxs):
inputs = self.all_inputs[self.valid_idxs, ...]
if self.all_labels is not None:
labels = self.all_labels[self.valid_idxs, ...]
else:
labels = None
else:
inputs, labels = [], []
return DatasetSplit(inputs, labels)
@property
def test(self):
"""Test split.
Returns
-------
split : DatasetSplit
Split of the test dataset.
"""
if len(self.test_idxs):
inputs = self.all_inputs[self.test_idxs, ...]
if self.all_labels is not None:
labels = self.all_labels[self.test_idxs, ...]
else:
labels = None
else:
inputs, labels = [], []
return DatasetSplit(inputs, labels)
def mean(self):
"""Mean of the inputs/Xs.
Returns
-------
mean : np.ndarray
Calculates mean across 0th (batch) dimension.
"""
return np.mean(self.all_inputs, axis=0)
def std(self):
"""Standard deviation of the inputs/Xs.
Returns
-------
std : np.ndarray
Calculates std across 0th (batch) dimension.
"""
return np.std(self.all_inputs, axis=0)
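# Usage sketch for `Dataset` (split percentages and class count are illustrative):
#   ds = Dataset(Xs, ys, split=[0.8, 0.1, 0.1], one_hot=True, n_classes=10)
#   Xs_norm = (ds.X - ds.mean()) / ds.std()
#   for batch_X, batch_y in ds.train.next_batch(100):
#       ...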
|
python/tests/pushSAXhtml.py | balabit-deps/libxml2 | 372 | 11135312 | <filename>python/tests/pushSAXhtml.py
#!/usr/bin/python -u
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
log = ""
class callback:
def startDocument(self):
global log
log = log + "startDocument:"
def endDocument(self):
global log
log = log + "endDocument:"
def startElement(self, tag, attrs):
global log
log = log + "startElement %s %s:" % (tag, attrs)
def endElement(self, tag):
global log
log = log + "endElement %s:" % (tag)
def characters(self, data):
global log
log = log + "characters: %s:" % (data)
def warning(self, msg):
global log
log = log + "warning: %s:" % (msg)
def error(self, msg):
global log
log = log + "error: %s:" % (msg)
def fatalError(self, msg):
global log
log = log + "fatalError: %s:" % (msg)
handler = callback()
ctxt = libxml2.htmlCreatePushParser(handler, "<foo", 4, "test.xml")
chunk = " url='tst'>b"
ctxt.htmlParseChunk(chunk, len(chunk), 0)
chunk = "ar</foo>"
ctxt.htmlParseChunk(chunk, len(chunk), 1)
ctxt=None
reference = """startDocument:startElement html None:startElement body None:startElement foo {'url': 'tst'}:error: Tag foo invalid
:characters: bar:endElement foo:endElement body:endElement html:endDocument:"""
if log != reference:
print "Error got: %s" % log
print "Exprected: %s" % reference
sys.exit(1)
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
|
src/transformers/models/ibert/quant_modules.py | dctelus/transformers | 8,028 | 11135316 | # coding=utf-8
# Copyright 2021 The I-BERT Authors (<NAME>, <NAME>, <NAME>,
# <NAME>, <NAME> - UC Berkeley) and The HuggingFace Inc. team.
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import decimal
import numpy as np
import torch
from torch import nn
from torch.autograd import Function
from ...utils import logging
logger = logging.get_logger(__name__)
class QuantEmbedding(nn.Module):
"""
Quantized version of `torch.nn.Embedding`. Adds quantization-specific arguments on top of `torch.nn.Embedding`.
Args:
weight_bit (`int`, *optional*, defaults to `8`):
Bitwidth for the quantized weight.
momentum (`float`, *optional*, defaults to `0.95`):
Momentum for updating the activation quantization range.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
"""
def __init__(
self,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
_weight=None,
weight_bit=8,
momentum=0.95,
quant_mode=False,
):
super().__init__()
self.num_ = num_embeddings
self.dim = embedding_dim
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))
self.register_buffer("weight_scaling_factor", torch.zeros(1))
self.register_buffer("weight_integer", torch.zeros_like(self.weight))
self.weight_bit = weight_bit
self.momentum = momentum
self.quant_mode = quant_mode
self.percentile_mode = False
self.weight_function = SymmetricQuantFunction.apply
def forward(self, x, positions=None, incremental_state=None):
if not self.quant_mode:
return (
nn.functional.embedding(
x,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
),
None,
)
w = self.weight
w_transform = w.data.detach()
w_min = w_transform.min().expand(1)
w_max = w_transform.max().expand(1)
self.weight_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, False)
self.weight_integer = self.weight_function(
self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor
)
emb_int = nn.functional.embedding(
x,
self.weight_integer,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
return emb_int * self.weight_scaling_factor, self.weight_scaling_factor
class QuantAct(nn.Module):
"""
Quantizes the given activation.
Args:
activation_bit (`int`):
Bitwidth for the quantized activation.
act_range_momentum (`float`, *optional*, defaults to `0.95`):
Momentum for updating the activation quantization range.
per_channel (`bool`, *optional*, defaults to `False`):
            Whether or not to use channel-wise quantization.
channel_len (`int`, *optional*):
            Specify the channel length when *per_channel* is set to True.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
"""
def __init__(self, activation_bit, act_range_momentum=0.95, per_channel=False, channel_len=None, quant_mode=False):
super().__init__()
self.activation_bit = activation_bit
self.act_range_momentum = act_range_momentum
self.quant_mode = quant_mode
self.per_channel = per_channel
self.percentile = False
self.act_function = SymmetricQuantFunction.apply
if not self.per_channel:
self.register_buffer("x_min", torch.zeros(1))
self.register_buffer("x_max", torch.zeros(1))
self.register_buffer("act_scaling_factor", torch.zeros(1))
self.x_min -= 1e-5
self.x_max += 1e-5
else:
raise NotImplementedError("per-channel mode is not currently supported for activation.")
def __repr__(self):
return (
f"{self.__class__.__name__}(activation_bit={self.activation_bit}, "
f"quant_mode: {self.activation_bit}, Act_min: {self.x_min.item():.2f}, "
f"Act_max: {self.x_max.item():.2f})"
)
def forward(
self,
x,
pre_act_scaling_factor=None,
identity=None,
identity_scaling_factor=None,
specified_min=None,
specified_max=None,
):
x_act = x if identity is None else identity + x
# collect running stats if training
if self.training:
assert not self.percentile, "percentile mode is not currently supported for activation."
assert not self.per_channel, "per-channel mode is not currently supported for activation."
x_min = x_act.data.min()
x_max = x_act.data.max()
assert (
x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0
), "NaN detected when computing min/max of the activation"
# Initialization
if self.x_min.min() > -1.1e-5 and self.x_max.max() < 1.1e-5:
self.x_min = self.x_min + x_min
self.x_max = self.x_max + x_max
# exponential moving average (EMA)
# use momentum to prevent the quantized values change greatly every iteration
elif self.act_range_momentum == -1:
self.x_min = torch.min(self.x_min, x_min)
self.x_max = torch.max(self.x_max, x_max)
else:
self.x_min = self.x_min * self.act_range_momentum + x_min * (1 - self.act_range_momentum)
self.x_max = self.x_max * self.act_range_momentum + x_max * (1 - self.act_range_momentum)
if not self.quant_mode:
return x_act, None
x_min = self.x_min if specified_min is None else specified_min
x_max = self.x_max if specified_max is None else specified_max
self.act_scaling_factor = symmetric_linear_quantization_params(
self.activation_bit, x_min, x_max, per_channel=self.per_channel
)
if pre_act_scaling_factor is None:
# this is for the input quantization
quant_act_int = self.act_function(x, self.activation_bit, self.percentile, self.act_scaling_factor)
else:
quant_act_int = FixedPointMul.apply(
x,
pre_act_scaling_factor,
self.activation_bit,
self.act_scaling_factor,
identity,
identity_scaling_factor,
)
correct_output_scale = self.act_scaling_factor.view(-1)
return quant_act_int * correct_output_scale, self.act_scaling_factor
class QuantLinear(nn.Module):
"""
Quantized version of `torch.nn.Linear`. Adds quantization-specific arguments on top of `torch.nn.Linear`.
Args:
weight_bit (`int`, *optional*, defaults to `8`):
Bitwidth for the quantized weight.
bias_bit (`int`, *optional*, defaults to `32`):
Bitwidth for the quantized bias.
per_channel (`bool`, *optional*, defaults to `False`):
Whether or not to use channel-wise quantization.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
"""
def __init__(
self, in_features, out_features, bias=True, weight_bit=8, bias_bit=32, per_channel=False, quant_mode=False
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.zeros([out_features, in_features]))
self.register_buffer("weight_integer", torch.zeros_like(self.weight))
self.register_buffer("fc_scaling_factor", torch.zeros(self.out_features))
if bias:
self.bias = nn.Parameter(torch.zeros(out_features))
self.register_buffer("bias_integer", torch.zeros_like(self.bias))
self.weight_bit = weight_bit
self.quant_mode = quant_mode
self.per_channel = per_channel
self.bias_bit = bias_bit
self.quant_mode = quant_mode
self.percentile_mode = False
self.weight_function = SymmetricQuantFunction.apply
def __repr__(self):
s = super().__repr__()
s = f"({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})"
return s
def forward(self, x, prev_act_scaling_factor=None):
if not self.quant_mode:
return nn.functional.linear(x, weight=self.weight, bias=self.bias), None
# assert that prev_act_scaling_factor is a scalar tensor
assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), (
"Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. "
"Please add a QuantAct layer with `per_channel = True` before this QuantAct layer"
)
w = self.weight
w_transform = w.data.detach()
if self.per_channel:
w_min, _ = torch.min(w_transform, dim=1, out=None)
w_max, _ = torch.max(w_transform, dim=1, out=None)
else:
w_min = w_transform.min().expand(1)
w_max = w_transform.max().expand(1)
self.fc_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, self.per_channel)
self.weight_integer = self.weight_function(
self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor
)
bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor
if self.bias is not None:
self.bias_integer = self.weight_function(self.bias, self.bias_bit, False, bias_scaling_factor)
prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1)
x_int = x / prev_act_scaling_factor
return (
nn.functional.linear(x_int, weight=self.weight_integer, bias=self.bias_integer) * bias_scaling_factor,
bias_scaling_factor,
)
class IntGELU(nn.Module):
"""
Quantized version of `torch.nn.GELU`. Adds quantization-specific arguments on top of `torch.nn.GELU`.
Args:
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize the layer if either "gelu" or "nonlinear" is given.
"""
def __init__(self, quant_mode=True, force_dequant="none"):
super().__init__()
self.quant_mode = quant_mode
if force_dequant in ["nonlinear", "gelu"]:
logger.info("Force dequantize gelu")
self.quant_mode = False
if not self.quant_mode:
self.activation_fn = nn.GELU()
self.k = 1.4142
self.const = 14 # dummy integer constant
self.coeff = [-0.2888, -1.769, 1] # a(x+b)**2 + c
self.coeff[2] /= self.coeff[0]
def int_erf(self, x_int, scaling_factor):
b_int = torch.floor(self.coeff[1] / scaling_factor)
c_int = torch.floor(self.coeff[2] / scaling_factor**2)
sign = torch.sign(x_int)
abs_int = torch.min(torch.abs(x_int), -b_int)
y_int = sign * ((abs_int + b_int) ** 2 + c_int)
scaling_factor = scaling_factor**2 * self.coeff[0]
# avoid overflow
y_int = floor_ste.apply(y_int / 2**self.const)
scaling_factor = scaling_factor * 2**self.const
return y_int, scaling_factor
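    # The erf approximation above follows a * (x + b)**2 + c on the integer
    # domain (see `self.coeff`), and forward() composes it into
    # GELU(x) ~= 0.5 * x * (1 + erf(x / sqrt(2))); `self.k` is sqrt(2) and the
    # final scaling factor is halved accordingly.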
def forward(self, x, scaling_factor=None):
if not self.quant_mode:
return self.activation_fn(x), None
x_int = x / scaling_factor
sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k)
shift_int = 1.0 // sigmoid_scaling_factor
x_int = x_int * (sigmoid_int + shift_int)
scaling_factor = scaling_factor * sigmoid_scaling_factor / 2
return x_int * scaling_factor, scaling_factor
class IntSoftmax(nn.Module):
"""
Quantized version of `torch.nn.Softmax`. Adds quantization-specific arguments on top of `torch.nn.Softmax`.
Args:
output_bit (`int`):
Bitwidth for the layer output activation.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize the layer if either "softmax" or "nonlinear" is given.
"""
def __init__(self, output_bit, quant_mode=False, force_dequant="none"):
super().__init__()
self.output_bit = output_bit
self.max_bit = 32
self.quant_mode = quant_mode
if force_dequant in ["nonlinear", "softmax"]:
logger.info("Force dequantize softmax")
self.quant_mode = False
self.act = QuantAct(16, quant_mode=self.quant_mode)
self.x0 = -0.6931 # -ln2
self.const = 30 # dummy integer constant
self.coef = [0.35815147, 0.96963238, 1.0] # ax**2 + bx + c
self.coef[1] /= self.coef[0]
self.coef[2] /= self.coef[0]
def int_polynomial(self, x_int, scaling_factor):
with torch.no_grad():
b_int = torch.floor(self.coef[1] / scaling_factor)
c_int = torch.floor(self.coef[2] / scaling_factor**2)
z = (x_int + b_int) * x_int + c_int
scaling_factor = self.coef[0] * scaling_factor**2
return z, scaling_factor
def int_exp(self, x_int, scaling_factor):
with torch.no_grad():
x0_int = torch.floor(self.x0 / scaling_factor)
x_int = torch.max(x_int, self.const * x0_int)
q = floor_ste.apply(x_int / x0_int)
r = x_int - x0_int * q
exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor)
exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0)
scaling_factor = exp_scaling_factor / 2**self.const
return exp_int, scaling_factor
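    # The decomposition above uses x = q * x0 + r with x0 = -ln2, so that
    # exp(x) = exp(r) * 2**(-q); exp(r) is evaluated with the second-order
    # polynomial in int_polynomial, and the 2**(-q) factor is folded into the
    # power-of-two shift, keeping everything in integer arithmetic.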
def forward(self, x, scaling_factor):
if not self.quant_mode:
return nn.functional.softmax(x, dim=-1), None
x_int = x / scaling_factor
x_int_max, _ = x_int.max(dim=-1, keepdim=True)
x_int = x_int - x_int_max
exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor)
# Avoid overflow
exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor)
exp_int = exp / exp_scaling_factor
exp_int_sum = exp_int.sum(dim=-1, keepdim=True)
factor = floor_ste.apply(2**self.max_bit / exp_int_sum)
exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit))
scaling_factor = 1 / 2**self.output_bit
return exp_int * scaling_factor, scaling_factor
class IntLayerNorm(nn.Module):
"""
Quantized version of `torch.nn.LayerNorm`. Adds quantization-specific arguments on top of `torch.nn.LayerNorm`.
Args:
output_bit (`int`, *optional*, defaults to `8`):
Bitwidth for the layer output activation.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize the layer if either "layernorm" or "nonlinear" is given.
"""
def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant="none"):
super().__init__()
self.normalized_shape = normalized_shape
self.eps = eps
self.weight = nn.Parameter(torch.zeros(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.quant_mode = quant_mode
if force_dequant in ["nonlinear", "layernorm"]:
logger.info("Force dequantize layernorm")
self.quant_mode = False
self.register_buffer("shift", torch.zeros(1))
self.output_bit = output_bit
self.max_bit = 32
self.dim_sqrt = None
self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode)
def set_shift(self, y_int):
with torch.no_grad():
y_sq_int = y_int**2
var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
shift = (torch.log2(torch.sqrt(var_int / 2**self.max_bit)).ceil()).max()
shift_old = self.shift
self.shift = torch.max(self.shift, shift)
logger.info(f"Dynamic shift adjustment: {int(shift_old)} -> {int(self.shift)}")
def overflow_fallback(self, y_int):
"""
This fallback function is called when overflow is detected during training time, and adjusts the `self.shift`
to avoid overflow in the subsequent runs.
"""
self.set_shift(y_int) # adjusts `self.shift`
y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
y_sq_int = y_int_shifted**2
var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
return var_int
def forward(self, x, scaling_factor=None):
if not self.quant_mode:
mean = x.mean(axis=2, keepdim=True)
y = x - mean
var = torch.mean(y**2, axis=2, keepdim=True)
x = y / torch.sqrt(self.eps + var)
x = x * self.weight + self.bias
return x, None
# compute sqrt of the feature dimension if it is the first run
if self.dim_sqrt is None:
n = torch.tensor(x.shape[2], dtype=torch.float)
self.dim_sqrt = torch.sqrt(n).to(x.device)
# Normalization: computes mean and variance(std)
x_int = x / scaling_factor
mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True))
y_int = x_int - mean_int
y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
y_sq_int = y_int_shifted**2
var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
# overflow handling in training time
if self.training:
# if overflow is detected
if var_int.max() >= 2**self.max_bit:
var_int = self.overflow_fallback(y_int)
assert var_int.max() < 2**self.max_bit + 0.1, (
"Error detected in overflow handling: "
"`var_int` exceeds `self.max_bit` (the maximum possible bit width)"
)
# To be replaced with integer-sqrt kernel that produces the same output
std_int = floor_ste.apply(torch.sqrt(var_int)) * 2**self.shift
factor = floor_ste.apply(2**31 / std_int)
y_int = floor_ste.apply(y_int * factor / 2)
scaling_factor = self.dim_sqrt / 2**30
# scaling and shifting
bias = self.bias.data.detach() / (self.weight.data.detach())
bias_int = floor_ste.apply(bias / scaling_factor)
y_int = y_int + bias_int
scaling_factor = scaling_factor * self.weight
x = y_int * scaling_factor
return x, scaling_factor
def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False):
"""
Calculate the percentile max and min values in a given tensor
Args:
input (`torch.Tensor`):
The target tensor to calculate percentile max and min.
lower_percentile (`float`):
            If 0.1, the value bounding the smallest 0.1% of values in the tensor is returned as the percentile min.
        upper_percentile (`float`):
            If 99.9, the value bounding the largest 0.1% of values in the tensor is returned as the percentile max.
output_tensor (`bool`, *optional*, defaults to `False`):
If True, this function returns tensors, otherwise it returns values.
Returns:
`Tuple(torch.Tensor, torch.Tensor)`: Percentile min and max value of *input*
"""
input_length = input.shape[0]
lower_index = round(input_length * (1 - lower_percentile * 0.01))
upper_index = round(input_length * upper_percentile * 0.01)
upper_bound = torch.kthvalue(input, k=upper_index).values
if lower_percentile == 0:
lower_bound = upper_bound * 0
# lower_index += 1
else:
lower_bound = -torch.kthvalue(-input, k=lower_index).values
if not output_tensor:
lower_bound = lower_bound.item()
upper_bound = upper_bound.item()
return lower_bound, upper_bound
def linear_quantize(input, scale, zero_point, inplace=False):
"""
Quantize single-precision input tensor to integers with the given scaling factor and zeropoint.
Args:
input (`torch.Tensor`):
Single-precision input tensor to be quantized.
scale (`torch.Tensor`):
Scaling factor for quantization.
        zero_point (`torch.Tensor`):
Shift for quantization.
inplace (`bool`, *optional*, defaults to `False`):
Whether to compute inplace or not.
Returns:
`torch.Tensor`: Linearly quantized value of *input* according to *scale* and *zero_point*.
"""
# reshape scale and zeropoint for convolutional weights and activation
if len(input.shape) == 4:
scale = scale.view(-1, 1, 1, 1)
zero_point = zero_point.view(-1, 1, 1, 1)
# reshape scale and zeropoint for linear weights
elif len(input.shape) == 2:
scale = scale.view(-1, 1)
zero_point = zero_point.view(-1, 1)
else:
scale = scale.view(-1)
zero_point = zero_point.view(-1)
# quantized = float / scale + zero_point
if inplace:
input.mul_(1.0 / scale).add_(zero_point).round_()
return input
return torch.round(1.0 / scale * input + zero_point)
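# Worked example for `linear_quantize` (illustrative numbers): with
# scale = 0.05 and zero_point = 0, an input value of 0.24 maps to
# round(0.24 / 0.05 + 0) = round(4.8) = 5.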
def symmetric_linear_quantization_params(num_bits, saturation_min, saturation_max, per_channel=False):
"""
Compute the scaling factor with the given quantization range for symmetric quantization.
Args:
saturation_min (`torch.Tensor`):
Lower bound for quantization range.
saturation_max (`torch.Tensor`):
Upper bound for quantization range.
per_channel (`bool`, *optional*, defaults to `False`):
            Whether or not to use channel-wise quantization.
Returns:
`torch.Tensor`: Scaling factor that linearly quantizes the given range between *saturation_min* and
*saturation_max*.
"""
# in this part, we do not need any gradient computation,
# in order to enforce this, we put torch.no_grad()
with torch.no_grad():
n = 2 ** (num_bits - 1) - 1
if per_channel:
scale, _ = torch.max(torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1), dim=1)
scale = torch.clamp(scale, min=1e-8) / n
else:
scale = max(saturation_min.abs(), saturation_max.abs())
scale = torch.clamp(scale, min=1e-8) / n
return scale
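# Worked example for `symmetric_linear_quantization_params` (illustrative):
# with num_bits = 8, n = 2**7 - 1 = 127; for a range of [-0.4, 0.5] the scale is
# max(0.4, 0.5) / 127 = 0.5 / 127 ~= 0.00394, so the extreme value 0.5 maps to
# 0.5 / scale = 127 in the integer domain.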
class SymmetricQuantFunction(Function):
"""
Class to quantize the given floating-point values using symmetric quantization with given range and bitwidth.
"""
@staticmethod
def forward(ctx, x, k, percentile_mode, scale):
"""
Args:
x (`torch.Tensor`):
Floating point tensor to be quantized.
k (`int`):
Quantization bitwidth.
percentile_mode (`bool`):
Whether or not to use percentile calibration.
scale (`torch.Tensor`):
Pre-calculated scaling factor for *x*. Note that the current implementation of SymmetricQuantFunction
requires pre-calculated scaling factor.
Returns:
`torch.Tensor`: Symmetric-quantized value of *input*.
"""
zero_point = torch.tensor(0.0).to(scale.device)
n = 2 ** (k - 1) - 1
new_quant_x = linear_quantize(x, scale, zero_point, inplace=False)
new_quant_x = torch.clamp(new_quant_x, -n, n - 1)
ctx.scale = scale
return new_quant_x
@staticmethod
def backward(ctx, grad_output):
scale = ctx.scale
if len(grad_output.shape) == 4:
scale = scale.view(-1, 1, 1, 1)
# reshape scale and zeropoint for linear weights
elif len(grad_output.shape) == 2:
scale = scale.view(-1, 1)
else:
scale = scale.view(-1)
return grad_output.clone() / scale, None, None, None, None
class floor_ste(Function):
"""
    Straight-through Estimator (STE) for torch.floor()
"""
@staticmethod
def forward(ctx, x):
return torch.floor(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.clone()
class round_ste(Function):
"""
    Straight-through Estimator (STE) for torch.round()
"""
@staticmethod
def forward(ctx, x):
return torch.round(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.clone()
def batch_frexp(inputs, max_bit=31):
"""
Decompose the scaling factor into mantissa and twos exponent.
Args:
        inputs (`torch.Tensor`):
            Target scaling factor to decompose.
    Returns:
        `Tuple(torch.Tensor, torch.Tensor)`: mantissa and exponent
"""
shape_of_input = inputs.size()
    # flatten the input to a 1-d tensor
inputs = inputs.view(-1)
output_m, output_e = np.frexp(inputs.cpu().numpy())
tmp_m = []
for m in output_m:
int_m_shifted = int(
decimal.Decimal(m * (2**max_bit)).quantize(decimal.Decimal("1"), rounding=decimal.ROUND_HALF_UP)
)
tmp_m.append(int_m_shifted)
output_m = np.array(tmp_m)
output_e = float(max_bit) - output_e
return (
torch.from_numpy(output_m).to(inputs.device).view(shape_of_input),
torch.from_numpy(output_e).to(inputs.device).view(shape_of_input),
)
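# Worked example for `batch_frexp` (illustrative): np.frexp(0.75) gives
# mantissa 0.75 and exponent 0, so with max_bit = 31 the function returns
# m = round(0.75 * 2**31) = 1610612736 and e = 31, and m / 2**e recovers 0.75.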
class FixedPointMul(Function):
"""
Function to perform fixed-point arithmetic that can match integer arithmetic on hardware.
Args:
pre_act (`torch.Tensor`):
Input tensor.
pre_act_scaling_factor (`torch.Tensor`):
Scaling factor of the input tensor *pre_act*.
bit_num (`int`):
Quantization bitwidth.
z_scaling_factor (`torch.Tensor`):
Scaling factor of the output tensor.
identity (`torch.Tensor`, *optional*):
Identity tensor, if exists.
identity_scaling_factor (`torch.Tensor`, *optional*):
Scaling factor of the identity tensor *identity*, if exists.
Returns:
`torch.Tensor`: Output tensor(*pre_act* if *identity* is not given, otherwise the addition of *pre_act* and
*identity*), whose scale is rescaled to *z_scaling_factor*.
"""
@staticmethod
def forward(
ctx,
pre_act,
pre_act_scaling_factor,
bit_num,
z_scaling_factor,
identity=None,
identity_scaling_factor=None,
):
if len(pre_act_scaling_factor.shape) == 3:
reshape = lambda x: x # noqa: E731
else:
reshape = lambda x: x.view(1, 1, -1) # noqa: E731
ctx.identity = identity
n = 2 ** (bit_num - 1) - 1
with torch.no_grad():
pre_act_scaling_factor = reshape(pre_act_scaling_factor)
if identity is not None:
identity_scaling_factor = reshape(identity_scaling_factor)
ctx.z_scaling_factor = z_scaling_factor
z_int = torch.round(pre_act / pre_act_scaling_factor)
_A = pre_act_scaling_factor.type(torch.double)
_B = (z_scaling_factor.type(torch.float)).type(torch.double)
new_scale = _A / _B
new_scale = reshape(new_scale)
m, e = batch_frexp(new_scale)
output = z_int.type(torch.double) * m.type(torch.double)
output = torch.round(output / (2.0**e))
if identity is not None:
# needs addition of identity activation
wx_int = torch.round(identity / identity_scaling_factor)
_A = identity_scaling_factor.type(torch.double)
_B = (z_scaling_factor.type(torch.float)).type(torch.double)
new_scale = _A / _B
new_scale = reshape(new_scale)
m1, e1 = batch_frexp(new_scale)
output1 = wx_int.type(torch.double) * m1.type(torch.double)
output1 = torch.round(output1 / (2.0**e1))
output = output1 + output
return torch.clamp(output.type(torch.float), -n - 1, n)
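    # The forward pass above realizes output ~= z_int * (A / B) using only an
    # integer multiply by the mantissa m and a right shift by e (batch_frexp
    # gives A / B ~= m / 2**e), which is what keeps the rescaling reproducible
    # with integer-only hardware arithmetic.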
@staticmethod
def backward(ctx, grad_output):
identity_grad = None
if ctx.identity is not None:
identity_grad = grad_output.clone() / ctx.z_scaling_factor
return grad_output.clone() / ctx.z_scaling_factor, None, None, None, None, identity_grad, None
|
docs/examples/auto-instrumentation/server_uninstrumented.py | oxeye-nikolay/opentelemetry-python | 868 | 11135339 | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, request
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
BatchSpanProcessor,
ConsoleSpanExporter,
)
app = Flask(__name__)
trace.set_tracer_provider(TracerProvider())
trace.get_tracer_provider().add_span_processor(
BatchSpanProcessor(ConsoleSpanExporter())
)
@app.route("/server_request")
def server_request():
print(request.args.get("param"))
return "served"
if __name__ == "__main__":
app.run(port=8082)
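# Usage sketch (assumed invocation; the route and port come from the code above):
#   python server_uninstrumented.py
#   curl "http://localhost:8082/server_request?param=hello"
# The module configures a ConsoleSpanExporter but creates no spans itself; in
# the auto-instrumentation example it is meant to be launched via the
# `opentelemetry-instrument` agent so spans are generated automatically.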
|
api/users/models.py | zefixlluja/flagsmith | 111 | 11135366 | <gh_stars>100-1000
import logging
import typing
from django.conf import settings
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.models import AbstractUser
from django.core.mail import send_mail
from django.db import models
from django.db.models import Q
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import gettext_lazy as _
from environments.identities.models import Identity
from environments.models import Environment
from environments.permissions.models import (
UserEnvironmentPermission,
UserPermissionGroupEnvironmentPermission,
)
from organisations.models import (
Organisation,
OrganisationRole,
UserOrganisation,
)
from organisations.permissions.models import (
UserOrganisationPermission,
UserPermissionGroupOrganisationPermission,
)
from projects.models import (
Project,
UserPermissionGroupProjectPermission,
UserProjectPermission,
)
from users.auth_type import AuthType
from users.exceptions import InvalidInviteError
logger = logging.getLogger(__name__)
class UserManager(BaseUserManager):
"""Define a model manager for User model with no username field."""
use_in_migrations = True
def _create_user(self, email, password, **extra_fields):
"""Create and save a User with the given email and password."""
if not email:
raise ValueError("The given email must be set")
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
"""Create and save a regular User with the given email and password."""
extra_fields.setdefault("is_staff", False)
extra_fields.setdefault("is_superuser", False)
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
"""Create and save a SuperUser with the given email and password."""
extra_fields.setdefault("is_staff", True)
extra_fields.setdefault("is_superuser", True)
if extra_fields.get("is_staff") is not True:
raise ValueError("Superuser must have is_staff=True.")
if extra_fields.get("is_superuser") is not True:
raise ValueError("Superuser must have is_superuser=True.")
return self._create_user(email, password, **extra_fields)
def get_by_natural_key(self, email):
# Used to allow case insensitive login
return self.get(email__iexact=email)
@python_2_unicode_compatible
class FFAdminUser(AbstractUser):
organisations = models.ManyToManyField(
Organisation, related_name="users", blank=True, through=UserOrganisation
)
email = models.EmailField(unique=True, null=False)
objects = UserManager()
username = models.CharField(unique=True, max_length=150, null=True, blank=True)
first_name = models.CharField(_("first name"), max_length=30)
last_name = models.CharField(_("last name"), max_length=150)
google_user_id = models.CharField(max_length=50, null=True, blank=True)
github_user_id = models.CharField(max_length=50, null=True, blank=True)
USERNAME_FIELD = "email"
REQUIRED_FIELDS = ["first_name", "last_name"]
class Meta:
ordering = ["id"]
verbose_name = "Feature flag admin user"
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
@property
def auth_type(self):
if self.google_user_id:
return AuthType.GOOGLE.value
if self.github_user_id:
return AuthType.GITHUB.value
return AuthType.EMAIL.value
def get_full_name(self):
if not self.first_name:
return None
return " ".join([self.first_name, self.last_name]).strip()
def join_organisation(self, invite):
organisation = invite.organisation
if invite.email.lower() != self.email.lower():
raise InvalidInviteError("Registered email does not match invited email")
self.add_organisation(organisation, role=OrganisationRole(invite.role))
invite.delete()
def is_admin(self, organisation):
return self.get_organisation_role(organisation) == OrganisationRole.ADMIN.name
def get_admin_organisations(self):
return Organisation.objects.filter(
userorganisation__user=self,
userorganisation__role=OrganisationRole.ADMIN.name,
)
def add_organisation(self, organisation, role=OrganisationRole.USER):
UserOrganisation.objects.create(
user=self, organisation=organisation, role=role.name
)
def remove_organisation(self, organisation):
UserOrganisation.objects.filter(user=self, organisation=organisation).delete()
def get_organisation_role(self, organisation):
user_organisation = self.get_user_organisation(organisation)
if user_organisation:
return user_organisation.role
def get_organisation_join_date(self, organisation):
user_organisation = self.get_user_organisation(organisation)
if user_organisation:
return user_organisation.date_joined
def get_user_organisation(self, organisation):
try:
return self.userorganisation_set.get(organisation=organisation)
except UserOrganisation.DoesNotExist:
logger.warning(
"User %d is not part of organisation %d" % (self.id, organisation.id)
)
def get_permitted_projects(self, permissions):
"""
Get all projects that the user has the given permissions for.
Rules:
- User has the required permissions directly (UserProjectPermission)
- User is in a UserPermissionGroup that has required permissions (UserPermissionGroupProjectPermissions)
- User is an admin for the organisation the project belongs to
"""
user_permission_query = Q()
group_permission_query = Q()
for permission in permissions:
user_permission_query = user_permission_query & Q(
userpermission__permissions__key=permission
)
group_permission_query = group_permission_query & Q(
grouppermission__permissions__key=permission
)
user_query = Q(userpermission__user=self) & (
user_permission_query | Q(userpermission__admin=True)
)
group_query = Q(grouppermission__group__users=self) & (
group_permission_query | Q(grouppermission__admin=True)
)
organisation_query = Q(
organisation__userorganisation__user=self,
organisation__userorganisation__role=OrganisationRole.ADMIN.name,
)
query = user_query | group_query | organisation_query
return Project.objects.filter(query).distinct()
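    # Usage sketch (the permission key below is hypothetical; any key stored in
    # the permission tables works the same way):
    #   projects = user.get_permitted_projects(["VIEW_PROJECT"])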
def has_project_permission(self, permission, project):
if self.is_project_admin(project) or self.is_admin(project.organisation):
return True
return project in self.get_permitted_projects([permission])
def has_environment_permission(self, permission, environment):
if self.is_environment_admin(environment) or self.is_admin(
environment.project.organisation
):
return True
return environment in self.get_permitted_environments([permission])
def is_project_admin(self, project):
if self.is_admin(project.organisation):
return True
return (
UserProjectPermission.objects.filter(
admin=True, user=self, project=project
).exists()
or UserPermissionGroupProjectPermission.objects.filter(
group__users=self, admin=True, project=project
).exists()
)
def get_permitted_environments(self, permissions):
"""
Get all environments that the user has the given permissions for.
Rules:
- User has the required permissions directly (UserEnvironmentPermission)
- User is in a UserPermissionGroup that has required permissions (UserPermissionGroupEnvironmentPermissions)
- User is an admin for the organisation the environment belongs to
"""
user_permission_query = Q()
group_permission_query = Q()
for permission in permissions:
user_permission_query = user_permission_query & Q(
userpermission__permissions__key=permission
)
group_permission_query = group_permission_query & Q(
grouppermission__permissions__key=permission
)
user_query = Q(userpermission__user=self) & (
user_permission_query | Q(userpermission__admin=True)
)
group_query = Q(grouppermission__group__users=self) & (
group_permission_query | Q(grouppermission__admin=True)
)
organisation_query = Q(
project__organisation__userorganisation__user=self,
project__organisation__userorganisation__role=OrganisationRole.ADMIN.name,
)
project_admin_query = Q(
project__userpermission__user=self, project__userpermission__admin=True
) | Q(
project__grouppermission__group__users=self,
project__grouppermission__admin=True,
)
query = user_query | group_query | organisation_query | project_admin_query
return Environment.objects.filter(query).distinct()
def get_permitted_identities(self):
return Identity.objects.filter(
environment__in=self.get_permitted_environments(
permissions=["VIEW_ENVIRONMENT"]
)
)
@staticmethod
def send_alert_to_admin_users(subject, message):
send_mail(
subject=subject,
message=message,
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=FFAdminUser._get_admin_user_emails(),
fail_silently=True,
)
@classmethod
def send_organisation_over_limit_alert(cls, organisation):
cls.send_alert_to_admin_users(
subject="Organisation over number of seats",
message="Organisation %s has used %d seats which is over their plan limit of %d "
"(plan: %s)"
% (
str(organisation.name),
organisation.num_seats,
organisation.subscription.max_seats,
organisation.subscription.plan,
),
)
@staticmethod
def _get_admin_user_emails():
return [
user["email"]
for user in FFAdminUser.objects.filter(is_staff=True).values("email")
]
def belongs_to(self, organisation_id: int) -> bool:
return organisation_id in self.organisations.all().values_list("id", flat=True)
def is_environment_admin(self, environment):
if self.is_admin(environment.project.organisation) or self.is_project_admin(
environment.project
):
return True
return (
UserEnvironmentPermission.objects.filter(
admin=True, user=self, environment=environment
).exists()
or UserPermissionGroupEnvironmentPermission.objects.filter(
group__users=self, admin=True, environment=environment
).exists()
)
def has_organisation_permission(
self, organisation: Organisation, permission_key: str
) -> bool:
if self.is_admin(organisation):
return True
return (
UserOrganisationPermission.objects.filter(
user=self, organisation=organisation, permissions__key=permission_key
).exists()
or UserPermissionGroupOrganisationPermission.objects.filter(
group__users=self,
organisation=organisation,
permissions__key=permission_key,
).exists()
)
def get_permission_keys_for_organisation(
self, organisation: Organisation
) -> typing.Iterable[str]:
user_permission = UserOrganisationPermission.objects.filter(
user=self, organisation=organisation
).first()
group_permissions = UserPermissionGroupOrganisationPermission.objects.filter(
group__users=self, organisation=organisation
)
all_permission_keys = set()
for organisation_permission in [user_permission, *group_permissions]:
if organisation_permission is not None:
all_permission_keys.update(
{
permission.key
for permission in organisation_permission.permissions.all()
}
)
return all_permission_keys
class UserPermissionGroup(models.Model):
"""
Model to group users within an organisation for the purposes of permissioning.
"""
name = models.CharField(max_length=200)
users = models.ManyToManyField(
"users.FFAdminUser", related_name="permission_groups"
)
organisation = models.ForeignKey(
Organisation, on_delete=models.CASCADE, related_name="permission_groups"
)
class Meta:
ordering = ("id",) # explicit ordering to prevent pagination warnings
def add_users_by_id(self, user_ids: list):
users_to_add = []
for user_id in user_ids:
try:
user = FFAdminUser.objects.get(
id=user_id, organisations=self.organisation
)
except FFAdminUser.DoesNotExist:
# re-raise exception with useful error message
raise FFAdminUser.DoesNotExist(
"User %d does not exist in this organisation" % user_id
)
users_to_add.append(user)
self.users.add(*users_to_add)
def remove_users_by_id(self, user_ids: list):
self.users.remove(*user_ids)
|
Cryptography/xor_cipher.py | TeacherManoj0131/HacktoberFest2020-Contributions | 256 | 11135383 | <reponame>TeacherManoj0131/HacktoberFest2020-Contributions
"""
author: <NAME>
date: 21.12.2017
class: XORCipher
This class implements the XOR-cipher algorithm and provides
some useful methods for encrypting and decrypting strings and
files.
Overview of the methods
- encrypt : list of char
- decrypt : list of char
- encrypt_string : str
- decrypt_string : str
- encrypt_file : boolean
- decrypt_file : boolean
"""
class XORCipher:
def __init__(self, key=0):
"""
simple constructor that receives a key or uses
default key = 0
"""
# private field
self.__key = key
def encrypt(self, content, key):
"""
input: 'content' of type string and 'key' of type int
output: encrypted string 'content' as a list of chars
        if the key is 0 (or not given), the key set in the constructor is used;
        if that is also 0, the key defaults to 1.
"""
# precondition
assert isinstance(key, int) and isinstance(content, str)
key = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
ans = []
for ch in content:
ans.append(chr(ord(ch) ^ key))
return ans
def decrypt(self, content, key):
"""
input: 'content' of type list and 'key' of type int
output: decrypted string 'content' as a list of chars
        if the key is 0 (or not given), the key set in the constructor is used;
        if that is also 0, the key defaults to 1.
"""
# precondition
assert isinstance(key, int) and isinstance(content, list)
key = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
ans = []
for ch in content:
ans.append(chr(ord(ch) ^ key))
return ans
def encrypt_string(self, content, key=0):
"""
input: 'content' of type string and 'key' of type int
output: encrypted string 'content'
        if the key is 0 (or not given), the key set in the constructor is used;
        if that is also 0, the key defaults to 1.
"""
# precondition
assert isinstance(key, int) and isinstance(content, str)
key = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
ans = ""
for ch in content:
ans += chr(ord(ch) ^ key)
return ans
def decrypt_string(self, content, key=0):
"""
input: 'content' of type string and 'key' of type int
output: decrypted string 'content'
        if the key is 0 (or not given), the key set in the constructor is used;
        if that is also 0, the key defaults to 1.
"""
# precondition
assert isinstance(key, int) and isinstance(content, str)
key = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
ans = ""
for ch in content:
ans += chr(ord(ch) ^ key)
return ans
def encrypt_file(self, file, key=0):
"""
input: filename (str) and a key (int)
output: returns true if encrypt process was
successful otherwise false
        if the key is 0 (or not given), the key set in the constructor is used;
        if that is also 0, the key defaults to 1.
"""
# precondition
assert isinstance(file, str) and isinstance(key, int)
try:
with open(file, "r") as fin:
with open("encrypt.out", "w+") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(line, key))
except IOError:
return False
return True
def decrypt_file(self, file, key):
"""
input: filename (str) and a key (int)
output: returns true if decrypt process was
successful otherwise false
        if the key is 0 (or not given), the key set in the constructor is used;
        if that is also 0, the key defaults to 1.
"""
# precondition
assert isinstance(file, str) and isinstance(key, int)
try:
with open(file, "r") as fin:
with open("decrypt.out", "w+") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(line, key))
except IOError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test enrcypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
|
tests/console/commands/source/conftest.py | uda/poetry | 12,347 | 11135421 | <gh_stars>1000+
import pytest
from poetry.config.source import Source
@pytest.fixture
def source_one():
return Source(name="one", url="https://one.com")
@pytest.fixture
def source_two():
return Source(name="two", url="https://two.com")
@pytest.fixture
def source_default():
return Source(name="default", url="https://default.com", default=True)
@pytest.fixture
def source_secondary():
return Source(name="secondary", url="https://secondary.com", secondary=True)
_existing_source = Source(name="existing", url="https://existing.com")
@pytest.fixture
def source_existing():
return _existing_source
PYPROJECT_WITH_SOURCES = f"""
[tool.poetry]
name = "source-command-test"
version = "0.1.0"
description = ""
authors = ["Poetry Tester <<EMAIL>>"]
[tool.poetry.dependencies]
python = "^3.9"
[tool.poetry.dev-dependencies]
[[tool.poetry.source]]
name = "{_existing_source.name}"
url = "{_existing_source.url}"
"""
@pytest.fixture
def poetry_with_source(project_factory):
return project_factory(pyproject_content=PYPROJECT_WITH_SOURCES)
@pytest.fixture
def add_multiple_sources(
command_tester_factory, poetry_with_source, source_one, source_two
):
add = command_tester_factory("source add", poetry=poetry_with_source)
for source in [source_one, source_two]:
add.execute(f"{source.name} {source.url}")
|
video/dataset/HMDB51/raw/video_processing_nvvl.py | Arun-George-Zachariah/Hadamard-Matrix-for-hashing | 202 | 11135439 | <gh_stars>100-1000
import os
import csv
import logging
import subprocess
from os import listdir
from joblib import delayed
from joblib import Parallel
def exe_cmd(cmd):
try:
dst_file = cmd.split()[-1]
if os.path.exists(dst_file):
return "exist"
cmd = cmd.replace('(', '\(').replace(')', '\)').replace('\'', '\\\'')
output = subprocess.check_output(cmd, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
logging.warning("failed: {}".format(cmd))
# logging.warning("failed: {}: {}".format(cmd, err.output.decode("utf-8"))) # detailed error
return False
return output
scr_root = 'data'
dst_root = 'nvvl_data_avi'
if not os.path.exists(dst_root):
os.makedirs(dst_root)
cmd_format = 'ffmpeg -i {} -map v:0 -c:v libx264 -crf 18 -pix_fmt yuv420p -g 5 -profile:v high {}.mp4'
#commands = []
in_parallel = False
for f in listdir(scr_root):
commands = []
sub_root = scr_root + '/'+ f +'/'
print('processing in: %s'%sub_root)
#scr_root = os.path.join(scr_root, f)
output_root_dir = os.path.join(dst_root, f)
if not os.path.exists(output_root_dir):
os.makedirs(output_root_dir)
for sub_name in listdir(sub_root):
#print('processing: %s'%sub_name)
basename = os.path.splitext(sub_name)[0]
#print(basename)
input_video_path = os.path.join(sub_root,sub_name)
#print(input_video_path)
output_root_prefix = output_root_dir+'/'+basename
#print(output_video_path)
cmd = cmd_format.format(input_video_path, output_root_prefix)
commands.append(cmd)
num_jobs = 8
logging.info("processing videos in parallel, num_jobs={}".format(num_jobs))
Parallel(n_jobs=num_jobs)(delayed(exe_cmd)(cmd) for cmd in commands)
|
openbook_hashtags/tests/tests_hashtag.py | TamaraAbells/okuna-api | 164 | 11135462 | <filename>openbook_hashtags/tests/tests_hashtag.py
import json
from django.urls import reverse
from faker import Faker
from rest_framework import status
from openbook_common.tests.helpers import make_user, make_authentication_headers_for_user, make_hashtag, \
make_fake_post_text, make_community, make_circle, make_moderation_category, make_global_moderator
from openbook_common.tests.models import OpenbookAPITestCase
from openbook_communities.models import Community
from openbook_moderation.models import ModeratedObject
fake = Faker()
class HashtagAPITests(OpenbookAPITestCase):
"""
HashtagAPITests
"""
def test_can_retrieve_hashtag(self):
"""
should be able to retrieve a hashtag and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
hashtag = make_hashtag()
hashtag_name = hashtag.name
url = self._get_url(hashtag_name=hashtag_name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parsed_response = json.loads(response.content)
self.assertIn('name', parsed_response)
response_name = parsed_response['name']
self.assertEqual(response_name, hashtag_name)
def test_can_retrieve_foreign_user_reported_hashtag(self):
"""
should be able to retrieve a foreign user reported hashtag and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
hashtag = make_hashtag()
hashtag_name = hashtag.name
reporter = make_user()
report_category = make_moderation_category()
reporter.report_hashtag_with_name(hashtag_name=hashtag_name, category_id=report_category.pk)
url = self._get_url(hashtag_name=hashtag_name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parsed_response = json.loads(response.content)
self.assertIn('name', parsed_response)
response_name = parsed_response['name']
self.assertEqual(response_name, hashtag_name)
def test_cant_retrieve_reported_hashtag(self):
"""
should not be able to retrieve a reported hashtag and return 403
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
hashtag = make_hashtag()
hashtag_name = hashtag.name
report_category = make_moderation_category()
user.report_hashtag_with_name(hashtag_name=hashtag_name, category_id=report_category.pk)
url = self._get_url(hashtag_name=hashtag_name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_cant_retrieve_reported_and_approved_hashtag(self):
"""
should not be able to retrieve a reported and approved hashtag and return 403
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
hashtag = make_hashtag()
hashtag_name = hashtag.name
reporter = make_user()
report_category = make_moderation_category()
reporter.report_hashtag_with_name(hashtag_name=hashtag_name, category_id=report_category.pk)
global_moderator = make_global_moderator()
moderated_object = ModeratedObject.get_or_create_moderated_object_for_hashtag(hashtag=hashtag,
category_id=report_category.pk)
global_moderator.approve_moderated_object(moderated_object=moderated_object)
url = self._get_url(hashtag_name=hashtag_name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_can_retrieve_hashtag_with_posts_count(self):
"""
should be able to retrieve a hashtag with its posts count and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
hashtag = make_hashtag()
hashtag_name = hashtag.name
amount_of_posts = 3
for i in range(0, amount_of_posts):
user = make_user()
post_text = '#%s' % hashtag_name
user.create_public_post(text=post_text)
url = self._get_url(hashtag_name=hashtag_name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parsed_response = json.loads(response.content)
self.assertIn('posts_count', parsed_response)
posts_count = parsed_response['posts_count']
self.assertEqual(posts_count, amount_of_posts)
def _get_url(self, hashtag_name):
return reverse('hashtag', kwargs={
'hashtag_name': hashtag_name
})
class HashtagPostsAPITests(OpenbookAPITestCase):
"""
HashtagPostsAPITests
"""
def test_retrieves_public_community_post_with_hashtag(self):
"""
should retrieve posts with a given hashtag and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community_creator = make_user()
community = make_community(creator=community_creator)
post_creator = make_user()
post_creator.join_community_with_name(community_name=community.name)
hashtag = make_hashtag()
fake_post_text = make_fake_post_text() + ' and a little hashtag #%s' % hashtag.name
post_creator.create_community_post(community_name=community.name, text=fake_post_text)
url = self._get_url(hashtag_name=hashtag.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parsed_response = json.loads(response.content)
self.assertEqual(len(parsed_response), 1)
retrieved_posts = parsed_response[0]
self.assertEqual(retrieved_posts['text'], fake_post_text)
def test_retrieves_world_circle_post_with_hashtag(self):
"""
should retrieve world circle post with a given hashtag and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
post_creator = make_user()
hashtag = make_hashtag()
fake_post_text = make_fake_post_text() + ' and a little hashtag #%s' % hashtag.name
post_creator.create_public_post(text=fake_post_text)
url = self._get_url(hashtag_name=hashtag.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parsed_response = json.loads(response.content)
self.assertEqual(len(parsed_response), 1)
retrieved_posts = parsed_response[0]
self.assertEqual(retrieved_posts['text'], fake_post_text)
def test_does_not_retrieve_private_community_not_part_of_post_with_hashtag(self):
"""
        should not retrieve a post from a private community the user is not part of, given a hashtag, and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community_creator = make_user()
community = make_community(creator=community_creator, type=Community.COMMUNITY_TYPE_PRIVATE)
post_creator = make_user()
community_creator.invite_user_with_username_to_community_with_name(community_name=community.name,
username=post_creator.username)
post_creator.join_community_with_name(community_name=community.name)
hashtag = make_hashtag()
fake_post_text = make_fake_post_text() + ' and a little hashtag #%s' % hashtag.name
post_creator.create_community_post(community_name=community.name, text=fake_post_text)
url = self._get_url(hashtag_name=hashtag.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parsed_response = json.loads(response.content)
self.assertEqual(len(parsed_response), 0)
def test_does_not_retrieve_private_community_part_of_post_with_hashtag(self):
"""
        should not retrieve a post from a private community, even one the user is part of, given a hashtag, and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
community_creator = make_user()
community = make_community(creator=community_creator, type=Community.COMMUNITY_TYPE_PRIVATE)
post_creator = make_user()
community_creator.invite_user_with_username_to_community_with_name(community_name=community.name,
username=post_creator.username)
post_creator.join_community_with_name(community_name=community.name)
community_creator.invite_user_with_username_to_community_with_name(community_name=community.name,
username=user.username)
user.join_community_with_name(community_name=community.name)
hashtag = make_hashtag()
fake_post_text = make_fake_post_text() + ' and a little hashtag #%s' % hashtag.name
post_creator.create_community_post(community_name=community.name, text=fake_post_text)
url = self._get_url(hashtag_name=hashtag.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parsed_response = json.loads(response.content)
self.assertEqual(len(parsed_response), 0)
def test_does_not_retrieve_encircled_post_with_hashtag(self):
"""
        should not retrieve an encircled post with a given hashtag and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
post_creator = make_user()
circle = make_circle(creator=post_creator)
hashtag = make_hashtag()
fake_post_text = make_fake_post_text() + ' and a little hashtag #%s' % hashtag.name
post_creator.create_encircled_post(circles_ids=[circle.pk], text=fake_post_text)
url = self._get_url(hashtag_name=hashtag.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parsed_response = json.loads(response.content)
self.assertEqual(len(parsed_response), 0)
def test_does_not_retrieve_post_from_blocked_person_with_hashtag(self):
"""
should not retrieve a post from a blocked person with a given hashtag and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
post_creator = make_user()
hashtag = make_hashtag()
fake_post_text = make_fake_post_text() + ' and a little hashtag #%s' % hashtag.name
post_creator.create_public_post(text=fake_post_text)
user.block_user_with_username(username=post_creator.username)
url = self._get_url(hashtag_name=hashtag.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parsed_response = json.loads(response.content)
self.assertEqual(len(parsed_response), 0)
def test_does_not_retrieve_post_from_blocking_person_with_hashtag(self):
"""
should not retrieve a post from a blocking person with a given hashtag and return 200
"""
user = make_user()
headers = make_authentication_headers_for_user(user)
post_creator = make_user()
hashtag = make_hashtag()
fake_post_text = make_fake_post_text() + ' and a little hashtag #%s' % hashtag.name
post_creator.create_public_post(text=fake_post_text)
post_creator.block_user_with_username(username=user.username)
url = self._get_url(hashtag_name=hashtag.name)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parsed_response = json.loads(response.content)
self.assertEqual(len(parsed_response), 0)
def _get_url(self, hashtag_name):
return reverse('hashtag-posts', kwargs={
'hashtag_name': hashtag_name
})
|
tests/conftest.py | pagladashu38/pytest-splinter4 | 226 | 11135496 | """Configuration for pytest runner."""
pytest_plugins = "pytester"
|
caffe2/quantization/server/concat_dnnlowp_op_test.py | Hacky-DH/pytorch | 60,067 | 11135516 | <filename>caffe2/quantization/server/concat_dnnlowp_op_test.py
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPConcatOpTest(hu.HypothesisTestCase):
@given(
dim1=st.integers(0, 256),
dim2=st.integers(0, 256),
axis=st.integers(0, 1),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_concat_int(
self, dim1, dim2, axis, in_quantized, out_quantized, gc, dc
):
# X has scale 1, so exactly represented after quantization
min_ = -100
max_ = min_ + 255
X = np.round(np.random.rand(dim1, dim2) * (max_ - min_) + min_)
X = X.astype(np.float32)
if dim1 >= 1 and dim2 >= 2:
X[0, 0] = min_
X[0, 1] = max_
elif dim2 == 1:
return
# Y has scale 1/2, so exactly represented after quantization
Y = np.round(np.random.rand(dim1, dim2) * 255 / 2 - 64)
Y = Y.astype(np.float32)
if dim1 >= 1 and dim2 >= 2:
Y[0, 0] = -64
Y[0, 1] = 127.0 / 2
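        # (Hedged note on the scale comments above: with an 8-bit range of 255 levels,
        # scale = (max - min) / 255, so X spans 255 -> scale 1 and Y spans
        # 63.5 - (-64) = 127.5 -> scale 1/2.)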
Output = collections.namedtuple("Output", ["Z", "op_type", "engine"])
outputs = []
op_engine_list = [
("Concat", ""),
("Concat", "DNNLOWP"),
("Int8Concat", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
do_dequantize = "DNNLOWP" in engine and out_quantized
if do_quantize:
quantize_x = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
)
quantize_y = core.CreateOperator(
"Quantize", ["Y"], ["Y_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize_x, quantize_y])
concat = core.CreateOperator(
op_type,
["X_q", "Y_q"] if do_quantize else ["X", "Y"],
["Z_q" if do_dequantize else "Z", "split"],
dequantize_output=not do_dequantize,
engine=engine,
device_option=gc,
axis=axis,
)
net.Proto().op.extend([concat])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Z_q"], ["Z"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.create_blob("Y").feed(Y, device_option=gc)
self.ws.create_blob("split")
self.ws.run(net)
outputs.append(
Output(Z=self.ws.blobs["Z"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs)
|
scripts/sg-toolbox/SG-Font-Check-Metrics.py | tphinney/science-gothic | 104 | 11135561 | <filename>scripts/sg-toolbox/SG-Font-Check-Metrics.py
#FLM: Check: Metric links
# ----------------------------------------
# (C) <NAME>, 2019 (http://www.kateliev.com)
# (C) Karandash Type Foundry (http://www.karandash.eu)
#-----------------------------------------
# www.typerig.com
# No warranties. By using this you agree
# that you use it at your own risk!
# - Dependencies -----------------
import string
import fontlab as fl6
from typerig.proxy import pFont, pGlyph
def output(msg_str, glyph_list):
result_string = '/'+' /'.join(sorted(glyph_list)) if len(glyph_list) else None
print '%s: %s\n' %(msg_str, result_string)
# - Init ------------------------------------------------
app_version = '0.02'
app_name = '[SG] Check: Metric links'
font = pFont()
process_glyphs = font.pGlyphs()
glyphs_left = []
glyphs_right = []
glyphs_search = set()
# -- Config
left_search = 'l('
right_search = 'r('
layer_criteria =' S'
check_layers = [layer for layer in font.masters() if layer_criteria in layer]
# - Process --------------------------------------------
print '%s %s\n' %(app_name, app_version) + '-'*30
# - Decompose mixed reference glyphs
for work_glyph in process_glyphs:
for layerName in check_layers:
lsb_eq, rsb_eq = work_glyph.getSBeq(layerName)
if left_search in lsb_eq or right_search in lsb_eq:
glyphs_left.append(work_glyph.name)
break
if left_search in rsb_eq or right_search in rsb_eq:
glyphs_right.append(work_glyph.name)
break
# - Output
output('Glyphs LSB', list(set(glyphs_left)))
output('Glyphs RSB', list(set(glyphs_right)))
# - Finish --------------------------------------------
print 'DONE.'
|
igibson/examples/demo/vr_demos/test/vr_scroll_wrap_text_test.py | NishanthJKumar/iGibson | 360 | 11135585 | """ This VR hand dexterity benchmark allows the user to interact with many types of objects
and interactive objects, and provides a good way to qualitatively measure the dexterity of a VR hand.
You can use the left and right controllers to start/stop/reset the timer,
as well as show/hide its display. The "overlay toggle" action and its
corresponding button index mapping can be found in the vr_config.yaml file in the igibson folder.
"""
import os
import numpy as np
import pybullet as p
import pybullet_data
import igibson
from igibson.objects.articulated_object import ArticulatedObject
from igibson.objects.ycb_object import YCBObject
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings
from igibson.robots.behavior_robot import BehaviorRobot
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator
# Objects in the benchmark - corresponds to Rs kitchen environment, for range of items and
# transferability to the real world
# Note: the scene will automatically load in walls/ceilings/floors in addition to these objects
benchmark_names = [
"bottom_cabinet",
"countertop",
"dishwasher",
"door",
"fridge",
"microwave",
"oven",
"sink",
"top_cabinet",
"trash_can",
]
# Set to true to print Simulator step() statistics
PRINT_STATS = False
# Set to true to use gripper instead of VR hands
USE_GRIPPER = False
# HDR files for PBR rendering
hdr_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_02.hdr")
hdr_texture2 = os.path.join(igibson.ig_dataset_path, "scenes", "background", "probe_03.hdr")
light_modulation_map_filename = os.path.join(
igibson.ig_dataset_path, "scenes", "Rs_int", "layout", "floor_lighttype_0.png"
)
background_texture = os.path.join(igibson.ig_dataset_path, "scenes", "background", "urban_street_01.jpg")
SYMBOL_LIST = [l for l in ".,:;!?()+-=abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"]
def gen_rand_string():
"""
Generates random string of random length to test wrapping and scrolling.
"""
letter_num = np.random.randint(500)
text = "".join(np.random.choice(SYMBOL_LIST) for i in range(letter_num))
return text
def main():
# VR rendering settings
vr_rendering_settings = MeshRendererSettings(
optimized=True,
fullscreen=False,
env_texture_filename=hdr_texture,
env_texture_filename2=hdr_texture2,
env_texture_filename3=background_texture,
light_modulation_map_filename=light_modulation_map_filename,
enable_shadow=True,
enable_pbr=True,
msaa=True,
light_dimming_factor=1.0,
)
s = Simulator(mode="vr", rendering_settings=vr_rendering_settings)
scene = InteractiveIndoorScene(
"Rs_int", load_object_categories=benchmark_names, load_room_types=["kitchen", "lobby"]
)
# scene.load_object_categories(benchmark_names)
s.import_ig_scene(scene)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
vr_agent = BehaviorRobot(s, use_gripper=USE_GRIPPER, normal_color=True)
# Move VR agent to the middle of the kitchen
s.set_vr_start_pos(start_pos=[0, 2.1, 0], vr_height_offset=-0.02)
# Mass values to use for each object type - len(masses) objects will be created of each type
masses = [1, 5, 10]
# List of objects to load with name: filename, type, scale, base orientation, start position, spacing vector and spacing value
obj_to_load = {
"mustard": ("006_mustard_bottle", "ycb", 1, (0.0, 0.0, 0.0, 1.0), (0.0, 1.6, 1.18), (-1, 0, 0), 0.15),
"marker": ("040_large_marker", "ycb", 1, (0.0, 0.0, 0.0, 1.0), (1.5, 2.6, 0.92), (0, -1, 0), 0.15),
"can": ("005_tomato_soup_can", "ycb", 1, (0.0, 0.0, 0.0, 1.0), (1.7, 2.6, 0.95), (0, -1, 0), 0.15),
"drill": ("035_power_drill", "ycb", 1, (0.0, 0.0, 0.0, 1.0), (1.5, 2.2, 1.15), (0, -1, 0), 0.2),
"small_jenga": (
"jenga/jenga.urdf",
"pb",
1,
(0.000000, 0.707107, 0.000000, 0.707107),
(-0.9, 1.6, 1.18),
(-1, 0, 0),
0.1,
),
"large_jenga": (
"jenga/jenga.urdf",
"pb",
2,
(0.000000, 0.707107, 0.000000, 0.707107),
(-1.3, 1.6, 1.31),
(-1, 0, 0),
0.15,
),
"small_duck": (
"duck_vhacd.urdf",
"pb",
1,
(0.000000, 0.000000, 0.707107, 0.707107),
(-1.8, 1.95, 1.12),
(1, 0, 0),
0.15,
),
"large_duck": (
"duck_vhacd.urdf",
"pb",
2,
(0.000000, 0.000000, 0.707107, 0.707107),
(-1.95, 2.2, 1.2),
(1, 0, 0),
0.2,
),
"small_sphere": (
"sphere_small.urdf",
"pb",
1,
(0.000000, 0.000000, 0.707107, 0.707107),
(-0.5, 1.63, 1.15),
(-1, 0, 0),
0.15,
),
"large_sphere": (
"sphere_small.urdf",
"pb",
2,
(0.000000, 0.000000, 0.707107, 0.707107),
(-0.5, 1.47, 1.15),
(-1, 0, 0),
0.15,
),
}
for name in obj_to_load:
fpath, obj_type, scale, orn, pos, space_vec, space_val = obj_to_load[name]
for i in range(len(masses)):
if obj_type == "ycb":
handle = YCBObject(fpath, scale=scale)
elif obj_type == "pb":
handle = ArticulatedObject(fpath, scale=scale)
s.import_object(handle, use_pbr=False, use_pbr_mapping=False)
# Calculate new position along spacing vector
new_pos = (
pos[0] + space_vec[0] * space_val * i,
pos[1] + space_vec[1] * space_val * i,
pos[2] + space_vec[2] * space_val * i,
)
handle.set_position(new_pos)
handle.set_orientation(orn)
p.changeDynamics(handle.body_id, -1, mass=masses[i])
# Text position/size is described in percentage of axes in screen space
wrap_scroll_text = s.add_vr_overlay_text(
text_data=gen_rand_string(), font_size=70, font_style="Bold", color=[0, 0, 0]
)
# Main simulation loop
while True:
s.step(print_stats=PRINT_STATS)
r_toggle = s.query_vr_event("right_controller", "overlay_toggle")
l_toggle = s.query_vr_event("left_controller", "overlay_toggle")
        # Overlay toggle action on right controller regenerates the random overlay text
if r_toggle:
wrap_scroll_text.set_text(gen_rand_string())
if l_toggle:
s.set_hud_show_state(not s.get_hud_show_state())
scroll_dir = s.get_scroll_input()
if scroll_dir > -1:
wrap_scroll_text.scroll_text(up=scroll_dir)
# Update VR agent
vr_agent.apply_action()
s.disconnect()
if __name__ == "__main__":
main()
|
setup.py | mohamad-amin/falkon | 130 | 11135604 | import os
import os.path as osp
from typing import Any, Tuple, List
import numpy
from setuptools import setup, find_packages, Extension
try:
import torch
except ImportError:
raise ImportError("PyTorch must be pre-installed before installing Falkon.")
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME, CppExtension
WITH_CUDA = torch.cuda.is_available() and CUDA_HOME is not None
try:
from Cython.Build import cythonize
except ImportError:
cythonize = None
WITH_CYTHON = False
else:
WITH_CYTHON = True
CURRENT_DIR = "." # osp.dirname(__file__)
def get_version(root_dir):
with open(os.path.join(root_dir, 'VERSION')) as version_file:
version = version_file.read().strip()
return version
def parallel_backend():
# https://github.com/suphoff/pytorch_parallel_extension_cpp/blob/master/setup.py
from torch.__config__ import parallel_info
parallel_info_string = parallel_info()
parallel_info_array = parallel_info_string.splitlines()
backend_lines = [line for line in parallel_info_array if line.startswith('ATen parallel backend:')]
if len(backend_lines) != 1:
return None
backend = backend_lines[0].rsplit(': ')[1]
return backend
def parallel_extra_compile_args():
backend = parallel_backend()
if (backend == 'OpenMP'):
return ['-DAT_PARALLEL_OPENMP', '-fopenmp']
elif (backend == 'native thread pool'):
return ['-DAT_PARALLEL_NATIVE']
elif (backend == 'native thread pool and TBB'):
return ['-DAT_PARALLEL_NATIVE_TBB']
return []
def torch_version():
version = torch.__version__
split_version = version.split(".")
# With torch 1.10.0 the version 'number' include CUDA version (e.g. '1.10.0+cu102').
# Here we remove the CUDA version.
for i in range(len(split_version)):
if '+' in split_version[i]:
split_version[i] = split_version[i].split('+')[0]
return [int(v) for v in split_version]
def torch_version_macros():
int_version = torch_version()
return [('TORCH_VERSION_MAJOR', int_version[0]),
('TORCH_VERSION_MINOR', int_version[1]),
('TORCH_VERSION_PATCH', int_version[2])]
def get_extensions():
extensions = []
torch_v = torch_version()
# All C/CUDA routines are compiled into a single extension
extension_cls = CppExtension
ext_dir = osp.join(CURRENT_DIR, 'falkon', 'csrc')
ext_files = [
'pytorch_bindings.cpp', 'cpu/sparse_norm.cpp'
]
if torch_v[0] >= 1 and torch_v[1] >= 7:
ext_files.append('cpu/square_norm_cpu.cpp')
compile_args = {'cxx': parallel_extra_compile_args()}
link_args = []
macros: List[Tuple[str, Any]] = torch_version_macros()
libraries = []
if WITH_CUDA:
extension_cls = CUDAExtension
ext_files.extend([
'cuda/vec_mul_triang_cuda.cu', 'cuda/spspmm_cuda.cu', 'cuda/multigpu_potrf.cu',
'cuda/mul_triang_cuda.cu', 'cuda/lauum.cu', 'cuda/csr2dense_cuda.cu',
'cuda/copy_transpose_cuda.cu', 'cuda/copy_triang_cuda.cu',
])
if torch_v[0] >= 1 and torch_v[1] >= 7:
ext_files.append('cuda/square_norm_cuda.cu')
macros.append(('WITH_CUDA', None))
nvcc_flags = os.getenv('NVCC_FLAGS', '')
nvcc_flags = [] if nvcc_flags == '' else nvcc_flags.split(' ')
nvcc_flags += ['--expt-relaxed-constexpr', '--expt-extended-lambda']
compile_args['nvcc'] = nvcc_flags
link_args += ['-lcusparse', '-l', 'cusparse',
'-lcublas', '-l', 'cublas',
'-lcusolver', '-l', 'cusolver']
libraries.extend(['cusolver', 'cublas', 'cusparse'])
extensions.append(
extension_cls(
"falkon.c_ext",
sources=[osp.join(ext_dir, f) for f in ext_files],
include_dirs=[ext_dir],
define_macros=macros,
extra_compile_args=compile_args,
extra_link_args=link_args,
libraries=libraries,
)
)
# Cyblas helpers
file_ext = '.pyx' if WITH_CYTHON else '.c'
cyblas_compile_args = [
'-shared', '-fPIC', '-fopenmp', '-O3', '-Wall', '-std=c99']
cyblas_ext = [Extension('falkon.la_helpers.cyblas',
sources=[osp.join('falkon', 'la_helpers', 'cyblas' + file_ext)],
include_dirs=[numpy.get_include()],
extra_compile_args=cyblas_compile_args,
#define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")],
extra_link_args=['-fPIC', '-fopenmp', '-s'])]
if WITH_CYTHON:
cyblas_ext = cythonize(cyblas_ext)
extensions.extend(cyblas_ext)
return extensions
# Requirements -- TODO: We also have requirements.txt files lying around which are out of sync.
install_requires = [
'torch>=1.9',
'scipy',
'numpy',
'scikit-learn',
'psutil',
'dataclasses;python_version<"3.7"',
]
test_requires = [
'pandas',
'pytest',
'pytest-cov',
'coverage',
'codecov',
'flake8',
]
doc_requires = [
'pandas',
'numpydoc',
'sphinx',
'nbsphinx',
'sphinx-rtd-theme',
'matplotlib',
'jupyter',
'ghp-import',
# There is also pandoc
]
extras = {
'test': test_requires,
'doc': doc_requires
}
setup(
name="falkon",
version=get_version("falkon"),
description="Fast, GPU enabled, approximate kernel ridge regression solver.",
python_requires='~=3.6',
setup_requires=[
# Setuptools 18.0 properly handles Cython extensions.
'setuptools>=18.0',
'numpy',
],
tests_require=test_requires,
extras_require=extras,
ext_modules=get_extensions(),
packages=find_packages(),
cmdclass={
'build_ext': BuildExtension.with_options(no_python_abi_suffix=True, use_ninja=True)
},
install_requires=install_requires,
include_package_data=True, # Since we have a MANIFEST.in this will take all from there.
)
|
common/src/stack/command/stack/commands/remove/vm/storage/__init__.py | sammeidinger/stack | 123 | 11135624 | # @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
import stack.commands
from stack.exception import ParamError
class Command(stack.commands.remove.vm.Command):
"""
Mark a virtual machine's disk for deletion.
<arg type='string' name='host' optional='1'>
A single hostname to remove a disk from.
</arg>
<param type='string' name='disk' optional='1'>
The name of the disk to remove from the virtual machine.
</param>
<example cmd='remove vm storage virtual-backend-0-1 disk=sda'>
Mark disk sda for deletion from virtual-backend-0-1. Upon running
sync vm, the disk will be deleted on the hypervisor and removed from
the VM config.
</example>
"""
def run(self, params, args):
# We only want a single host argument
host = self.valid_vm_args([self.getSingleHost(args)])
# Get the disk names for the host
host_disks = [disk['Name'] for disk in self.call('list.vm.storage', host)]
(disk_name, ) = self.fillParams([
('disk', None, True)
])
if disk_name not in host_disks:
raise ParamError(self, 'disk', f'{disk_name} not a defined disk on {host[0]}')
vm_id = self.vm_id_by_name(host)
self.db.execute(
"""
UPDATE virtual_machine_disks
SET disk_delete = True
WHERE virtual_machine_disks.virtual_machine_id = %s
AND virtual_machine_disks.disk_name = %s
""",
(vm_id, disk_name)
)
|
ck/repo/module/math.frontier/module.py | krai/ck | 480 | 11135657 | <filename>ck/repo/module/math.frontier/module.py
#
# Collective Knowledge (detect frontier for multi-objective optimizations (such as execution time vs energy vs code size vs faults vs price ...))
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: <NAME>, <EMAIL>, http://fursin.net
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# Filter frontier (leave only best points) - my own greedy and probably not very optimal algorithm
#
# TBD: should leave only a few points otherwise can be quickly too many
# particularly if more than 2 dimensions (performance, energy, size, faults)
#
# Note, that we minimize all dimensions. Otherwise, provide reversed dimension.
#
# HELP IS APPRECIATED!
def xfilter(i):
"""
Input: {
points - dict with points, each has dict with optimization dimensions (should have the same names)
(frontier_keys) - list of keys to leave only best points during multi-objective autotuning
(multi-objective optimization)
If omited, use all keys
(reverse_keys) - list of values associated with above keys. If True, reverse sorting for a give key
(by default descending) - can't be used without "frontier_keys" due to lack of order in python dicts2
(margins) - list of margins when comparing values, i.e. Vold/Vnew < this number (such as 1.10 instead of 1).
will be used if !=None
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
points - filtered points!
deleted_points - deleted points
}
"""
oo=i.get('out','')
points=i['points']
lp=len(points)
dpoints={}
uids=list(points.keys())
if oo=='con':
ck.out('Original number of points: '+str(lp))
fk=i.get('frontier_keys',[])
fkr=i.get('reverse_keys',[])
lrk=len(fkr)
mar=i.get('margins',[])
lmar=len(mar)
if lp>1:
for l0 in range(0,lp,1):
ul0=uids[l0]
if ul0!='':
p0=points[ul0]
# Check if there is at least one point with all better dimensions
keep=True
for l1 in range(0,lp,1):
ul1=uids[l1]
if ul1!='' and ul1!=ul0:
p1=points[ul1]
better=True
if len(fk)>0:
ks=fk
else:
ks=list(p0.keys())
for dim in range(0, len(ks)):
d0=ks[dim]
v0=p0.get(d0, None)
if v0!=None and v0!='':
v0=float(v0)
v1=p1.get(d0,None)
if v1!=None and v1!='':
v1=float(v1)
if v1==0: v1=v0/10
if v1==0: v1=0.01
m=1.0
if dim<lmar and mar[dim]!=None:
m=mar[dim]
if dim<lrk and fkr[dim]==True:
if v1==0 or (v0/v1)>m:
better=False
break
elif v0==0 or (v0/v1)<m:
better=False
break
if better:
keep=False
break
if not keep:
dpoints[ul0]=points[ul0]
del(points[ul0])
uids[l0]=''
lp=len(points)
if oo=='con':
ck.out('Number of points after filtering: '+str(lp))
return {'return':0, 'points':points, 'deleted_points':dpoints}
##############################################################################
# Leave points on 2D frontier
# TBD: need to redesign to support any number of dimensions
def filter_2d(i):
"""
Input: {
points (list) : [{"dim1":value11, "dim2":value12, ...},
{"dim1":value21, "dim2":value22, ...}]
frontier_keys (list) : ["dim1", "dim2"] - which keys to use for the frontier
(reverse_keys) (list) : ["dim2"] - which keys to reverse (smaller is better)
(plot) (str) : if "yes", plot graph with a frontier using matplotlib
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
frontier [list] - list of points on a 2D frontier
}
"""
plot=i.get('plot','')=='yes'
points=i['points']
frontier_keys=i['frontier_keys']
assert len(frontier_keys)==2, 'must be 2 frontier keys'
reverse_keys=i.get('reverse_keys',[])
kx=frontier_keys[0]
ky=frontier_keys[1]
revx=True if kx in reverse_keys else False
revy=True if ky in reverse_keys else False
if len(points)<3:
frontier=points
else:
# Sort by 0 dim
spoints=sorted(points, key=lambda x: x.get(kx,0), reverse=revx)
frontier=[spoints[0]]
for p in spoints[1:]:
if revy:
if p.get(ky,0)>=frontier[-1].get(ky,0):
frontier.append(p)
elif p.get(ky,0)<=frontier[-1].get(ky,0):
frontier.append(p)
if plot:
import matplotlib.pyplot as plt
x1=[]
y1=[]
for v in points:
x1.append(v.get(kx,0))
y1.append(v.get(ky,0))
plt.scatter(x1,y1)
x2=[]
y2=[]
for v in frontier:
x2.append(v.get(kx,0))
y2.append(v.get(ky,0))
plt.plot(x2,y2)
plt.show()
return {'return':0, 'frontier':frontier}
|
DaPy/matlib.py | huihui7987/DaPy | 552 | 11135666 | <reponame>huihui7987/DaPy<gh_stars>100-1000
from array import array
from .core import Matrix, SeriesSet, Series
from .core import nan, inf
from .core import range, filter, zip, range
from .core import is_math, is_seq, is_iter, is_value
from .core.base import STR_TYPE
from .core.base.IndexArray import SortedIndex
from collections import namedtuple, deque, Iterable, Counter
from itertools import repeat, chain
from warnings import warn
from functools import reduce
from math import sqrt, log as math_log, exp as math_exp
__all__ = ['dot', 'multiply', 'exp', 'zeros', 'ones', 'C', 'P',
'cov', 'corr', 'frequency', 'quantiles', '_sum', '_max',
'distribution','describe', 'mean', 'diag', 'log']
def P(n, k):
'''"k" is for permutation numbers.
A permutation is an ordered sequence of elements selected from a given
finite set, without repetitions, and not necessarily using all elements
of the given set.
Formula
-------
n!
P(n, k) = ----------
(n - k)!
'''
if k == 0 or n == k:
return 1
upper = reduce(multiply, range(1, 1+n))
down = reduce(multiply, range(1, 1+ n - k))
return float(upper / down)
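# Hedged sanity check for P(): P(5, 2) = 5! / 3! = 20, so the call below is expected
# to return 20.0 (the function returns a float).
# >>> P(5, 2)
# 20.0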
def C(n, k):
'''"C" is for combination numbers.
A combination number is an un-ordered collection of distinct elements,
usually of a prescribed size and taken from a given set.
Formula
-------
n!
C(n, k) = -----------
k!(n - k)!
'''
if k == 0 or n == k:
return 1
upper = reduce(multiply, range(1, 1+n))
left = reduce(multiply, range(1, 1+k))
right = reduce(multiply, range(1, 1+ n - k))
return float(upper / (left * right))
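# Hedged sanity check for C(): C(5, 2) = 5! / (2! * 3!) = 10, so the call below is
# expected to return 10.0.
# >>> C(5, 2)
# 10.0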
def add(m1, m2):
if hasattr(m1, '__add__'):
return m1 + m2
if is_seq(m1):
return Matrix(m1) + m2
raise TypeError('add() expectes elements which can add.')
def _abs(data):
if hasattr(data, 'shape'):
new = [0] * data.shape[0]
if hasattr(data, 'tolist'):
data = data.tolist()
for i, line in enumerate(data):
try:
new[i] = Series(map(abs, line))
except TypeError:
new[i] = abs(line)
return Matrix(new)
if is_math(data):
return abs(data)
if is_iter(data):
return Series(map(_abs, data))
    raise TypeError('expects an iterable or numeric for _abs(), got %s' % type(data))
def sign(x):
'''sign([-2, -3, 2, 2, 1]) -> [-1, -1, 1, 1, 1])'''
if not hasattr(x, '__abs__') or not hasattr(x, '__div__'):
x = Matrix(x)
return x / abs(x)
def multiply(m1, m2):
if is_math(m1) and is_math(m2):
return m1 * m2
if isinstance(m1, Matrix) or isinstance(m2, Matrix):
return m1 * m2
return Matrix(m1) * m2
def dot(matrix_1, matrix_2):
if hasattr(matrix_1, 'dot'):
return matrix_1.dot(matrix_2)
try:
col_size_1 = len(matrix_1[0])
col_size_2 = len(matrix_2[0])
line_size_1 = len(matrix_1)
line_size_2 = len(matrix_2)
columns = None
except TypeError:
raise TypeError('unsupported operation dot(), with type'+\
' %s and ,' % type(matrix_1) +\
'%s.' % type(matrix_2))
if col_size_1 != line_size_2:
raise ValueError('shapes (%d, %d) '%(line_size_1, col_size_1)+\
'and (%d, %d) not aligned.'%(line_size_2, col_size_2))
new_ = list()
for i in range(line_size_1):
new_line = list()
for pos in range(col_size_2):
sumup = sum(matrix_1[i][j]*matrix_2[j][pos]
for j in range(col_size_1))
new_line.append(sumup)
new_.append(new_line)
return Matrix(new_)
def exp(other):
if hasattr(other, 'shape'):
new = [0] * other.shape[0]
for i, line in enumerate(other):
new[i] = map(math_exp, line)
return Matrix(new)
if is_math(other):
return math_exp(other)
if is_iter(other):
new_ = list()
for item in other:
new_.append(exp(item))
return new_
raise TypeError('expects an iterable or numeric for exp(), got %s'%type(other))
def create_mat(shape, num):
return Matrix().make(shape[0], shape[1], num)
def cumsum(series):
series, new = Series(series), Series()
init = 0
for value in series:
init += value
new.append(init)
return new
def count(df, value, axis=None):
    '''count occurrences of `value` (axis=None: total, 1: per record, 0: per variable)'''
    # NOTE: reconstructed from an unfinished draft; the axis convention is assumed to
    # follow _sum()/mean() below (1 -> each record, 0 -> each variable).
    assert axis in (None, 0, 1)
    if axis is None:
        if hasattr(df, 'count') and not is_seq(df[0]):
            return df.count(value)
        return sum(tuple(record).count(value) for record in df)
    if axis == 1:
        return [tuple(record).count(value) for record in df]
    return [tuple(col).count(value) for col in zip(*df)]
def zeros(shape):
return create_mat(shape, 0)
def ones(shape):
return create_mat(shape, 1)
def diag(values):
return Matrix().make_eye(len(values), values)
def diff(seq, lag=1):
return Series(seq).diff(lag)
def log(data, base=2.71828183):
if is_seq(data):
if is_seq(data[0]):
            return [list(map(log, record, [base] * len(record))) for record in data]
        return list(map(log, data, [base] * len(data)))
    return math_log(data, base)
def boxcox(value, lambda_=1, a=0, k=1):
if lambda_ == 0:
return log(value)
return ((value + a) ** lambda_ - 1) / (k + lambda_)
def _sum(data, axis=None):
'''Sum of sequence elements.
Parameters
----------
data : array_like
elements to sum.
axis : None, 0, 1
        determine how to summarize this data.
        None - sum all elements into one value,
        0 - sum the elements in each variable,
        1 - sum the elements in each record.
Return
------
Sum : float or int
a number of sum.
Examples
--------
>>> dp.sum([0.5, 1.5])
2.0
>>> dp.sum([[0.5, 0.7],
[0.2, 1.5]])
2.9
    >>> dp.sum([[0, 1],
                [0, 5]], axis=1) # sum of each record
    [1, 5]
    >>> dp.sum([[0, 1],
                [0, 5]], axis=0) # sum of each variable
    [0, 6]
'''
if hasattr(data, 'sum'):
return data.sum(axis)
if is_math(data):
return data
if is_iter(data) and not is_seq(data):
data = tuple(data)
if axis is None:
if any(map(is_value, data)) is False:
return _sum(map(_sum, data))
return sum(data)
if axis == 1:
return Matrix(map(sum, data)).T
if axis == 0:
return _sum([line for line in zip(*data)], axis=1)
def _max(data, axis=None):
data = SeriesSet(data)
if axis is None:
return max(map(max, data))
if axis == 0:
return map(max, data.values())
if axis == 1:
return map(max, data)
def median(data):
    '''median value of sequence data'''
    sort_data = sorted(data)
    size = len(sort_data)
    mid = size // 2
    if size % 2 == 0:
        return (sort_data[mid - 1] + sort_data[mid]) / 2.0
    return sort_data[mid]
def mean(data, axis=None):
'''average of sequence elements.
Parameters
----------
data : array_like
elements to average.
axis : None, 0, 1
        determine how to summarize this data.
None - average value of all elements into one value,
0 - average value of the elements in each variable,
1 - average value of the elements in each record.
Return
------
number : number or numbers in list
the mean of data
Examples
--------
>>> a = [[1, 2], [3, 4]]
>>> dp.mean(a)
2.5
>>> dp.mean([[0.5, 0.7],
[0.2, 1.5]])
0.725
>>> dp.mean([[0, 1],
[0, 5]], axis=1) # mean of each record
[0.5, 2.5]
>>> dp.mean([[0, 1],
[0, 5]], axis=0) # mean of each variable
[0.0, 3.0]
'''
if axis is None:
if hasattr(data, 'shape'):
return float(_sum(data, None)) / _sum(data.shape)
if is_seq(data) or isinstance(data, Series):
if is_seq(data[0]):
return float(_sum(data, axis)) / (len(data[0]) + len(data))
return float(_sum(data, axis)) / len(data)
if hasattr(data, 'shape'):
size = float(data.shape[axis])
elif axis == 1:
size = float(len(data))
else:
size = float(len(data[0]))
result = Matrix([value / size for value in _sum(data, axis)]).T
if result.shape.Ln == result.shape.Col == 1:
return result[0][0]
return result
def std(series):
return Series(series).std()
def cov(x, y=None, **kwrds):
'''
formula: cov(x,y) = E(xy) - E(x)E(y)
'''
# high level data structure
    assert is_iter(x) and (y is None or is_iter(y)), 'input X and Y must be containers'
if hasattr(x, 'shape') and x.shape[1] != 1 or y is None:
if hasattr(x, 'tolist'):
x = x.tolist()
size = len(x)
covX = [[0] * size for t in range(size)]
for i, x_1 in enumerate(x):
for j, x_2 in enumerate(x):
cov_num = cov(x_1, x_2)
covX[i][j] = cov_num
covX[j][i] = cov_num
return Matrix(covX)
# sequence level data structure
try:
X, Y = array('f', x), array('f', y)
except TypeError:
X, Y = array('f'), array('f')
for x, y in zip(x, y):
if is_math(x) and is_math(y):
X.append(x)
Y.append(y)
assert len(X) == len(Y), 'two variables have different lenth.'
size = float(len(X))
if size == 0:
warn('x and y has no efficient numbers.')
return 0
Ex, Ey = kwrds.get('Ex', None), kwrds.get('Ey', None)
if not Ex:
Ex = sum(X) / size
if not Ey:
Ey = sum(Y) / size
return sum(((x-Ex) * (y-Ey) for x, y in zip(X, Y))) / (size - 1)
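# Hedged sanity check for cov(): with the sample (n - 1) denominator used above,
# cov([1, 2, 3], [2, 4, 6]) is expected to return 2.0 (twice the variance of [1, 2, 3]).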
def corr(x, y, method='pearson'):
'''calculate the correlation between X and Y
Users can calculate the correlation between two sequence of numerical
data with three possible methods, which are Pearson Correlation
(K. Pearson, 1990), Spearmsn Correlation (Kendall M, 1990) and
Kendall Correlation (Kendall M, 1990). According to some researches,
Pearson is the best method when data is fully distributed with Gauss
Distribution. When significant nonlinear impacts or movement bias
appear in the data, Spearman and Kendall correlations are better.
Formulas
--------
cov(x,y)
r = -----------------, Pearson Correlation
std(x) * std(y)
6 * sum(di^2)
r = 1 - ----------------, Spearman Correlation
n(n^2 - 1)
Nc - Nd
r = --------------, Kendall Correlation
n(n - 1) / 2
Parameters
----------
x, y : array-like
sequence of values to calculate the correlation
method : str (default="pearson")
the method used to calculate correlation.
("pearson", "spearman" and 'kendall' are supported).
Returns
-------
value of correlation
Examples
--------
>>> from DaPy import corr
>>> x = []
>>> y = []
TODO
----
1. Gini Correlation, GC
    2. Order Statistics Correlation Coefficient, OSCC
References
----------
1. <NAME>. Statistical Methods, Experimental Design, and Scientific
Inference [M]. New York: Oxford University-Press, 1990.
2. <NAME>, <NAME>. Rank Correlation Methods[M]. 5th. New Yrok:
Oxford University Press, 1990.
    3. <NAME>. A Review on Correlation Coefficients. Journal of Guangdong
University of Technology. Vol.29 No.3. 2012.
4. <NAME> & <NAME>. Non-parametetric Statistics (Second Edition).
Publication of Tsinghua University. 2009.
5. <NAME> & <NAME>. Applied Regression Analysis. China People's University
Publication House. 2015.
See Also
--------
>>> help(DaPy.matlib._corr_kendall)
>>> help(DaPy.matlib._corr_spearman)
>>> help(DaPy.matlib._corr_pearson)
'''
assert isinstance(method, STR_TYPE), 'method should be a str or unicode'
assert method in ('pearson', 'spearman', 'kendall'), 'method should be "pearson" or "spearman"'
if method == 'pearson':
return _corr_pearson(x, y)
if method == 'spearman':
return _corr_spearman(x, y)
if method == 'kendall':
return _corr_kendall(x, y)
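# Hedged usage sketch for corr(): a perfectly linear increasing relation should give a
# Pearson coefficient of (approximately, up to float rounding) 1.0; the rank-based
# methods rely on DaPy's SeriesSet/get_ranks machinery in the helpers below.
# >>> corr([1, 2, 3, 4], [2, 4, 6, 8], method='pearson')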
def _corr_kendall(x, y):
'''calculate the kendall rank correlation between X and Y
In this function, we use the following formula to calculate the kendall
correlation between two series. In order to speed up the operation of
counting Nc and Nd parameters in the formula, binary select algorithm
is applied here. Finally, the worst time complexity of this algorithm
is O(4NlnN).
Formula
-------
Nc - Nd
tau = ---------------
n(n - 1) / 2
Reference
---------
<NAME> & <NAME>. Non-parametetric Statistics (Second Edition).
Publication of Tsinghua University. 2009.
'''
data = SeriesSet({'x': x, 'y': y}) # initialize the data
ranks = data.get_ranks(['x', 'y']).sort('x_rank', 'y_rank') # O(3NlogN)
sorted_y_rank_index = SortedIndex(ranks.y_rank) # O(NlogN)
cache = {} # used to remember selected values
Nc, Nd, n = 0, 0, data.shape.Ln
for i, value in enumerate(ranks.y_rank): # O(N)
upper, lower = cache.get(value, (None, None))
if not upper:
# find the data which are greater or lease than the current: O(logN)
upper = sorted_y_rank_index.upper(value, include_equal=False)
lower = sorted_y_rank_index.lower(value, include_equal=False)
cache[value] = [upper, lower]
# count numbers of values which are greater than the current: O(1)
Nc += sum(1 for ind_ in upper if ind_ > i)
Nd += sum(1 for ind_ in lower if ind_ > i)
    tau = 2.0 * (Nc - Nd) / (n ** 2 - n)
stat = tau * sqrt(9.0 * (n ** 2 - 1.0) / (4 * n + 20))
if abs(stat) < 1.65:
return 0
return tau
def _corr_spearman(x, y):
'''calculate the spearman rank correlation between X and Y
Formula
-------
6*SUM(di^2)
r = 1 - ----------------
n(n^2 - 1)
Reference
---------
<NAME> & <NAME>. Applied Regression Analysis. China People's University
Publication House. 2015.
'''
data = SeriesSet({'x': x, 'y': y})
ranks = data.get_ranks(['x', 'y'])
diff_sqrt = (ranks.x_rank - ranks.y_rank) ** 2
n = ranks.shape.Ln
r = 1 - (6.0 * diff_sqrt.sum()) / (n ** 3 - n)
stat = r * sqrt((n - 2) / (1 - r))
if abs(stat) < 1.65:
return 0
return r
def _corr_pearson(x, y):
'''calculate the pearson correlation between X and Y
formula
-------
cov(x,y)
corr(x,y) = -----------------
std(x) * std(y)
'''
return cov(x, y) / (cov(x, x) * cov(y, y)) ** 0.5
def frequency(data, cut=0.5):
statistic = namedtuple('Frequency', ['Lower', 'Equal', 'Upper'])
Group1, Group2, Group3 = 0, 0, 0
size = float(len(data))
for each in data:
if each < cut:
Group1 += 1
elif each > cut:
Group3 += 1
else:
Group2 += 1
return statistic(Group1/size, Group2/size, Group3/size)
def quantiles(data, points=[0.05,0.1,0.25,0.5,0.75,0.9,0.95]):
data = sorted(data)
    length = len(data)
    return [data[int(length * point)] for point in points]
def distribution(data, breaks=10, x_label=False):
assert isinstance(breaks, int)
data = Series(data)
groups = [0] * breaks
maxn, minn = max(data), min(data)
ranges = maxn - minn
size = len(data)
    cut_points = [minn + i * ranges / breaks for i in range(1, breaks + 1)]
for record in data:
        for i, cut_point in enumerate(cut_points):
if cut_point >= record:
groups[i] += 1
break
if x_label:
return ([minn+i*ranges/(breaks*2) for i in range(1, breaks+1)],
[float(i)/size for i in groups])
return [float(i)/size for i in groups]
def describe(data, detail=0):
'''
Help you compute some basic describe statistic indexes.
    It only supports 1-dimensional data.
Parameter
---------
data : array - Like
The sequence store your data inside.
Return
------
    NamedTuple(Mean, S, Sn, CV, Range, Min, Max, Mode, Skew, Kurt)
Mean : float
mean of data.
S : float
adjusted variance of data.
Sn : float
sample variance of data.
CV : float
        coefficient of variation of the data.
Min : value
the minimun value of the data.
Max : value
the maximun value of the data.
Range : value
the range of the data.
Formulas
--------
<1> E(x) = sum(x)/n # Average of samples
<2> D(x) = E(x^2) - E(x)^2 # Sample Variance
<3> D(x)' = n/(n-1) * D(x) # Modified Sample Variance
<4> CV = D(x) / E(x) # Coefficient of Variation
E(x^3) - 3*E(x)*D(x) - E(x)^3
<5> S(x) = ------------------------------------ # Skewness of samples
D(x)^1.5
<6> K(x) = E(x)^4 / D(x)^2 - 3 # Excess Kurtosis of samples
'''
statistic = namedtuple('STAT',
['Mean', 'S', 'Sn', 'CV', 'Range',
'Min', 'Max', 'Mode', 'Skew', 'Kurt'])
if len(data) == 0:
return statistic(*chain(repeat(None, 10)))
mode = Counter(data).most_common(1)[0][0]
try:
        data = array('f', data)
except:
data = array('f', filter(lambda x: is_math(x) and x != nan, data))
size = float(len(data))
if size == 0:
return statistic(*chain(repeat(None, 7), [mode, None, None]))
min_, max_ = min(data), max(data)
if is_math(min_) and is_math(max_):
rang = max_ - min_
else:
rang = '-'
    Ex, Ex2, Ex3, Ex4 = 0, 0, 0, 0
    for x in data:
        Ex += x
        Ex2 += x ** 2
        if detail == 1:
            Ex3 += x ** 3
            Ex4 += x ** 4
Ex /= size
Ex2 /= size
std = (Ex2 - Ex**2) ** 0.5
if std == 0 or size == 1.0:
std_n = size
else:
std_n = size / (size - 1.0) * std
S, K = '-','-'
if detail == 1:
Ex3 /= size
Ex4 /= size
if std != 0:
            S = (Ex3 - 3 * Ex * std ** 2 - Ex ** 3) / std ** 3
K = Ex4 / std ** 4 - 3
if Ex == 0:
return statistic(Ex, std, std_n, None, rang, min_, max_, mode, S, K)
return statistic(Ex, std, std_n, std/Ex, rang, min_, max_, mode, S, K)
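# Hedged usage sketch for describe(): the fields follow the namedtuple above, e.g.
# describe([1, 2, 3, 4, 5], detail=1).Mean is expected to be 3.0, and the skewness of
# this symmetric sample should be (close to) 0.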
|
gwd/jigsaw/crop.py | kazakh-shai/kaggle-global-wheat-detection | 136 | 11135698 | import argparse
import os
import os.path as osp
from functools import partial
from multiprocessing import Pool
import cv2
import pandas as pd
from tqdm import tqdm
from gwd.converters import images2coco
CROP_SIZE = 1024
OFFSET_SIZE = 512
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--annotation-path", default="/data/folds_v2/0/mosaic_train.csv")
parser.add_argument("--img-root", default="/data/mosaics")
parser.add_argument("--output-root", default="/data/crops_fold0")
parser.add_argument("--output-path", default="/data/coco_crops_fold0.json")
return parser.parse_args()
def crop_and_save(img_path, output_root):
img = cv2.imread(img_path)
h, w = img.shape[:2]
if h <= 1024 and w <= 1024:
return
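    # Tile the mosaic with CROP_SIZE crops on an OFFSET_SIZE (half-crop) stride; crops
    # whose top-left corner lies on the even 1024 grid in both directions (i and j both
    # even) are skipped, so only the shifted crops between the original tiles are written.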
for i in range(0, h // OFFSET_SIZE - 1):
for j in range(0, w // OFFSET_SIZE - 1):
if i % 2 or j % 2:
crop = img[i * OFFSET_SIZE : i * OFFSET_SIZE + CROP_SIZE, j * OFFSET_SIZE : j * OFFSET_SIZE + CROP_SIZE]
img_name = osp.basename(img_path)
crop_path = osp.join(output_root, f"{i}_{j}_{img_name}")
cv2.imwrite(crop_path, crop)
def main():
args = parse_args()
os.makedirs(args.output_root, exist_ok=True)
annotations = pd.read_csv(args.annotation_path)
annotations["img_path"] = annotations["image_id"].apply(lambda x: f"{args.img_root}/{x}.jpg")
img_paths = annotations["img_path"].drop_duplicates().tolist()
with Pool(32) as p:
list(
tqdm(iterable=p.imap(partial(crop_and_save, output_root=args.output_root), img_paths), total=len(img_paths))
)
images2coco.main(img_pattern=osp.join(args.output_root, "*.jpg"), output_path=args.output_path)
if __name__ == "__main__":
main()
|
codes/scripts/rename.py | arthur-qiu/BasicSR | 106 | 11135717 | <gh_stars>100-1000
import os
import os.path
import glob
input_folder = '/home/xtwang/Projects/PIRM18/results/pirm_selfval_img06/*' # glob matching pattern
save_folder = '/home/xtwang/Projects/PIRM18/results/pirm_selfval_img'
mode = 'cp' # 'cp' | 'mv'
file_list = sorted(glob.glob(input_folder))
if not os.path.exists(save_folder):
os.makedirs(save_folder)
print('mkdir ... ' + save_folder)
else:
print('File [{}] already exists. Exit.'.format(save_folder))
for i, path in enumerate(file_list):
base_name = os.path.splitext(os.path.basename(path))[0]
new_name = base_name.split('_')[0]
new_path = os.path.join(save_folder, new_name + '.png')
os.system(mode + ' ' + path + ' ' + new_path)
print(i, base_name)
|
e2cnn/nn/modules/batchnormalization/gnorm.py | marcelroed/e2cnn | 356 | 11135740 |
from collections import defaultdict
from e2cnn.gspaces import *
from e2cnn.nn import FieldType
from e2cnn.nn import GeometricTensor
from ..equivariant_module import EquivariantModule
import torch
from torch.nn import Parameter
from typing import List, Tuple, Any, Union
import numpy as np
__all__ = ["GNormBatchNorm"]
class GNormBatchNorm(EquivariantModule):
def __init__(self,
in_type: FieldType,
eps: float = 1e-05,
momentum: float = 0.1,
affine: bool = True,
):
r"""
Batch normalization for generic representations.
.. todo ::
Add more details about how stats are computed and how affine transformation is done.
Args:
in_type (FieldType): the input field type
eps (float, optional): a value added to the denominator for numerical stability. Default: ``1e-5``
momentum (float, optional): the value used for the ``running_mean`` and ``running_var`` computation.
Can be set to ``None`` for cumulative moving average (i.e. simple average). Default: ``0.1``
affine (bool, optional): if ``True``, this module has learnable affine parameters. Default: ``True``
"""
assert isinstance(in_type.gspace, GeneralOnR2)
super(GNormBatchNorm, self).__init__()
self.space = in_type.gspace
self.in_type = in_type
self.out_type = in_type
self.affine = affine
self._nfields = None
# group fields by their type and
# - check if fields of the same type are contiguous
# - retrieve the indices of the fields
# number of fields of each type
self._nfields = defaultdict(int)
# indices of the channels corresponding to fields belonging to each group
_indices = defaultdict(lambda: [])
# whether each group of fields is contiguous or not
self._contiguous = {}
ntrivials = 0
position = 0
last_field = None
for i, r in enumerate(self.in_type.representations):
for irr in r.irreps:
if self.in_type.fibergroup.irreps[irr].is_trivial():
ntrivials += 1
if r.name != last_field:
if not r.name in self._contiguous:
self._contiguous[r.name] = True
else:
self._contiguous[r.name] = False
last_field = r.name
_indices[r.name] += list(range(position, position + r.size))
self._nfields[r.name] += 1
position += r.size
for name, contiguous in self._contiguous.items():
if contiguous:
# for contiguous fields, only the first and last indices are kept
_indices[name] = [min(_indices[name]), max(_indices[name])+1]
setattr(self, f"{name}_indices", _indices[name])
else:
# otherwise, transform the list of indices into a tensor
_indices[name] = torch.LongTensor(_indices[name])
# register the indices tensors as parameters of this module
self.register_buffer(f"{name}_indices", _indices[name])
# store the size of each field type
self._sizes = []
# store for each field type the indices of the trivial irreps in it
self._trivial_idxs = {}
# store for each field type the sizes and the indices of all its irreps, grouped by their size
self._irreps_sizes = {}
for r in self.in_type._unique_representations:
p = 0
irreps = defaultdict(lambda: [])
trivials = []
aggregator = torch.zeros(r.size, len(r.irreps))
for i, irr in enumerate(r.irreps):
irr = self.in_type.fibergroup.irreps[irr]
if irr.is_trivial():
trivials.append(p)
aggregator[p:p+irr.size, i] = 1. / irr.size
irreps[irr.size] += list(range(p, p+irr.size))
p += irr.size
propagator = (aggregator > 0).clone().to(dtype=torch.float)
name = r.name
self._trivial_idxs[name] = torch.tensor(trivials, dtype=torch.long)
self._irreps_sizes[name] = [(s, idxs) for s, idxs in irreps.items()]
self._sizes.append((name, r.size))
if not np.allclose(r.change_of_basis, np.eye(r.size)):
self.register_buffer(f'{name}_change_of_basis', torch.tensor(r.change_of_basis, dtype=torch.float))
self.register_buffer(f'{name}_change_of_basis_inv', torch.tensor(r.change_of_basis_inv, dtype=torch.float))
self.register_buffer(f'vars_aggregator_{name}', aggregator)
self.register_buffer(f'vars_propagator_{name}', propagator)
running_var = torch.ones((self._nfields[r.name], len(r.irreps)), dtype=torch.float)
running_mean = torch.zeros((self._nfields[r.name], len(trivials)), dtype=torch.float)
self.register_buffer(f'{name}_running_var', running_var)
self.register_buffer(f'{name}_running_mean', running_mean)
if self.affine:
weight = Parameter(torch.ones((self._nfields[r.name], len(r.irreps))), requires_grad=True)
bias = Parameter(torch.zeros((self._nfields[r.name], len(trivials))), requires_grad=True)
self.register_parameter(f'{name}_weight', weight)
self.register_parameter(f'{name}_bias', bias)
self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
self.eps = eps
self.momentum = momentum
def reset_running_stats(self):
for name, size in self._sizes:
running_var = getattr(self, f"{name}_running_var")
running_mean = getattr(self, f"{name}_running_mean")
running_var.fill_(1)
running_mean.fill_(0)
self.num_batches_tracked.zero_()
def reset_parameters(self):
self.reset_running_stats()
if self.affine:
for name, size in self._sizes:
weight = getattr(self, f"{name}_weight")
bias = getattr(self, f"{name}_bias")
weight.data.fill_(1)
bias.data.fill_(0)
def forward(self, input: GeometricTensor) -> GeometricTensor:
r"""
        Apply batch normalization to the input feature map
Args:
input (GeometricTensor): the input feature map
Returns:
the resulting feature map
"""
assert input.type == self.in_type
exponential_average_factor = 0.0
if self.training:
self.num_batches_tracked += 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / self.num_batches_tracked.item()
else: # use exponential moving average
exponential_average_factor = self.momentum
input = input.tensor
b, c, h, w = input.shape
output = torch.empty_like(input)
# iterate through all field types
for name, size in self._sizes:
indices = getattr(self, f"{name}_indices")
if self._contiguous[name]:
slice = input[:, indices[0]:indices[1], ...]
else:
slice = input[:, indices, ...]
slice = slice.view(b, -1, size, h, w)
if hasattr(self, f"{name}_change_of_basis_inv"):
cob_inv = getattr(self, f"{name}_change_of_basis_inv")
slice = torch.einsum("ds,bcsxy->bcdxy", (cob_inv, slice))
if self.training:
# compute the mean and variance of the fields
means, vars = self._compute_statistics(slice, name)
running_var = getattr(self, f"{name}_running_var")
running_mean = getattr(self, f"{name}_running_mean")
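                # e.g. with momentum=0.1 the update below computes
                #   running_stat <- 0.9 * running_stat + 0.1 * batch_stat
                # while momentum=None (cumulative average) makes the factor
                # 1/num_batches_tracked, weighting all batches seen so far equally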
running_var *= 1 - exponential_average_factor
running_var += exponential_average_factor * vars
running_mean *= 1 - exponential_average_factor
running_mean += exponential_average_factor * means
assert torch.allclose(running_mean, getattr(self, f"{name}_running_mean"))
assert torch.allclose(running_var, getattr(self, f"{name}_running_var"))
else:
vars = getattr(self, f"{name}_running_var")
means = getattr(self, f"{name}_running_mean")
if self.affine:
weight = getattr(self, f"{name}_weight")
else:
weight = 1.
# compute the scalar multipliers needed
scales = weight / (vars + self.eps).sqrt()
# compute the point shifts
# shifts = bias - self._scale(means, scales, name=name)
centered = self._shift(slice, -1*means, name=name, out=None)
normalized = self._scale(centered, scales, name=name, out=None)
if self.affine:
bias = getattr(self, f"{name}_bias")
normalized = self._shift(normalized, bias, name=name, out=None)
if hasattr(self, f"{name}_change_of_basis"):
cob = getattr(self, f"{name}_change_of_basis")
normalized = torch.einsum("ds,bcsxy->bcdxy", (cob, normalized))
if not self._contiguous[name]:
output[:, indices, ...] = normalized.view(b, -1, h, w)
else:
output[:, indices[0]:indices[1], ...] = normalized.view(b, -1, h, w)
# if self._contiguous[name]:
# slice2 = output[:, indices[0]:indices[1], ...]
# else:
# slice2 = output[:, indices, ...]
# assert torch.allclose(slice2.view(b, -1, size, h, w), slice), name
# wrap the result in a GeometricTensor
return GeometricTensor(output, self.out_type)
def evaluate_output_shape(self, input_shape: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
assert len(input_shape) == 4
assert input_shape[1] == self.in_type.size
b, c, hi, wi = input_shape
return b, self.out_type.size, hi, wi
def check_equivariance(self, atol: float = 1e-6, rtol: float = 1e-5) -> List[Tuple[Any, float]]:
# return super(NormBatchNorm, self).check_equivariance(atol=atol, rtol=rtol)
pass
def _compute_statistics(self, t: torch.Tensor, name: str):
trivial_idxs = self._trivial_idxs[name]
vars_aggregator = getattr(self, f"vars_aggregator_{name}")
b, c, s, x, y = t.shape
l = trivial_idxs.numel()
# number of samples in the tensor used to estimate the statistics
N = b * x * y
# compute the mean of the trivial fields
trivial_means = t[:, :, trivial_idxs, ...].view(b, c, l, x, y).sum(dim=(0, 3, 4), keepdim=False).detach() / N
# compute the mean of squares of all channels
vars = (t ** 2).view(b, c, s, x, y).sum(dim=(0, 3, 4), keepdim=False).detach() / N
# For the non-trivial fields the mean of the fields is 0, so we can compute the variance as the mean of the
# norms squared.
# For trivial channels, we need to subtract the squared mean
vars[:, trivial_idxs] -= trivial_means**2
# aggregate the squared means of the channels which belong to the same irrep
vars = torch.einsum("io,ci->co", (vars_aggregator, vars))
# Correct the estimation of the variance with Bessel's correction
correction = N/(N-1) if N > 1 else 1.
vars *= correction
return trivial_means, vars
def _scale(self, t: torch.Tensor, scales: torch.Tensor, name: str, out: torch.Tensor = None):
if out is None:
out = torch.empty_like(t)
vars_aggregator = getattr(self, f"vars_propagator_{name}")
ndims = len(t.shape[3:])
scale_shape = (1, scales.shape[0], vars_aggregator.shape[0]) + (1,)*ndims
# scale all fields
out[...] = t * torch.einsum("oi,ci->co", (vars_aggregator, scales)).reshape(scale_shape)
return out
def _shift(self, t: torch.Tensor, trivial_bias: torch.Tensor, name: str, out: torch.Tensor = None):
if out is None:
out = t.clone()
else:
out[:] = t
trivial_idxs = self._trivial_idxs[name]
bias_shape = (1,) + trivial_bias.shape + (1,)*(len(t.shape) - 3)
# add bias to the trivial fields
out[:, :, trivial_idxs, ...] += trivial_bias.view(bias_shape)
return out
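# Illustrative usage sketch (added for clarity; not part of the original file).
# ``in_type`` stands for whatever FieldType instance the surrounding equivariant
# framework provides; its construction is not shown here.
#
#   bnorm = GNormBatchNorm(in_type, eps=1e-5, momentum=0.1, affine=True)
#   # x is a GeometricTensor whose tensor has shape (batch, in_type.size, height, width)
#   y = bnorm(x)      # same field type and spatial shape, with normalized fields
#   bnorm.eval()      # switch to the accumulated running statistics at inference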
|
display_utils.py | imRushabhShah/nlp_adversarial_examples | 168 | 11135745 |
from IPython.core.display import display, HTML
import numpy as np
def html_render(x_orig, x_adv):
x_orig_words = x_orig.split(' ')
x_adv_words = x_adv.split(' ')
orig_html = []
adv_html = []
# For now, we assume both original and adversarial text have equal lengths.
assert(len(x_orig_words) == len(x_adv_words))
for i in range(len(x_orig_words)):
if x_orig_words[i] == x_adv_words[i]:
orig_html.append(x_orig_words[i])
adv_html.append(x_adv_words[i])
else:
orig_html.append(format("<b style='color:green'>%s</b>" %x_orig_words[i]))
adv_html.append(format("<b style='color:red'>%s</b>" %x_adv_words[i]))
orig_html = ' '.join(orig_html)
adv_html = ' '.join(adv_html)
return orig_html, adv_html
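# Illustrative usage (added for clarity; not part of the original module).
# Both inputs must contain the same number of whitespace-separated words;
# differing words are wrapped in colored <b> tags:
#
#   orig_html, adv_html = html_render('the movie was great', 'the movie was awful')
#   # orig_html -> "the movie was <b style='color:green'>great</b>"
#   # adv_html  -> "the movie was <b style='color:red'>awful</b>"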
def visualize_attack(sess, model, dataset, x_orig, x_adv):
x_len = np.sum(np.sign(x_orig))
orig_list = list(x_orig[:x_len])
adv_list = list(x_adv[:x_len])
orig_pred = model.predict(sess,x_orig[np.newaxis,:])
adv_pred = model.predict(sess, x_adv[np.newaxis,:])
orig_txt = dataset.build_text(orig_list)
adv_txt = dataset.build_text(adv_list)
orig_html, adv_html = html_render(orig_txt, adv_txt)
print('Original Prediction = %s. (Confidence = %0.2f) ' %(('Positive' if np.argmax(orig_pred[0]) == 1 else 'Negative'), np.max(orig_pred)*100.0))
display(HTML(orig_html))
print('--------- After attack -------------')
print('New Prediction = %s. (Confidence = %0.2f) ' %(('Positive' if np.argmax(adv_pred[0]) == 1 else 'Negative'), np.max(adv_pred)*100.0))
display(HTML(adv_html))
def visualize_attack2(dataset, test_idx, x_orig, x_adv, label):
raw_text = dataset.test_text[test_idx]
print('RAW TEXT: ')
display(HTML(raw_text))
print('-'*20)
x_len = np.sum(np.sign(x_orig))
orig_list = list(x_orig[:x_len])
#orig_pred = model.predict(sess,x_orig[np.newaxis,:])
#adv_pred = model.predict(sess, x_adv[np.newaxis,:])
orig_txt = dataset.build_text(orig_list)
if x_adv is None:
adv_txt = "FAILED"
else:
adv_list = list(x_adv[:x_len])
adv_txt = dataset.build_text(adv_list)
orig_html, adv_html = html_render(orig_txt, adv_txt)
print('Original Prediction = %s. ' %('Positive' if label == 1 else 'Negative'))
display(HTML(orig_html))
print('--------- After attack -------------')
print('New Prediction = %s.' %('Positive' if label == 0 else 'Negative'))
display(HTML(adv_html)) |
typed_python/compiler/native_function_pointer.py | APrioriInvestments/typed_python | 105 | 11135755 | import ctypes
import typed_python.compiler.native_ast as native_ast
from typed_python import PointerTo
class NativeFunctionPointer:
def __init__(self, fname, fp, input_types, output_type):
self.fp = fp
self.fname = fname
self.input_types = input_types
self.output_type = output_type
def __repr__(self):
return "NativeFunctionPointer(name=%s,addr=%x,in=%s,out=%s)" \
% (self.fname, self.fp, [str(x) for x in self.input_types], str(self.output_type))
def __call__(self, *args):
"""Attempt to call the function directly from python.
We only allow very simple transformations and types - PointerTo, ints, and floats.
"""
def mapToCtype(T):
if T == native_ast.Void:
return None
if T == native_ast.Int64:
return ctypes.c_long
if T == native_ast.Float64:
return ctypes.c_double
if T.matches.Pointer:
return ctypes.c_void_p
raise Exception(f"Can't convert {T} to a ctypes type")
def mapArg(a):
if isinstance(a, (int, float)):
return a
if isinstance(a, PointerTo):
return int(a)
raise Exception(f"Can't convert {a} to a ctypes argument")
        # build a ctypes callable with the mapped return/argument types, bound to the raw pointer
func = ctypes.CFUNCTYPE(
mapToCtype(self.output_type),
*[mapToCtype(t) for t in self.input_types]
)(self.fp)
        # convert the arguments and call through the ctypes function pointer
return func(*[mapArg(a) for a in args])
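# Illustrative call sketch (added for clarity; not part of the original module).
# 'fp' is assumed to be the raw address of an already-compiled native function:
#
#   add_ptr = NativeFunctionPointer(
#       "add", fp, [native_ast.Int64, native_ast.Int64], native_ast.Int64
#   )
#   add_ptr(2, 3)   # dispatched through ctypes; returns the native result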
|
speedml/feature.py | Dennis055/speedml | 212 | 11135762 | """
Speedml Feature component with methods that work on dataset features or the feature engineering workflow. Contact author https://twitter.com/manavsehgal. Code, docs and demos https://speedml.com.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from .base import Base
from .util import DataFrameImputer
import numpy as np
from sklearn.preprocessing import LabelEncoder
import re
class Feature(Base):
def drop(self, features):
"""
        Drop the given ``features`` (a list of column-name strings) from both the train and test datasets.
"""
start = Base.train.shape[1]
Base.train = Base.train.drop(features, axis=1)
Base.test = Base.test.drop(features, axis=1)
end = Base.train.shape[1]
message = 'Dropped {} features with {} features available.'
return message.format(start - end, end)
def impute(self):
"""
Replace empty values in the entire dataframe with median value for numerical features and most common values for text features.
"""
start = Base.train.isnull().sum().sum()
Base.test[Base.target] = -1
combine = Base.train.append(Base.test)
combine = DataFrameImputer().fit_transform(combine)
Base.train = combine[0:Base.train.shape[0]]
Base.test = combine[Base.train.shape[0]::]
Base.test = Base.test.drop([Base.target], axis=1)
end = Base.train.isnull().sum().sum()
message = 'Imputed {} empty values to {}.'
return message.format(start, end)
def mapping(self, a, data):
"""
        Convert values of the categorical feature ``a`` using the ``data`` dictionary. Use this when the number of categories is limited; otherwise use ``labels``.
"""
Base.train[a] = Base.train[a].apply(lambda x: data[x])
Base.test[a] = Base.test[a].apply(lambda x: data[x])
def fillna(self, a, new):
"""
Fills empty or null values in ``a`` feature name with ``new`` string value.
"""
start = Base.train[a].isnull().sum() + Base.test[a].isnull().sum()
Base.train[a] = Base.train[a].fillna(new)
Base.test[a] = Base.test[a].fillna(new)
message = 'Filled {} null values across test and train datasets.'
return message.format(start)
def replace(self, a, match, new):
"""
        In feature ``a``, replace values matching the ``match`` string (or list of strings) with the ``new`` string.
"""
if type(match) is str:
# [TODO] What is the performance cost of message ops?
start = Base.train[Base.train[a] == match][a].shape[0] + Base.test[Base.test[a] == match][a].shape[0]
message = 'Replaced {} matching values across train and test datasets.'
message = message.format(start)
else:
# [TODO] Can we possibly use pandas.isin to check counts?
message = 'Replaced matching list of strings across train and test datasets.'
Base.train[a] = Base.train[a].replace(match, new)
Base.test[a] = Base.test[a].replace(match, new)
return message
def outliers(self, a, lower = None, upper = None):
"""
        Cap outliers at the ``lower`` and/or ``upper`` percentile of values within the ``a`` feature.
        """
        message = ''
        if upper:
upper_value = np.percentile(Base.train[a].values, upper)
change = Base.train.loc[Base.train[a] > upper_value, a].shape[0]
Base.train.loc[Base.train[a] > upper_value, a] = upper_value
message = 'Fixed {} or {:.2f}% upper outliers. '.format(change, change/Base.train.shape[0]*100)
if lower:
lower_value = np.percentile(Base.train[a].values, lower)
change = Base.train.loc[Base.train[a] < lower_value, a].shape[0]
Base.train.loc[Base.train[a] < lower_value, a] = lower_value
message = message + 'Fixed {} or {:.2f}% lower outliers.'.format(change, change/Base.train.shape[0]*100)
return message
def _density_by_feature(self, a):
vals = Base.train[a].value_counts()
dvals = vals.to_dict()
Base.train[a + '_density'] = Base.train[a].apply(lambda x: dvals.get(x, vals.min()))
Base.test[a + '_density'] = Base.test[a].apply(lambda x: dvals.get(x, vals.min()))
def density(self, a):
"""
        Create a new feature named ``a`` + '_density', containing the value count (density) of each value in feature ``a``. ``a`` may be a single feature name (string) or a list of feature names.
"""
if isinstance(a, str):
self._density_by_feature(a)
if isinstance(a, list):
for feature in a:
self._density_by_feature(feature)
def add(self, a, num):
"""
Update ``a`` numeric feature by adding ``num`` number to each values.
"""
Base.train[a] = Base.train[a] + num
Base.test[a] = Base.test[a] + num
def sum(self, new, a, b):
"""
Create ``new`` numeric feature by adding ``a`` + ``b`` feature values.
"""
Base.train[new] = Base.train[a] + Base.train[b]
Base.test[new] = Base.test[a] + Base.test[b]
def diff(self, new, a, b):
"""
Create ``new`` numeric feature by subtracting ``a`` - ``b`` feature values.
"""
Base.train[new] = Base.train[a] - Base.train[b]
Base.test[new] = Base.test[a] - Base.test[b]
def product(self, new, a, b):
"""
Create ``new`` numeric feature by multiplying ``a`` * ``b`` feature values.
"""
Base.train[new] = Base.train[a] * Base.train[b]
Base.test[new] = Base.test[a] * Base.test[b]
def divide(self, new, a, b):
"""
Create ``new`` numeric feature by dividing ``a`` / ``b`` feature values. Replace division-by-zero with zero values.
"""
Base.train[new] = Base.train[a] / Base.train[b]
Base.test[new] = Base.test[a] / Base.test[b]
# Histograms require finite values
Base.train[new] = Base.train[new].replace([np.inf, -np.inf], 0)
Base.test[new] = Base.test[new].replace([np.inf, -np.inf], 0)
def round(self, new, a, precision):
"""
Create ``new`` numeric feature by rounding ``a`` feature value to ``precision`` decimal places.
"""
Base.train[new] = round(Base.train[a], precision)
Base.test[new] = round(Base.test[a], precision)
def concat(self, new, a, sep, b):
"""
Create ``new`` text feature by concatenating ``a`` and ``b`` text feature values, using ``sep`` separator.
"""
Base.train[new] = Base.train[a].astype(str) + sep + Base.train[b].astype(str)
Base.test[new] = Base.test[a].astype(str) + sep + Base.test[b].astype(str)
def list_len(self, new, a):
"""
Create ``new`` numeric feature based on length or item count from ``a`` feature containing list object as values.
"""
Base.train[new] = Base.train[a].apply(len)
Base.test[new] = Base.test[a].apply(len)
def word_count(self, new, a):
"""
Create ``new`` numeric feature based on length or word count from ``a`` feature containing free-form text.
"""
Base.train[new] = Base.train[a].apply(lambda x: len(x.split(" ")))
Base.test[new] = Base.test[a].apply(lambda x: len(x.split(" ")))
def _regex_text(self, regex, text):
regex_search = re.search(regex, text)
# If the word exists, extract and return it.
if regex_search:
return regex_search.group(1)
return ""
def extract(self, a, regex, new=None):
"""
Match ``regex`` regular expression with ``a`` text feature values to update ``a`` feature with matching text if ``new`` = None. Otherwise create ``new`` feature based on matching text.
"""
Base.train[new if new else a] = Base.train[a].apply(lambda x: self._regex_text(regex=regex, text=x))
Base.test[new if new else a] = Base.test[a].apply(lambda x: self._regex_text(regex=regex, text=x))
def labels(self, features):
"""
Generate numerical labels replacing text values from list of categorical ``features``.
"""
Base.test[Base.target] = -1
combine = Base.train.append(Base.test)
le = LabelEncoder()
for feature in features:
combine[feature] = le.fit_transform(combine[feature])
Base.train = combine[0:Base.train.shape[0]]
Base.test = combine[Base.train.shape[0]::]
Base.test = Base.test.drop([Base.target], axis=1)
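# Illustrative workflow sketch (added for clarity; not part of the original module).
# It assumes the surrounding Speedml package has already loaded ``Base.train``,
# ``Base.test`` and ``Base.target``; the file paths and column names below are hypothetical:
#
#   sml = Speedml('train.csv', 'test.csv', target='Survived')
#   sml.feature.impute()                          # fill missing values
#   sml.feature.sum('FamilySize', 'SibSp', 'Parch')
#   sml.feature.outliers('Fare', upper=99)        # cap the top 1% of values
#   sml.feature.labels(['Sex', 'Embarked'])       # encode categoricals as numbers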
|
tests/ahocorapy_test.py | Gavingx/ahocorapy | 175 | 11135769 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from builtins import str
from io import open
from pickle import dumps, loads
import unittest
from ahocorapy.keywordtree import KeywordTree
class TestAhocorapy(unittest.TestCase):
def test_empty_tree(self):
kwtree = KeywordTree()
kwtree.finalize()
result = kwtree.search('zef')
self.assertIsNone(result)
def test_empty_input(self):
kwtree = KeywordTree()
kwtree.add('bla')
kwtree.finalize()
result = kwtree.search('')
self.assertIsNone(result)
def test_empty_keyword(self):
kwtree = KeywordTree()
kwtree.add('')
kwtree.finalize()
result = kwtree.search('')
self.assertIsNone(result)
def test_readme_example(self):
'''
As used in the projects README. If you have to change this test case,
please update the README accordingly.
'''
kwtree = KeywordTree(case_insensitive=True)
kwtree.add('malaga')
kwtree.add('lacrosse')
kwtree.add('mallorca')
kwtree.add('mallorca bella')
kwtree.add('orca')
kwtree.finalize()
result = kwtree.search('My favorite islands are malaga and sylt.')
self.assertEqual(('malaga', 24), result)
result = kwtree.search(
'idontlikewhitespaceswhereismalacrossequestionmark')
self.assertEqual(('lacrosse', 29), result)
results = kwtree.search_all('malheur on mallorca bellacrosse')
self.assertIsNotNone(results)
self.assertEqual(('mallorca', 11), next(results))
self.assertEqual(('orca', 15), next(results))
self.assertEqual(('mallorca bella', 11), next(results))
self.assertEqual(('lacrosse', 23), next(results))
with self.assertRaises(StopIteration):
next(results)
def test_suffix_stuff(self):
kwtree = KeywordTree()
kwtree.add('blaaaaaf')
kwtree.add('bluez')
kwtree.add('aaaamen')
kwtree.add('uebergaaat')
kwtree.finalize()
result = kwtree.search('blaaaaamentada')
self.assertEqual(('aaaamen', 3), result)
result = kwtree.search('clueuebergaaameblaaaamenbluez')
self.assertEqual(('aaaamen', 17), result)
def test_text_end_situation(self):
kwtree = KeywordTree()
kwtree.add('blaaaaaf')
kwtree.add('a')
kwtree.finalize()
result = kwtree.search_one('bla')
self.assertEqual(('a', 2), result)
def test_text_end_situation_2(self):
kwtree = KeywordTree()
kwtree.add('blaaaaaf')
kwtree.add('la')
kwtree.finalize()
result = kwtree.search('bla')
self.assertEqual(('la', 1), result)
def test_simple(self):
kwtree = KeywordTree()
kwtree.add('bla')
kwtree.add('blue')
kwtree.finalize()
result = kwtree.search('bl')
self.assertIsNone(result)
result = kwtree.search('')
self.assertIsNone(result)
result = kwtree.search('zef')
self.assertIsNone(result)
result = kwtree.search('blaaaa')
self.assertEqual(('bla', 0), result)
result = kwtree.search('red green blue grey')
self.assertEqual(('blue', 10), result)
def test_simple_back_to_zero_state_example(self):
kwtree = KeywordTree()
keyword_list = ['ab', 'bca']
for keyword in keyword_list:
kwtree.add(keyword)
kwtree.finalize()
result = kwtree.search('blbabca')
self.assertEqual(('ab', 3), result)
def test_domains(self):
kwtree = KeywordTree()
kwtree.add('searchenginemarketingfordummies.com')
kwtree.add('linkpt.com')
kwtree.add('fnbpeterstown.com')
kwtree.finalize()
result = kwtree.search('<EMAIL>')
self.assertEqual(('linkpt.com', 10), result)
def test_unicode(self):
kwtree = KeywordTree()
kwtree.add('bla')
kwtree.add('blue')
kwtree.add(u'颜到')
kwtree.finalize()
result = kwtree.search(u'春华变苍颜到处群魔乱')
self.assertEqual((u'颜到', 4), result)
result = kwtree.search(u'三年过')
self.assertIsNone(result)
def test_case_sensitivity(self):
kwtree = KeywordTree()
kwtree.add('bla')
kwtree.add('blue')
kwtree.add('blISs')
kwtree.finalize()
result = kwtree.search('bLa')
self.assertIsNone(result)
result = kwtree.search('BLISS')
self.assertIsNone(result)
result = kwtree.search('bliss')
self.assertIsNone(result)
result = kwtree.search('blISs')
self.assertEqual(('blISs', 0), result)
def test_case_insensitivity_mode(self):
kwtree = KeywordTree(case_insensitive=True)
kwtree.add('bla')
kwtree.add('blue')
kwtree.add('blISs')
kwtree.finalize()
result = kwtree.search('bLa')
self.assertEqual(('bla', 0), result)
result = kwtree.search('BLISS')
self.assertEqual(('blISs', 0), result)
def test_utility_calls(self):
kwtree = KeywordTree(case_insensitive=True)
kwtree.add('bla')
kwtree.add('blue')
kwtree.finalize()
# Just test that there are no errors
rep = repr(kwtree)
self.assertGreater(len(rep), 0)
tostring = str(kwtree)
self.assertGreater(len(tostring), 0)
def test_finalize_errors(self):
kwtree = KeywordTree(case_insensitive=True)
kwtree.add('bla')
kwtree.add('blue')
self.assertRaises(ValueError, kwtree.search, 'blueb')
kwtree = KeywordTree(case_insensitive=True)
kwtree.add('bla')
kwtree.finalize()
self.assertRaises(ValueError, kwtree.add, 'blueb')
kwtree = KeywordTree(case_insensitive=True)
kwtree.add('bla')
kwtree.finalize()
self.assertRaises(ValueError, kwtree.finalize)
def test_many_keywords(self):
kwtree = KeywordTree(case_insensitive=True)
with open('tests/data/names.txt') as keyword_file:
keyword_list = list(map(str.strip, keyword_file.readlines()))
for kw in keyword_list:
kwtree.add(kw)
kwtree.finalize()
with open('tests/data/textblob.txt') as keyword_file:
textblob = keyword_file.read()
result = kwtree.search(textblob)
self.assertEqual(('<NAME>', 34153), result)
results = kwtree.search_all(textblob)
self.assertIsNotNone(results)
self.assertEqual(('<NAME>', 34153), next(results))
with self.assertRaises(StopIteration):
next(results)
def test_search_all_issue_1(self):
text = '/foo/bar'
words = ['/bar', '/foo/bar', 'bar']
tree = KeywordTree(case_insensitive=True)
for word in words:
tree.add(word)
tree.finalize()
results = tree.search_all(text)
self.assertEqual(('/foo/bar', 0), next(results))
self.assertEqual(('/bar', 4), next(results))
self.assertEqual(('bar', 5), next(results))
def test_search_all_issue_1_similar(self):
text = '/foo/bar'
words = ['/bara', '/foo/barb', 'bar']
tree = KeywordTree(case_insensitive=True)
for word in words:
tree.add(word)
tree.finalize()
results = tree.search_all(text)
self.assertEqual(('bar', 5), next(results))
def test_search_all_issue_3_similar(self):
text = '/foo/bar'
words = ['foo/', 'foo', '/foo/', '/bar']
tree = KeywordTree(case_insensitive=True)
for word in words:
tree.add(word)
tree.finalize()
results = tree.search_all(text)
self.assertEqual(('foo', 1), next(results))
self.assertEqual(('/foo/', 0), next(results))
self.assertEqual(('foo/', 1), next(results))
self.assertEqual(('/bar', 4), next(results))
def test_pickling_simple(self):
words = ['peter', 'horst', 'gandalf', 'frodo']
tree = KeywordTree(case_insensitive=True)
for word in words:
tree.add(word)
tree.finalize()
as_bytes = dumps(tree)
self.assertIsNotNone(as_bytes)
deserialized = loads(as_bytes)
self.assertIsNotNone(deserialized)
text = 'Gollum did not like frodo. But gandalf did.'
results = deserialized.search_all(text)
self.assertEqual(('frodo', 20), next(results))
self.assertEqual(('gandalf', 31), next(results))
def test_pickling_before_finalizing(self):
words = ['peter', 'horst', 'gandalf', 'frodo']
tree = KeywordTree(case_insensitive=True)
for word in words:
tree.add(word)
as_bytes = dumps(tree)
self.assertIsNotNone(as_bytes)
deserialized = loads(as_bytes)
self.assertIsNotNone(deserialized)
deserialized.finalize()
text = 'Gollum did not like frodo. But gandalf did.'
results = deserialized.search_all(text)
self.assertEqual(('frodo', 20), next(results))
self.assertEqual(('gandalf', 31), next(results))
def test_state_to_string(self):
words = ['peter', 'horst', 'gandalf', 'frodo']
tree = KeywordTree(case_insensitive=True)
for word in words:
tree.add(word)
tree.finalize()
as_string = str(tree._zero_state)
self.assertIsNotNone(as_string)
if __name__ == '__main__':
unittest.main()
|
tests/test_sklearn_multi_output.py | onnx/sklearn-onnx | 323 | 11135787 |
# SPDX-License-Identifier: Apache-2.0
import unittest
from logging import getLogger
import numpy
from numpy.testing import assert_almost_equal
from onnxruntime import InferenceSession
from sklearn.datasets import load_linnerud, make_multilabel_classification
from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier
from sklearn.linear_model import Ridge, LogisticRegression
from skl2onnx import to_onnx
from test_utils import dump_data_and_model, TARGET_OPSET
class TestMultiOutputConverter(unittest.TestCase):
def setUp(self):
if __name__ == "__main__":
log = getLogger('skl2onnx')
log.disabled = True
# log.setLevel(logging.DEBUG)
# logging.basicConfig(level=logging.DEBUG)
pass
def test_multi_output_regressor(self):
X, y = load_linnerud(return_X_y=True)
clf = MultiOutputRegressor(Ridge(random_state=123)).fit(X, y)
onx = to_onnx(clf, X[:1].astype(numpy.float32),
target_opset=TARGET_OPSET)
dump_data_and_model(
X.astype(numpy.float32), clf, onx,
basename="SklearnMultiOutputRegressor")
@unittest.skipIf(TARGET_OPSET < 11,
reason="SequenceConstruct not available.")
def test_multi_output_classifier(self):
X, y = make_multilabel_classification(n_classes=3, random_state=0)
X = X.astype(numpy.float32)
clf = MultiOutputClassifier(LogisticRegression()).fit(X, y)
with self.assertRaises(NameError):
to_onnx(clf, X[:1], target_opset=TARGET_OPSET,
options={id(clf): {'zipmap': False}})
onx = to_onnx(clf, X[:1], target_opset=TARGET_OPSET)
self.assertNotIn("ZipMap", str(onx))
sess = InferenceSession(onx.SerializeToString())
res = sess.run(None, {'X': X})
exp_lab = clf.predict(X)
exp_prb = clf.predict_proba(X)
assert_almost_equal(exp_lab, res[0])
self.assertEqual(len(exp_prb), len(res[1]))
for e, g in zip(exp_prb, res[1]):
assert_almost_equal(e, g, decimal=5)
# check option nocl=True
onx = to_onnx(clf, X[:1], target_opset=TARGET_OPSET,
options={id(clf): {'nocl': True}})
self.assertNotIn("ZipMap", str(onx))
sess = InferenceSession(onx.SerializeToString())
res = sess.run(None, {'X': X})
exp_lab = clf.predict(X)
exp_prb = clf.predict_proba(X)
assert_almost_equal(exp_lab, res[0])
self.assertEqual(len(exp_prb), len(res[1]))
for e, g in zip(exp_prb, res[1]):
assert_almost_equal(e, g, decimal=5)
# check option nocl=False
onx = to_onnx(clf, X[:1], target_opset=TARGET_OPSET,
options={id(clf): {'nocl': False}})
self.assertNotIn("ZipMap", str(onx))
sess = InferenceSession(onx.SerializeToString())
res = sess.run(None, {'X': X})
exp_lab = clf.predict(X)
exp_prb = clf.predict_proba(X)
assert_almost_equal(exp_lab, res[0])
self.assertEqual(len(exp_prb), len(res[1]))
for e, g in zip(exp_prb, res[1]):
assert_almost_equal(e, g, decimal=5)
if __name__ == "__main__":
unittest.main()
|
tests/settings.py | amcgavin/react-render | 138 | 11135791 | DEBUG = True
SECRET_KEY = '_'
STATIC_URL = '/static/'
INSTALLED_APPS = (
'django.contrib.staticfiles',
'tests.test_app',
)
|
tests/unit/test_okd.py | gordonmessmer/ansible-bender | 513 | 11135804 | """ Making sure that ab can be used as a custom builder in okd """
import json
import os
from flexmock import flexmock
from ansible_bender.api import Application
from ansible_bender.okd import build_inside_openshift
# OKD sets an env var BUILD to this VALUE
# oc get --template '{{ (index (index .spec.containers 0).env 0).value }}' pod/ab-in-okd-1-build | jq
# fun fun
BUILD_ENV = {
"kind": "Build",
"apiVersion": "build.openshift.io/v1",
"metadata": {
"name": "ab-in-okd-1",
"namespace": "myproject",
"selfLink": "/apis/build.openshift.io/v1/namespaces/myproject/builds/ab-in-okd-1",
"uid": "b0f55118-09d8-11e9-8e48-8c164572b096",
"resourceVersion": "39780",
"creationTimestamp": "2018-12-27T13:09:49Z",
"labels": {
"buildconfig": "ab-in-okd",
"openshift.io/build-config.name": "ab-in-okd",
"openshift.io/build.start-policy": "Serial"
},
"annotations": {
"openshift.io/build-config.name": "ab-in-okd",
"openshift.io/build.number": "1"
},
"ownerReferences": [
{
"apiVersion": "build.openshift.io/v1",
"kind": "BuildConfig",
"name": "ab-in-okd",
"uid": "afd64f3d-09d8-11e9-8e48-8c164572b096",
"controller": True
}
]
},
"spec": {
"serviceAccount": "builder",
"source": {
"type": "Git",
"git": {
"uri": "https://github.com/TomasTomecek/ansible-bender",
"ref": "master"
}
},
"strategy": {
"type": "Custom",
"customStrategy": {
"from": {
"kind": "DockerImage",
"name": "ansible-bender:latest"
},
"pullSecret": {
"name": "builder-dockercfg-mfvxv"
},
"env": [
{
"name": "AB_BASE_IMAGE",
"value": "registry.fedoraproject.org/fedora:29"
},
{
"name": "AB_PLAYBOOK_PATH",
"value": "recipe.yml"
},
{
"name": "OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE",
"value": "ansible-bender:latest"
}
]
}
},
"output": {
"to": {
"kind": "DockerImage",
"name": "lolzor"
},
"pushSecret": {
"name": "builder-dockercfg-mfvxv"
}
},
"resources": {},
"postCommit": {},
"nodeSelector": None,
"triggeredBy": [
{
"message": "Manually triggered"
}
]
},
"status": {
"phase": "New",
"outputDockerImageReference": "lolzor",
"config": {
"kind": "BuildConfig",
"namespace": "myproject",
"name": "ab-in-okd"
},
"output": {}
}
}
BUILD_ENV_RAW = json.dumps(BUILD_ENV)
def test_bio(tmpdir):
database_path = str(tmpdir)
flexmock(Application, build=lambda build, extra_ansible_args=None: True)
application = Application(db_path=database_path, debug=True)
ose = os.environ
ose["BUILD"] = BUILD_ENV_RAW
ose["AB_PLAYBOOK_PATH"] = "asdqwe.yml"
ose["AB_BASE_IMAGE"] = "pancake"
flexmock(os, environ=ose)
try:
build_inside_openshift(application)
finally:
application.clean()
|
scripts/torch/train.py | mohitktanwr/Improved-Inverse-ResNest-Isprs | 3,168 | 11135819 |
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: <NAME>
## Email: <EMAIL>
## Copyright (c) 2020
##
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import os
import time
import json
import logging
import argparse
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
from resnest.torch.config import get_cfg
from resnest.torch.models.build import get_model
from resnest.torch.datasets import get_dataset
from resnest.torch.transforms import get_transform
from resnest.torch.loss import get_criterion
from resnest.torch.utils import (save_checkpoint, accuracy,
AverageMeter, LR_Scheduler, torch_dist_sum, mkdir,
cached_log_stream, PathManager)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Options():
def __init__(self):
# data settings
parser = argparse.ArgumentParser(description='ResNeSt Training')
parser.add_argument('--config-file', type=str, default=None,
help='training configs')
parser.add_argument('--outdir', type=str, default='output',
help='output directory')
# checking point
parser.add_argument('--resume', type=str, default=None,
help='put the path to resuming file if needed')
# distributed
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://localhost:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
# evaluation option
parser.add_argument('--eval-only', action='store_true', default= False,
help='evaluating')
parser.add_argument('--export', type=str, default=None,
help='put the path to resuming file if needed')
self.parser = parser
def parse(self):
args = self.parser.parse_args()
return args
def main():
args = Options().parse()
ngpus_per_node = torch.cuda.device_count()
args.world_size = ngpus_per_node * args.world_size
# load config
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.OPTIMIZER.LR = cfg.OPTIMIZER.LR * args.world_size
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args, cfg))
# global variable
best_pred = 0.0
acclist_train = []
acclist_val = []
def main_worker(gpu, ngpus_per_node, args, cfg):
args.gpu = gpu
args.rank = args.rank * ngpus_per_node + gpu
logger.info(f'rank: {args.rank} / {args.world_size}')
dist.init_process_group(backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank)
torch.cuda.set_device(args.gpu)
if args.gpu == 0:
mkdir(args.outdir)
filename = os.path.join(args.outdir, 'log.txt')
fh = logging.StreamHandler(cached_log_stream(filename))
fh.setLevel(logging.INFO)
logger.addHandler(fh)
plain_formatter = logging.Formatter(
"[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S"
)
fh.setFormatter(plain_formatter)
logger.info(args)
# init the global
global best_pred, acclist_train, acclist_val
# seed
torch.manual_seed(cfg.SEED)
torch.cuda.manual_seed(cfg.SEED)
# init dataloader
transform_train, transform_val = get_transform(cfg.DATA.DATASET)(
cfg.DATA.BASE_SIZE, cfg.DATA.CROP_SIZE, cfg.DATA.RAND_AUG)
trainset = get_dataset(cfg.DATA.DATASET)(root=cfg.DATA.ROOT,
transform=transform_train,
train=True,
download=True)
valset = get_dataset(cfg.DATA.DATASET)(root=cfg.DATA.ROOT,
transform=transform_val,
train=False,
download=True)
train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=cfg.TRAINING.BATCH_SIZE, shuffle=False,
num_workers=cfg.TRAINING.WORKERS, pin_memory=True,
sampler=train_sampler)
val_sampler = torch.utils.data.distributed.DistributedSampler(valset, shuffle=False)
val_loader = torch.utils.data.DataLoader(
valset, batch_size=cfg.TRAINING.TEST_BATCH_SIZE, shuffle=False,
num_workers=cfg.TRAINING.WORKERS, pin_memory=True,
sampler=val_sampler)
# init the model
model_kwargs = {}
if cfg.MODEL.FINAL_DROP > 0.0:
model_kwargs['final_drop'] = cfg.MODEL.FINAL_DROP
if cfg.TRAINING.LAST_GAMMA:
model_kwargs['last_gamma'] = True
model = get_model(cfg.MODEL.NAME)(**model_kwargs)
if args.gpu == 0:
logger.info(model)
criterion, train_loader = get_criterion(cfg, train_loader, args.gpu)
model.cuda(args.gpu)
criterion.cuda(args.gpu)
model = DistributedDataParallel(model, device_ids=[args.gpu])
# criterion and optimizer
if cfg.OPTIMIZER.DISABLE_BN_WD:
parameters = model.named_parameters()
param_dict = {}
for k, v in parameters:
param_dict[k] = v
bn_params = [v for n, v in param_dict.items() if ('bn' in n or 'bias' in n)]
rest_params = [v for n, v in param_dict.items() if not ('bn' in n or 'bias' in n)]
if args.gpu == 0:
logger.info(" Weight decay NOT applied to BN parameters ")
logger.info(f'len(parameters): {len(list(model.parameters()))} = {len(bn_params)} + {len(rest_params)}')
optimizer = torch.optim.SGD([{'params': bn_params, 'weight_decay': 0 },
{'params': rest_params, 'weight_decay': cfg.OPTIMIZER.WEIGHT_DECAY}],
lr=cfg.OPTIMIZER.LR,
momentum=cfg.OPTIMIZER.MOMENTUM,
weight_decay=cfg.OPTIMIZER.WEIGHT_DECAY)
else:
optimizer = torch.optim.SGD(model.parameters(),
lr=cfg.OPTIMIZER.LR,
momentum=cfg.OPTIMIZER.MOMENTUM,
weight_decay=cfg.OPTIMIZER.WEIGHT_DECAY)
# check point
if args.resume is not None:
if os.path.isfile(args.resume):
if args.gpu == 0:
logger.info(f"=> loading checkpoint '{args.resume}'")
with PathManager.open(args.resume, "rb") as f:
checkpoint = torch.load(f)
cfg.TRAINING.START_EPOCHS = checkpoint['epoch'] + 1 if cfg.TRAINING.START_EPOCHS == 0 \
else cfg.TRAINING.START_EPOCHS
best_pred = checkpoint['best_pred']
acclist_train = checkpoint['acclist_train']
acclist_val = checkpoint['acclist_val']
model.module.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
if args.gpu == 0:
logger.info(f"=> loaded checkpoint '{args.resume}' (epoch {checkpoint['epoch']})")
else:
raise RuntimeError (f"=> no resume checkpoint found at '{args.resume}'")
scheduler = LR_Scheduler(cfg.OPTIMIZER.LR_SCHEDULER,
base_lr=cfg.OPTIMIZER.LR,
num_epochs=cfg.TRAINING.EPOCHS,
iters_per_epoch=len(train_loader),
warmup_epochs=cfg.OPTIMIZER.WARMUP_EPOCHS)
def train(epoch):
train_sampler.set_epoch(epoch)
model.train()
losses = AverageMeter()
top1 = AverageMeter()
global best_pred, acclist_train
for batch_idx, (data, target) in enumerate(train_loader):
scheduler(optimizer, batch_idx, epoch, best_pred)
if not cfg.DATA.MIXUP:
data, target = data.cuda(args.gpu), target.cuda(args.gpu)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
if not cfg.DATA.MIXUP:
acc1 = accuracy(output, target, topk=(1,))
top1.update(acc1[0], data.size(0))
losses.update(loss.item(), data.size(0))
if batch_idx % 100 == 0 and args.gpu == 0:
if cfg.DATA.MIXUP:
logger.info('Batch: %d| Loss: %.3f'%(batch_idx, losses.avg))
else:
logger.info('Batch: %d| Loss: %.3f | Top1: %.3f'%(batch_idx, losses.avg, top1.avg))
acclist_train += [top1.avg]
def validate(epoch):
model.eval()
top1 = AverageMeter()
top5 = AverageMeter()
global best_pred, acclist_train, acclist_val
is_best = False
for batch_idx, (data, target) in enumerate(val_loader):
data, target = data.cuda(args.gpu), target.cuda(args.gpu)
with torch.no_grad():
output = model(data)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
top1.update(acc1[0], data.size(0))
top5.update(acc5[0], data.size(0))
# sum all
sum1, cnt1, sum5, cnt5 = torch_dist_sum(args.gpu, top1.sum, top1.count, top5.sum, top5.count)
top1_acc = sum(sum1) / sum(cnt1)
top5_acc = sum(sum5) / sum(cnt5)
if args.gpu == 0:
logger.info('Validation: Top1: %.3f | Top5: %.3f'%(top1_acc, top5_acc))
if args.eval_only:
return top1_acc, top5_acc
# save checkpoint
acclist_val += [top1_acc]
if top1_acc > best_pred:
best_pred = top1_acc
is_best = True
save_checkpoint({
'epoch': epoch,
'state_dict': model.module.state_dict(),
'optimizer': optimizer.state_dict(),
'best_pred': best_pred,
'acclist_train':acclist_train,
'acclist_val':acclist_val,
},
directory=args.outdir,
is_best=False,
filename=f'checkpoint_{epoch}.pth')
return top1_acc.item(), top5_acc.item()
if args.export:
if args.gpu == 0:
with PathManager.open(args.export + '.pth', "wb") as f:
torch.save(model.module.state_dict(), f)
return
if args.eval_only:
top1_acc, top5_acc = validate(cfg.TRAINING.START_EPOCHS)
metrics = {
"top1": top1_acc,
"top5": top5_acc,
}
if args.gpu == 0:
with PathManager.open(os.path.join(args.outdir, 'metrics.json'), "w") as f:
json.dump(metrics, f)
return
for epoch in range(cfg.TRAINING.START_EPOCHS, cfg.TRAINING.EPOCHS):
tic = time.time()
train(epoch)
if epoch % 10 == 0:
top1_acc, top5_acc = validate(epoch)
elapsed = time.time() - tic
if args.gpu == 0:
logger.info(f'Epoch: {epoch}, Time cost: {elapsed}')
# final evaluation
top1_acc, top5_acc = validate(cfg.TRAINING.START_EPOCHS - 1)
if args.gpu == 0:
# save final checkpoint
save_checkpoint({
'epoch': cfg.TRAINING.EPOCHS - 1,
'state_dict': model.module.state_dict(),
'optimizer': optimizer.state_dict(),
'best_pred': best_pred,
'acclist_train':acclist_train,
'acclist_val':acclist_val,
},
directory=args.outdir,
is_best=False,
filename='checkpoint_final.pth')
# save final model weights
with PathManager.open(os.path.join(args.outdir, 'model_weights.pth'), "wb") as f:
torch.save(model.module.state_dict(), f)
metrics = {
"top1": top1_acc,
"top5": top5_acc,
}
with PathManager.open(os.path.join(args.outdir, 'metrics.json'), "w") as f:
json.dump(metrics, f)
if __name__ == "__main__":
main()
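# Example invocation (illustrative only; the config path and output directory are placeholders):
#
#   python train.py --config-file configs/resnest50.yaml --outdir output/resnest50 \
#       --world-size 1 --rank 0 --dist-url tcp://localhost:23456
#
# To evaluate an existing checkpoint, add --resume <checkpoint.pth> --eval-only.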
|
test/core_typecheck.py | loongson-zn/build | 215 | 11135830 |
#!/usr/bin/python
# Copyright 2003 <NAME>
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or https://www.bfgroup.xyz/b2/LICENSE.txt)
# This tests the typechecking facilities.
import BoostBuild
t = BoostBuild.Tester(["-ffile.jam"], pass_toolset=0)
t.write("file.jam", """
module .typecheck
{
rule "[path]" ( x )
{
if ! [ MATCH "^(::)" : $(x) ]
{
ECHO "Error: $(x) is not a path" ;
return true ;
}
}
}
rule do ( [path] a )
{
}
do $(ARGUMENT) ;
actions dummy { }
dummy all ;
""")
t.run_build_system(["-sARGUMENT=::a/b/c"])
t.run_build_system(["-sARGUMENT=a/b/c"], status=1, stdout="""\
Error: a/b/c is not a path
file.jam:18: in module scope
*** argument error
* rule do ( [path] a )
* called with: ( a/b/c )
* true a
file.jam:16:see definition of rule 'do' being called
""")
t.cleanup()
|
models/modules.py | ztmo520/NeuralRecon | 1,001 | 11135880 |
import torch
import torch.nn as nn
import torchsparse
import torchsparse.nn as spnn
from torchsparse.tensor import PointTensor
from torchsparse.utils import *
from ops.torchsparse_utils import *
__all__ = ['SPVCNN', 'SConv3d', 'ConvGRU']
class BasicConvolutionBlock(nn.Module):
def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
dilation=dilation,
stride=stride), spnn.BatchNorm(outc),
spnn.ReLU(True))
def forward(self, x):
out = self.net(x)
return out
class BasicDeconvolutionBlock(nn.Module):
def __init__(self, inc, outc, ks=3, stride=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
stride=stride,
transposed=True), spnn.BatchNorm(outc),
spnn.ReLU(True))
def forward(self, x):
return self.net(x)
class ResidualBlock(nn.Module):
def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
dilation=dilation,
stride=stride), spnn.BatchNorm(outc),
spnn.ReLU(True),
spnn.Conv3d(outc,
outc,
kernel_size=ks,
dilation=dilation,
stride=1), spnn.BatchNorm(outc))
self.downsample = nn.Sequential() if (inc == outc and stride == 1) else \
nn.Sequential(
spnn.Conv3d(inc, outc, kernel_size=1, dilation=1, stride=stride),
spnn.BatchNorm(outc)
)
self.relu = spnn.ReLU(True)
def forward(self, x):
out = self.relu(self.net(x) + self.downsample(x))
return out
class SPVCNN(nn.Module):
def __init__(self, **kwargs):
super().__init__()
self.dropout = kwargs['dropout']
cr = kwargs.get('cr', 1.0)
cs = [32, 64, 128, 96, 96]
cs = [int(cr * x) for x in cs]
if 'pres' in kwargs and 'vres' in kwargs:
self.pres = kwargs['pres']
self.vres = kwargs['vres']
self.stem = nn.Sequential(
spnn.Conv3d(kwargs['in_channels'], cs[0], kernel_size=3, stride=1),
spnn.BatchNorm(cs[0]), spnn.ReLU(True)
)
self.stage1 = nn.Sequential(
BasicConvolutionBlock(cs[0], cs[0], ks=2, stride=2, dilation=1),
ResidualBlock(cs[0], cs[1], ks=3, stride=1, dilation=1),
ResidualBlock(cs[1], cs[1], ks=3, stride=1, dilation=1),
)
self.stage2 = nn.Sequential(
BasicConvolutionBlock(cs[1], cs[1], ks=2, stride=2, dilation=1),
ResidualBlock(cs[1], cs[2], ks=3, stride=1, dilation=1),
ResidualBlock(cs[2], cs[2], ks=3, stride=1, dilation=1),
)
self.up1 = nn.ModuleList([
BasicDeconvolutionBlock(cs[2], cs[3], ks=2, stride=2),
nn.Sequential(
ResidualBlock(cs[3] + cs[1], cs[3], ks=3, stride=1,
dilation=1),
ResidualBlock(cs[3], cs[3], ks=3, stride=1, dilation=1),
)
])
self.up2 = nn.ModuleList([
BasicDeconvolutionBlock(cs[3], cs[4], ks=2, stride=2),
nn.Sequential(
ResidualBlock(cs[4] + cs[0], cs[4], ks=3, stride=1,
dilation=1),
ResidualBlock(cs[4], cs[4], ks=3, stride=1, dilation=1),
)
])
self.point_transforms = nn.ModuleList([
nn.Sequential(
nn.Linear(cs[0], cs[2]),
nn.BatchNorm1d(cs[2]),
nn.ReLU(True),
),
nn.Sequential(
nn.Linear(cs[2], cs[4]),
nn.BatchNorm1d(cs[4]),
nn.ReLU(True),
)
])
self.weight_initialization()
if self.dropout:
self.dropout = nn.Dropout(0.3, True)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, z):
# x: SparseTensor z: PointTensor
x0 = initial_voxelize(z, self.pres, self.vres)
x0 = self.stem(x0)
z0 = voxel_to_point(x0, z, nearest=False)
z0.F = z0.F
x1 = point_to_voxel(x0, z0)
x1 = self.stage1(x1)
x2 = self.stage2(x1)
z1 = voxel_to_point(x2, z0)
z1.F = z1.F + self.point_transforms[0](z0.F)
y3 = point_to_voxel(x2, z1)
if self.dropout:
y3.F = self.dropout(y3.F)
y3 = self.up1[0](y3)
y3 = torchsparse.cat([y3, x1])
y3 = self.up1[1](y3)
y4 = self.up2[0](y3)
y4 = torchsparse.cat([y4, x0])
y4 = self.up2[1](y4)
z3 = voxel_to_point(y4, z1)
z3.F = z3.F + self.point_transforms[1](z1.F)
return z3.F
class SConv3d(nn.Module):
def __init__(self, inc, outc, pres, vres, ks=3, stride=1, dilation=1):
super().__init__()
self.net = spnn.Conv3d(inc,
outc,
kernel_size=ks,
dilation=dilation,
stride=stride)
self.point_transforms = nn.Sequential(
nn.Linear(inc, outc),
)
self.pres = pres
self.vres = vres
def forward(self, z):
x = initial_voxelize(z, self.pres, self.vres)
x = self.net(x)
out = voxel_to_point(x, z, nearest=False)
out.F = out.F + self.point_transforms(z.F)
return out
class ConvGRU(nn.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128, pres=1, vres=1):
super(ConvGRU, self).__init__()
self.convz = SConv3d(hidden_dim + input_dim, hidden_dim, pres, vres, 3)
self.convr = SConv3d(hidden_dim + input_dim, hidden_dim, pres, vres, 3)
self.convq = SConv3d(hidden_dim + input_dim, hidden_dim, pres, vres, 3)
def forward(self, h, x):
'''
:param h: PintTensor
:param x: PintTensor
:return: h.F: Tensor (N, C)
'''
hx = PointTensor(torch.cat([h.F, x.F], dim=1), h.C)
z = torch.sigmoid(self.convz(hx).F)
r = torch.sigmoid(self.convr(hx).F)
x.F = torch.cat([r * h.F, x.F], dim=1)
q = torch.tanh(self.convq(x).F)
h.F = (1 - z) * h.F + z * q
return h.F
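# Illustrative shape sketch (added for clarity; not part of the original module).
# h and x are torchsparse PointTensor objects sharing the same coordinates h.C,
# with h.F of shape (N, hidden_dim) and x.F of shape (N, input_dim):
#
#   gru = ConvGRU(hidden_dim=64, input_dim=80 + 64, pres=1, vres=1)
#   # new_h = gru(h, x)   # torch.Tensor of shape (N, 64), the updated hidden features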
|
tests/test_backprop.py | Gausx/flashtorch | 685 | 11135925 |
import inspect
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from flashtorch.saliency import Backprop
#####################
# Utility functions #
#####################
def find_first_conv_layer(model, layer_type, in_channels):
for _, module in model.named_modules():
if isinstance(module, layer_type) and \
module.in_channels == in_channels:
return module
def find_relu_layers(model, layer_type):
modules = []
for _, module in model.named_modules():
if isinstance(module, layer_type):
modules.append(module)
return modules
# Mock the output from the neural network
def make_mock_output(mocker, model, top_class):
num_classes = 10
mock_tensor = torch.zeros((1, num_classes))
mock_tensor[0][top_class] = 1
mock_output = mocker.Mock(spec=mock_tensor, shape=(1, num_classes))
mocker.patch.object(model, 'forward', return_value=mock_output)
# Mock the return value of output.topk()
mock_topk = (None, top_class)
mocker.patch.object(mock_output, 'topk', return_value=mock_topk)
return mock_output
# Make expected target of the gradient calculation
def make_expected_gradient_target(top_class):
num_classes = 10
target = torch.zeros((1, num_classes))
target[0][top_class] = 1
return target
class CnnGrayscale(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=3, padding=1)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(64, 10, kernel_size=3, stride=3, padding=1)
self.fc1 = nn.Linear(10 * 25 * 25, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
return F.softmax(self.fc1(x.view(-1, 10 * 25 * 25)), dim=1)
#################
# Test fixtures #
#################
@pytest.fixture
def model():
return models.alexnet()
@pytest.fixture
def model_grayscale():
return CnnGrayscale()
##############
# Test cases #
##############
def test_set_model_to_eval_mode(mocker, model):
mocker.spy(model, 'eval')
Backprop(model)
model.eval.assert_called_once()
def test_zero_out_gradients(mocker, model):
backprop = Backprop(model)
mocker.spy(model, 'zero_grad')
target_class = 5
input_ = torch.zeros([1, 3, 224, 224])
make_mock_output(mocker, model, target_class)
backprop.calculate_gradients(input_, target_class)
model.zero_grad.assert_called_once()
def test_handle_binary_classifier(mocker, model):
backprop = Backprop(model)
target_class = 0
input_ = torch.zeros([1, 3, 224, 224])
mock_output = torch.tensor([0.8])
mock_output.requires_grad = True
mocker.patch.object(model, 'forward', return_value=mock_output)
backprop.calculate_gradients(input_, target_class)
def test_calculate_gradients_of_target_class_only(mocker, model):
backprop = Backprop(model)
top_class = 5
target_class = 5
input_ = torch.zeros([1, 3, 224, 224])
target = make_expected_gradient_target(top_class)
mock_output = make_mock_output(mocker, model, target_class)
backprop.calculate_gradients(input_, target_class)
args, kwargs = mock_output.backward.call_args
assert torch.all(kwargs['gradient'].eq(target))
def test_calc_gradients_of_top_class_if_target_not_provided(mocker, model):
backprop = Backprop(model)
top_class = 5
input_ = torch.zeros([1, 3, 224, 224])
target = make_expected_gradient_target(top_class)
mock_output = make_mock_output(mocker, model, top_class)
backprop.calculate_gradients(input_)
args, kwargs = mock_output.backward.call_args
assert torch.all(kwargs['gradient'].eq(target))
def test_calc_gradients_of_top_class_if_prediction_is_wrong(mocker, model):
backprop = Backprop(model)
top_class = torch.tensor(5)
target_class = 7
input_ = torch.zeros([1, 3, 224, 224])
target = make_expected_gradient_target(top_class)
mock_output = make_mock_output(mocker, model, top_class)
with pytest.warns(UserWarning):
backprop.calculate_gradients(input_, target_class)
args, kwargs = mock_output.backward.call_args
assert torch.all(kwargs['gradient'].eq(target))
def test_handle_greyscale_input(mocker, model_grayscale):
backprop = Backprop(model_grayscale)
input_ = torch.zeros([1, 1, 224, 224], requires_grad=True)
gradients = backprop.calculate_gradients(input_)
assert gradients.shape == (1, 224, 224)
def test_return_max_across_color_channels_if_specified(mocker, model):
backprop = Backprop(model)
target_class = 5
input_ = torch.zeros([1, 3, 224, 224])
make_mock_output(mocker, model, target_class)
gradients = backprop.calculate_gradients(input_,
target_class,
take_max=True)
assert gradients.shape == (1, 224, 224)
def test_checks_input_size_for_inception_model(mocker):
with pytest.raises(ValueError) as error:
model = models.inception_v3()
backprop = Backprop(model)
target_class = 5
input_ = torch.zeros([1, 3, 224, 224])
backprop.calculate_gradients(input_, target_class)
assert 'Image must be 299x299 for Inception models.' in str(error.value)
def test_warn_when_prediction_is_wrong(mocker, model):
backprop = Backprop(model)
top_class = torch.tensor(1)
target_class = 5
input_ = torch.zeros([1, 3, 224, 224])
make_mock_output(mocker, model, top_class)
with pytest.warns(UserWarning):
backprop.calculate_gradients(input_, target_class)
# Test visualize method
def test_visualize_calls_calculate_gradients_twice(mocker, model):
backprop = Backprop(model)
mocker.spy(backprop, 'calculate_gradients')
top_class = 5
target_class = 5
input_ = torch.zeros([1, 3, 224, 224])
make_expected_gradient_target(top_class)
make_mock_output(mocker, model, target_class)
backprop.visualize(input_, target_class, use_gpu=True)
assert backprop.calculate_gradients.call_count == 2
def test_visualize_passes_gpu_flag(mocker, model):
backprop = Backprop(model)
mocker.spy(backprop, 'calculate_gradients')
top_class = 5
target_class = 5
input_ = torch.zeros([1, 3, 224, 224])
make_expected_gradient_target(top_class)
make_mock_output(mocker, model, target_class)
backprop.visualize(input_, target_class, use_gpu=True)
_, _, kwargs = backprop.calculate_gradients.mock_calls[0]
assert kwargs['use_gpu']
# Test compatibilities with torchvision models
available_models = inspect.getmembers(models, inspect.isfunction)
@pytest.mark.parametrize("name, model_module", available_models)
def test_register_hook_to_first_conv_layer(mocker, name, model_module):
model = model_module()
conv_layer = find_first_conv_layer(model, nn.modules.conv.Conv2d, 3)
mocker.spy(conv_layer, 'register_backward_hook')
Backprop(model)
conv_layer.register_backward_hook.assert_called_once()
@pytest.mark.parametrize("name, model_module", available_models)
def test_register_hooks_to_relu_layers(mocker, name, model_module):
model = model_module()
relu_layers = find_relu_layers(model, nn.ReLU)
for layer in relu_layers:
mocker.spy(layer, 'register_forward_hook')
mocker.spy(layer, 'register_backward_hook')
backprop = Backprop(model)
target_class = 5
input_ = torch.zeros([1, 3, 224, 224])
if 'inception' in name:
input_ = torch.zeros([1, 3, 299, 299])
make_mock_output(mocker, model, target_class)
backprop.calculate_gradients(input_, target_class, guided=True)
for layer in relu_layers:
layer.register_forward_hook.assert_called_once()
layer.register_backward_hook.assert_called_once()
@pytest.mark.parametrize("name, model_module", available_models)
def test_calculate_gradients_for_all_models(mocker, name, model_module):
model = model_module()
backprop = Backprop(model)
target_class = 5
input_ = torch.zeros([1, 3, 224, 224])
if 'inception' in name:
input_ = torch.zeros([1, 3, 299, 299])
make_mock_output(mocker, model, target_class)
gradients = backprop.calculate_gradients(input_,
target_class,
use_gpu=True)
assert gradients.shape == input_.size()[1:]
if __name__ == '__main__':
pytest.main([__file__])
|
test/programytest/storage/stores/sql/dao/test_conversation.py | cdoebler1/AIML2 | 345 | 11135931 | import unittest
from programy.storage.stores.sql.dao.conversation import Conversation
from programy.storage.stores.sql.dao.conversation import ConversationProperty
from programy.storage.stores.sql.dao.conversation import Match
from programy.storage.stores.sql.dao.conversation import MatchNode
from programy.storage.stores.sql.dao.conversation import Question
from programy.storage.stores.sql.dao.conversation import Sentence
class ConversationTests(unittest.TestCase):
def test_init(self):
conversation1 = Conversation(clientid='clientid', userid='userid', botid='botid', brainid='brainid', maxhistories=100)
self.assertIsNotNone(conversation1)
self.assertEqual("<Conversation(id='n/a', clientid='clientid', userid='userid', botid='botid', brainid='brainid', maxhistories='100'>", str(conversation1))
conversation2 = Conversation(id=1, clientid='clientid', userid='userid', botid='botid', brainid='brainid', maxhistories=100)
self.assertIsNotNone(conversation2)
self.assertEqual("<Conversation(id='1', clientid='clientid', userid='userid', botid='botid', brainid='brainid', maxhistories='100'>", str(conversation2))
question1 = Question (conversationid=1, questionno=1, srai="false")
self.assertIsNotNone(question1)
self.assertEqual("<Question(id='n/a', conversationid='1', questionno='1', srai='false'>", str(question1))
question2 = Question (id=1, conversationid=1, questionno=1, srai="false")
self.assertIsNotNone(question2)
self.assertEqual("<Question(id='1', conversationid='1', questionno='1', srai='false'>", str(question2))
sentence1 = Sentence (questionid=1, sentenceno=2, sentence='Hello world', response='Hi there', positivity='1.0', subjectivity='0.34')
self.assertIsNotNone(sentence1)
self.assertEqual("<Sentence(id='n/a', questionid='1', sentenceno='2', sentence='Hello world', response='Hi there', positivity='1.0', subjectivity='0.34'>", str(sentence1))
sentence2 = Sentence (id=1, questionid=1, sentenceno=2, sentence='Hello world', response='Hi there', positivity='1.0', subjectivity='0.34')
self.assertIsNotNone(sentence2)
self.assertEqual("<Sentence(id='1', questionid='1', sentenceno='2', sentence='Hello world', response='Hi there', positivity='1.0', subjectivity='0.34'>", str(sentence2))
convopro1 = ConversationProperty (conversationid=1, questionid=2, type=3, name='name', value='value')
self.assertIsNotNone(convopro1)
self.assertEqual("<ConversationProperty(id='n/a', conversationid='1', questionid='2', type='3', name='name', value='value')>", str(convopro1))
convopro2 = ConversationProperty (id=1, conversationid=1, questionid=2, type=3, name='name', value='value')
self.assertIsNotNone(convopro2)
self.assertEqual("<ConversationProperty(id='1', conversationid='1', questionid='2', type='3', name='name', value='value')>", str(convopro2))
match1 = Match (max_search_depth=100, max_search_timeout=100, sentence='HELLO WORLD', response='Hi there', score='0.99')
self.assertIsNotNone(match1)
self.assertEqual("<Match(id='n/a', max_search_depth='100', max_search_timeout='100', sentence='HELLO WORLD', response='Hi there', score='0.99')>", str(match1))
match2 = Match (id=1, max_search_depth=100, max_search_timeout=100, sentence='HELLO WORLD', response='Hi there', score='0.99')
self.assertIsNotNone(match2)
self.assertEqual("<Match(id='1', max_search_depth='100', max_search_timeout='100', sentence='HELLO WORLD', response='Hi there', score='0.99')>", str(match2))
matchnode1 = MatchNode (matchid=1, matchcount=2, matchtype='WORD', matchnode='NODE', matchstr='HELLO', wildcard='*', multiword='False')
self.assertIsNotNone(matchnode1)
self.assertEqual("<MatchNode(id='n/a', matchid='1', matchcount='2', matchtype='WORD', matchnode='NODE', matchstr='HELLO', wildcard='*', multiword='False')>", str(matchnode1))
matchnode2 = MatchNode (id=1, matchid=1, matchcount=2, matchtype='WORD', matchnode='NODE', matchstr='HELLO', wildcard='*', multiword='False')
self.assertIsNotNone(matchnode2)
self.assertEqual("<MatchNode(id='1', matchid='1', matchcount='2', matchtype='WORD', matchnode='NODE', matchstr='HELLO', wildcard='*', multiword='False')>", str(matchnode2))
|
vipermonkey/api.py | lap1nou/ViperMonkey | 874 | 11135932 | """
ViperMonkey - API
ViperMonkey is a specialized engine to parse, analyze and interpret Microsoft
VBA macros (Visual Basic for Applications), mainly for malware analysis.
Author: <NAME> - http://www.decalage.info
License: BSD, see source code or documentation
Project Repository:
https://github.com/decalage2/ViperMonkey
"""
#=== LICENSE ==================================================================
# ViperMonkey is copyright (c) 2015-2019 <NAME> (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import inspect
import pyparsing
# Enable PackRat for better performance:
# (see https://pythonhosted.org/pyparsing/pyparsing.ParserElement-class.html#enablePackrat)
pyparsing.ParserElement.enablePackrat(cache_size_limit=10000000)
from vipermonkey.core import deobfuscation
from vipermonkey.core.modules import *
from vipermonkey.core.modules import Module as _Module
from vipermonkey.core.vba_lines import vba_collapse_long_lines
# NOTE: This MUST be imported because it registers function to the VBA_LIBRARY
# dictionary in vba_context... don't ask me why.
# Make sure we populate the VBA Library:
from vipermonkey.core.vba_library import *
def _get_keywords(line, num=2):
"""Gets the first num keywords of line"""
return line.lower().split(None, num)
class CustomVBALibraryFunc(VbaLibraryFunc):
"""Wraps a function into a VbaLibraryFunc class object."""
def __init__(self, callback):
self._callback = callback
def eval(self, context, params=None):
return self._callback(context, params=params)
class SmartDict(dict):
"""
    Smarter dictionary that automatically wraps VbaLibraryFunc classes and plain callables on assignment.
Also, the keys are case insensitive.
"""
def __contains__(self, key):
return super(SmartDict, self).__contains__(key.lower())
def __getitem__(self, key):
"""Convert to case of key before retrieval."""
return super(SmartDict, self).__getitem__(key.lower())
def __setitem__(self, key, value):
"""Automatically convert VbaLibraryFunc classes and lambdas before setting."""
# If a VBALibraryFunc class was passed in without being initialized, initialize it for them.
if inspect.isclass(value) and issubclass(value, VbaLibraryFunc):
value = value()
# If a function was passed in, wrap it in a VbaLibraryFunc class.
elif callable(value):
value = CustomVBALibraryFunc(value)
super(SmartDict, self).__setitem__(key.lower(), value)
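# Editor's illustrative sketch (not part of the original module): SmartDict
# lowercases keys and wraps plain callables in CustomVBALibraryFunc, so lookups
# are case-insensitive and every stored value exposes eval(). The names below
# are hypothetical and this helper is never called by the library itself.
def _smartdict_example():
    funcs = SmartDict()
    funcs['MsgBox'] = lambda context, params=None: params[0]  # auto-wrapped
    assert 'msgbox' in funcs and 'MSGBOX' in funcs            # case-insensitive keys
    return funcs['msgbox'].eval(None, params=['hello'])       # -> 'hello'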
orig_Context = Context
# MonkeyPatch Context with new features useful for a user
# FIXME: We can't just update the main context with this stuff because we can't get
# the VbaLibraryFunc class to import there (which is needed by SmartDict).
# This is due to the complexities caused by the abundant use of wildcard imports.
class Context(orig_Context):
"""Overwrites ViperMonkey's original context to improve functionality:
- simplify constructor
- provide report_action callback
- uses magic indexing
- allows overwriting a function or sub with a custom python class.
- allows providing a list of interesting functions to log.
(this extends the function names already defined in Function_Call)
"""
def __init__(self, report_action=None, **kwargs):
"""
Initializes context
:param report_action: Callback function used to report triggered actions.
        :param log_funcs: List of functions to report as interesting function calls.
:param kwargs: Extra options passed back to original context.
"""
kwargs['engine'] = self # Engine is self so that sub contexts also have report_action.
super(Context, self).__init__(**kwargs)
self._report_action = report_action
self.actions = collections.defaultdict(list)
# Replace dictionaries with "smarter" ones.
if not isinstance(self.globals, SmartDict):
self.globals = SmartDict(self.globals)
if not isinstance(self.locals, SmartDict):
self.locals = SmartDict(self.locals)
def __contains__(self, item):
try:
_ = self[item]
return True
except KeyError:
return False
def __delitem__(self, key):
"""Remove item from context."""
key = key.lower()
if key in self.locals:
del self.locals[key]
elif key in self.globals:
del self.globals[key]
if key in self.types:
del self.types[key]
def __getitem__(self, item):
"""Let context['thing'] be equivalent to context.get('thing')"""
return self.get(item)
def __setitem__(self, key, value):
"""Let context['thing'] = 'foo' be equivalent to context.set('thing', 'foo')"""
self.set(key, value)
def report_action(self, action, params=None, description=None, strip_null_bytes=False):
# NOTE: We are ignoring the strip_null_bytes parameter because that is a business logic detail.
# Record action in context
self.actions[description].append((action, params))
# Perform any custom reporting.
if self._report_action:
self._report_action(action, params=params, description=description)
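# Editor's illustrative sketch (not part of the original module): the Context
# subclass above supports dict-style access and a report_action callback.
# The variable names and values below are hypothetical.
def _context_example():
    reported = []
    ctx = Context(report_action=lambda action, params=None, description=None:
                  reported.append((action, params, description)))
    ctx['counter'] = 42                      # same as ctx.set('counter', 42)
    assert 'counter' in ctx and ctx['counter'] == 42
    ctx.report_action('Run', params=['calc.exe'], description='Shell call')
    return reported                          # [('Run', ['calc.exe'], 'Shell call')]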
class CodeBlock(object):
"""
Defines a block of code. This can be a function, for loop or even a single line of code.
Each code block may also have internal code blocks within it.
For example, in the below code, the function Execute() is a code block which has
the internal code blocks -- the Dim statements and the For loop.
The For loop also has internal code blocks containing the lines of code within it.
Public Function Execute() As Variant
Dim foo As String
Dim counter As Integer
For counter = 34 to 40
foo = foo & Chr(counter)
Next counter
End Function
:param pp_spec: pyparsing object used to parse the code.
:param lines: String or list of lines representing the code.
:param parse_all: Whether to ensure all the code will be parsed when using pp_spec
:param deobfuscate: Whether to deobfuscate the code first which may speed up processing.
"""
def __init__(self, pp_spec, lines, parse_all=True, deobfuscate=False):
self._pp_spec = pp_spec
if isinstance(lines, (bytes, str)):
if deobfuscate:
# vba_collapse_long_lines() is done in deobfuscate()
lines = deobfuscation.deobfuscate(lines)
else:
lines = vba_collapse_long_lines(lines)
self.lines = lines.splitlines(True)
else:
if deobfuscate:
lines = deobfuscation.deobfuscate('\n'.join(lines)).splitlines(True)
self.lines = lines
self._obj = None
self._parse_attempted = False
self._parse_all = parse_all
self._code_blocks = None
def __str__(self):
return ''.join(self.lines)
def __getattr__(self, item):
"""Redirects anything that this class doesn't support back to the parsed obj."""
# Default to None so we can avoid having to tediously check the type beforehand.
return getattr(self.obj, item, None)
@property
def __class__(self):
"""
        Black magic necessary to make isinstance() checks treat this wrapper as the
        underlying VBA_Object class (e.g. SimpleNameExpression, Global_Var_Statement, or Module).
"""
return self.obj.__class__
@property
def obj(self):
"""Returns VBA_Object or None on failure."""
# TODO: Support the option of running the full grammar?
if not self._obj:
# Don't keep trying if we will fail.
if self._parse_attempted:
return None
try:
self._parse_attempted = True
# parse the first line using provided pp_spec
self._obj = self._pp_spec.parseString(self.lines[0], parseAll=self._parse_all)[0]
except ParseException as err:
log.warn('*** PARSING ERROR (3) ***\n{}\n{}\n{}'.format(
err.line, " " * (err.column - 1) + "^", err))
return None
return self._obj
def _take_until(self, line_gen, end):
"""Consumes and yields lines from the given line generator until end tokens are found."""
for line in line_gen:
yield line
if line.lower().split(None, len(end))[:len(end)] == end:
return
def _generate_code_block(self, line_gen, line, line_keywords):
"""
Factory method for creating a CodeBlock from given line, line_keywords, and line generator
to optional consume more lines.
"""
# TODO: Add the other block code types like For, Switch, Case and If statements?
if line_keywords[0] == 'for':
log.debug('FOR LOOP')
# NOTE: a for clause may be followed by ":" and statements on the same line
lines = [line] + list(self._take_until(line_gen, ['next']))
return CodeBlock(for_start, lines, parse_all=False)
else:
# NOTE: Needed to add EOS to fix "Expected end of text" errors. (This should be on vba_line)
return CodeBlock(vba_line + Optional(EOS).suppress(), line)
def _iter_code_blocks(self):
"""Iterates internal codes blocks contained within this block."""
line_gen = iter(self.lines[1:-1]) # Iterate internal lines between the header and footer.
for line in line_gen:
# Parse line
log.debug('Parsing line: {}'.format(line.rstrip()))
# extract first two keywords in lowercase, for quick matching
line_keywords = line.lower().split(None, 2)
# ignore empty or comment lines
if not line_keywords or line_keywords[0].startswith("'"):
continue
if line_keywords[0] in ('public', 'private'):
# remove the public/private keyword:
line_keywords = line_keywords[1:]
yield self._generate_code_block(line_gen, line, line_keywords)
@property
def code_blocks(self):
"""Iterates internal code blocks. Caches results to speed up next request."""
if self._code_blocks is None:
code_blocks = []
for code_block in self._iter_code_blocks():
code_blocks.append(code_block)
yield code_block
self._code_blocks = code_blocks
else:
for code_block in self._code_blocks:
yield code_block
@property
def type(self):
"""Returns type of VBA_Object."""
return type(self.obj)
def eval(self, context=None, params=None):
"""Evaluates line(s) of code. Returns evaluated value (if appropriate) or None."""
context = context or Context()
if not self.obj:
log.error('Unable to evaluate "{}" due to parse error.'.format(self))
return None
# Before performing evaluation we need to parse all the internal code blocks
# and add any parsed statements.
if hasattr(self.obj, 'statements') and not self.obj.statements:
# Even though we are passing our own class type it should still work because we have an
# eval() function. (Duck typing and all that)
self.obj.statements = list(self.code_blocks)
if hasattr(self.obj, 'eval'):
return self.obj.eval(context=context, params=params)
else:
return self.obj
def load_context(self, context):
"""
Loads context by evaluating code blocks within.
This is a convenience function for performing the common need of trying to get the
        state of the context after a function has been run.
"""
for code_block in self.code_blocks:
code_block.eval(context)
class Module(CodeBlock):
"""The entry point for creating a VBA element for parsing/evaluation."""
# List of possible entry point functions.
_ENTRY_POINTS = ['autoopen', 'document_open', 'autoclose',
'document_close', 'auto_open', 'autoexec',
'autoexit', 'document_beforeclose', 'workbook_open',
'workbook_activate', 'auto_close', 'workbook_close']
def __init__(self, lines, deobfuscate=False):
"""
Initializes a VBA module (or collection of loose lines)
        :param lines: String or list of lines representing the code.
:param deobfuscate: Whether to deobfuscate the code first which may speed up processing.
"""
# TODO: pp spec for module?
# Instead of having a pyparsing spec, we are going to manually create the
# parsed object from code blocks.
super(Module, self).__init__(None, lines, deobfuscate=deobfuscate)
# We are also going to include a dummy first line so that _iter_code_blocks()
# doesn't skip the first line and last line.
self.lines = [''] + self.lines + ['']
def _generate_code_block(self, line_gen, line, line_keywords):
# Overwrite, because a module can contain subs, functions, and module header lines
# (VBA doesn't support nested functions/subs)
if line_keywords[0] == 'attribute':
return CodeBlock(header_statements_line, line)
        # TODO: Is dim necessary here, or can it be found via vba_line?
elif line_keywords[0] in ('option', 'dim', 'declare'):
log.debug('DECLARATION LINE')
return CodeBlock(declaration_statements_line, line)
elif line_keywords[0] == 'sub':
log.debug('SUB')
lines = [line] + list(self._take_until(line_gen, ['end', 'sub']))
return CodeBlock(procedures.sub_start_line, lines)
elif line_keywords[0] == 'function':
log.debug('FUNCTION')
lines = [line] + list(self._take_until(line_gen, ['end', 'function']))
return CodeBlock(procedures.function_start_line, lines)
else:
return super(Module, self)._generate_code_block(line_gen, line, line_keywords)
@property
def functions(self):
"""Returns functions"""
return self.obj.functions.values()
@property
def subs(self):
"""Returns subs"""
return self.obj.subs.values()
@property
def procedures(self):
"""Returns subs and functions combined."""
return self.functions + self.subs
@property
def entry_points(self):
"""Yields the entry points. (or None if not found)."""
# Since the module VBA_Object stores its elements with case intact we can't just hash.
for name, sub in self.obj.subs.iteritems():
if name.lower() in self._ENTRY_POINTS:
yield sub
for name, function in self.obj.functions.iteritems():
if name.lower() in self._ENTRY_POINTS:
yield function
def eval(self, context=None, params=None):
"""Evaluates line(s) of code. Returns evaluated value (if appropriate) or None."""
context = context or Context()
self.load_context(context)
# Evaluate each loose code_block.
# NOTE: I would have used their obj.eval() with their "loose_lines" but it seems to not
# detect a lot of things...
# It's easier and more reliable to just count anything that is not a Function/Sub as loose.
# (Also, it doesn't return anything)
ret = None
for code_block in self.code_blocks:
if not isinstance(code_block, (Function, Sub)):
# TODO: We are going to consider variables as local when run like this
# We should really have a "global" Context just be considered the parent Context object.
# ... That would make better scoping emulation!
                # context.global_scope = True  # must set inside in case the code changes it.
ret = code_block.eval(context, params)
# context.global_scope = False
return ret
# TODO: Rename to declare()?
def load_context(self, context):
# For a Module this will declare all subs and functions into the context.
# NOTE: I am not using obj.load_context() because the functions/subs
# are set to locals instead of globals.
context = context or Context()
for name, _sub in self.obj.subs.items():
log.debug('(3) storing sub "%s" in globals' % name)
context.globals[name.lower()] = _sub
for name, _function in self.obj.functions.items():
log.debug('(3) storing function "%s" in globals' % name)
context.globals[name.lower()] = _function
for name, _function in self.obj.external_functions.items():
log.debug('(3) storing external function "%s" in globals' % name)
context.globals[name.lower()] = _function
for name, _var in self.obj.global_vars.items():
log.debug('(3) storing global var "%s" in globals' % name)
if isinstance(name, str):
context.globals[name.lower()] = _var
if isinstance(name, list):
context.globals[name[0].lower()] = _var
context.types[name[0].lower()] = name[1]
@property
def obj(self):
"""Returns VBA_Object or None on failure."""
# Instead of using a pyparsing spec, we are going to manually
# call and grab all the components from code_blocks.
# (This helps to prevent calling eval() to every code block.)
if not self._obj:
# TODO: Instead of blindly processing the obj for every code_block, only
# process Sub, Function, External_Function, Attribute_Statement, and Global_Var_Statement
# We need to replicate the initialization done in modules.Module but with code_blocks.
self._obj = _Module(str(self), 0, list(self.code_blocks))
return self._obj
def eval(vba_code, context=None, deobfuscate=False):
"""
A quick helper function to evaluate a chunk of code. (Useful as an analysis or development tool.)
:param str vba_code: VBA code to evaluate
:param context: Context obj to fill while evaluating.
:param deobfuscate: Whether to deobfuscate the code first which may speed up processing.
:return: Evaluated results.
"""
context = context or Context()
module = Module(vba_code, deobfuscate=deobfuscate)
return module.eval(context)
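# Editor's illustrative sketch (not part of the original module): typical use of
# the eval() helper above. The VBA snippet is hypothetical, and whether it fully
# evaluates depends on the underlying grammar; treat this as a usage outline.
def _api_usage_example():
    context = Context()
    # eval() wraps the code in a Module, declares its subs/functions/globals into
    # `context`, evaluates the loose lines, and returns the last evaluated value.
    return eval('msg = "Hello" & " " & "World"', context=context)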
|
pygears/lib/filt.py | bogdanvuk/pygears | 120 | 11135997 | from pygears import alternative, gear
from pygears.typing import Queue, Union, Uint, Tuple, Bool, Unit, Any, Maybe
from pygears.lib.fmaps.queue import queuemap
from .ccat import ccat
def filt_type(din, lvl, sel):
return Queue[(din[0].types)[sel], lvl]
@gear(hdl={'compile': True})
async def filt(din: Tuple[{'data': Union, 'sel': Uint}]) -> b'din["data"]':
'''Filters the ``data`` field of :class:`Union` type, by passing it forward
only if it carries the ``data`` :class:`Union` subtype whose index is equal
to the value supplied to the ``sel`` field. Index of a :class:`Union`
subtype is equal to the position of the subtype within the :class:`Union`
definition.
'''
async with din as (data, sel):
if data.ctrl == sel:
yield data
@alternative(filt)
@gear
def filt_unit(din: Union[Any, Unit]) -> b'din.types[0]':
return din | filt(fixsel=0)
@alternative(filt)
@gear
def filt_maybe(din: Union[Unit, Any]) -> b'din.types[1]':
return din | filt(fixsel=1)
@alternative(filt)
@gear
def filt_fix_sel(din: Union, *, fixsel) -> b'din.types[fixsel]':
return (ccat(din, din.dtype.ctrl(fixsel)) | filt)[0] \
>> din.dtype.types[fixsel]
@alternative(filt)
@gear
def qfilt_f(din: Queue, *, f):
@gear
def maybe_out(din, *, f):
return ccat(din, din | f) | Union
return din | queuemap(f=maybe_out(f=f)) | qfilt_union(fixsel=1)
@alternative(filt)
@gear(enablement=b'not typeof(din, Queue)')
def filt_f(din, *, f):
@gear
def maybe_out(din, *, f):
return ccat(din, f(din)) | Union
return din | maybe_out(f=f) | filt(fixsel=1)
@alternative(filt)
@gear(hdl={'compile': True})
async def qfilt_union(din: Queue[Union, 'lvl'], *, fixsel=0,
filt_lvl=1) -> b'filt_type(din, lvl, fixsel)':
data_reg: din.dtype.data.data = din.dtype.data.data(0)
eot_reg: Uint[din.dtype.lvl] = Uint[din.dtype.lvl](0)
empty_reg: Bool = Bool(True)
curr_data: din.dtype.data.data
field_sel: Bool
while True:
async with din as d:
curr_data = d.data.data
field_sel = (d.data.ctrl == fixsel)
if all(d.eot[:filt_lvl]):
if field_sel:
if not empty_reg:
yield (data_reg, eot_reg)
yield (curr_data, d.eot)
elif not empty_reg:
yield (data_reg, d.eot)
empty_reg = True
elif field_sel:
if not empty_reg:
yield (data_reg, eot_reg)
# register
data_reg = curr_data
eot_reg = d.eot
empty_reg = False
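# Editor's illustrative sketch (not part of the original module), assuming the
# usual pygears simulation helpers (drv, collect, sim); the exact sequence
# literal format for Union values may differ by version, so treat this only as
# an outline of how filt(fixsel=...) is typically driven:
#
#     from pygears.lib import drv, collect
#     from pygears.sim import sim
#
#     res = []
#     drv(t=Union[Uint[8], Uint[16]], seq=[(1, 0), (2, 1), (3, 0)]) \
#         | filt(fixsel=0) \
#         | collect(result=res)
#     sim()
#     # res now holds only the items whose ctrl field selected Uint[8] (ctrl == 0)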
|
utils/cmpcodesize/cmpcodesize/compare.py | AbdouSarr/swift | 825 | 11136006 | # ====--- compare.py - Compare built products' sizes -*- coding: utf-8 -*-===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import print_function
import collections
import os
import re
import subprocess
from operator import itemgetter
prefixes = {
# Cpp
"__Z": "CPP",
"_swift": "CPP",
"__swift": "CPP",
# Objective-C
"+[": "ObjC",
"-[": "ObjC",
# Swift
"__TP": "Partial Apply",
"__TTW": "Protocol Witness",
"__Tw": "Value Witness",
"__TM": "Type Metadata",
"__TF": "Swift Function",
"__TTSg": "Generic Spec",
"__TTSf": "FuncSig Spec",
"__TZF": "Static Func",
# Function signature specialization of a generic specialization.
"__TTSGF": "FuncSigGen Spec",
"__TTo": "Swift @objc Func",
}
infixes = {
# Swift
"q_": "Generic Function"
}
generic_function_prefix = "__TTSg"
sorted_prefixes = sorted(prefixes)
sorted_infixes = sorted(infixes)
def add_function(sizes, function, start_addr, end_addr, group_by_prefix):
if not function or start_addr is None or end_addr is None:
return
size = end_addr - start_addr
if group_by_prefix:
for infix in sorted_infixes:
if infix in function:
if generic_function_prefix not in function:
sizes[infixes[infix]] += size
return
for prefix in sorted_prefixes:
if function.startswith(prefix):
# Special handling for function signature specializations
# of generic specializations.
if prefix == "__TTSf" and generic_function_prefix in function:
prefix = "__TTSGF"
sizes[prefixes[prefix]] += size
return
sizes["Unknown"] += size
else:
sizes[function] += size
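# Editor's illustrative sketch (not part of the original script): how
# add_function() groups sizes by symbol prefix. The symbol names are made up.
def _add_function_example():
    sizes = collections.defaultdict(int)
    add_function(sizes, "__TTW_example_witness", 0x100, 0x140, group_by_prefix=True)
    add_function(sizes, "_main", 0x140, 0x150, group_by_prefix=True)
    return dict(sizes)  # -> {'Protocol Witness': 64, 'Unknown': 16}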
def flatten(*args):
for x in args:
if hasattr(x, '__iter__'):
for y in flatten(*x):
yield y
else:
yield x
def read_sizes(sizes, file_name, function_details, group_by_prefix):
# Check if multiple architectures are supported by the object file.
# Prefer arm64 if available.
architectures = subprocess.check_output(
["otool", "-V", "-f", file_name]).split("\n")
arch = None
arch_pattern = re.compile('architecture ([\S]+)')
for architecture in architectures:
arch_match = arch_pattern.match(architecture)
if arch_match:
if arch is None:
arch = arch_match.group(1)
if "arm64" in arch:
arch = "arm64"
if arch is not None:
arch_params = ["-arch", arch]
else:
arch_params = []
if function_details:
content = subprocess.check_output(
flatten([
"otool",
arch_params,
"-l",
"-v",
"-t",
file_name]
)).split("\n")
content += subprocess.check_output(flatten(
["otool", arch_params, "-v", "-s", "__TEXT", "__textcoal_nt",
file_name])).split("\n")
else:
content = subprocess.check_output(
flatten(["otool", arch_params, "-l", file_name])).split("\n")
sect_name = None
curr_func = None
start_addr = None
end_addr = None
section_pattern = re.compile(' +sectname ([\S]+)')
size_pattern = re.compile(' +size ([\da-fx]+)')
asmline_pattern = re.compile('^([0-9a-fA-F]+)\s')
label_pattern = re.compile('^((\-*\[[^\]]*\])|[^\/\s]+):$')
for line in content:
asmline_match = asmline_pattern.match(line)
if asmline_match:
addr = int(asmline_match.group(1), 16)
if start_addr is None:
start_addr = addr
end_addr = addr
elif line == "Section":
sect_name = None
else:
label_match = label_pattern.match(line)
size_match = size_pattern.match(line)
section_match = section_pattern.match(line)
if label_match:
func_name = label_match.group(1)
add_function(sizes, curr_func, start_addr,
end_addr, group_by_prefix)
curr_func = func_name
start_addr = None
end_addr = None
elif size_match and sect_name and group_by_prefix:
size = int(size_match.group(1), 16)
sizes[sect_name] += size
elif section_match:
sect_name = section_match.group(1)
if sect_name == "__textcoal_nt":
sect_name = "__text"
add_function(sizes, curr_func, start_addr, end_addr, group_by_prefix)
def compare_sizes(old_sizes, new_sizes, name_key, title):
old_size = old_sizes[name_key]
new_size = new_sizes[name_key]
if old_size is not None and new_size is not None:
if old_size != 0:
perc = "%.1f%%" % (
(1.0 - float(new_size) / float(old_size)) * 100.0)
else:
perc = "- "
print("%-26s%16s: %8d %8d %6s" %
(title, name_key, old_size, new_size, perc))
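# Editor's illustrative sketch (not part of the original script): the percentage
# column printed by compare_sizes() is the relative shrinkage of the new size.
def _compare_sizes_example():
    old = collections.defaultdict(int, {"__text": 1000})
    new = collections.defaultdict(int, {"__text": 900})
    compare_sizes(old, new, "__text", "demo")  # prints "demo ... __text: 1000 900 10.0%"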
def compare_sizes_of_file(old_files, new_files, all_sections, list_categories):
old_sizes = collections.defaultdict(int)
new_sizes = collections.defaultdict(int)
for old_file in old_files:
read_sizes(old_sizes, old_file, list_categories, True)
for new_file in new_files:
read_sizes(new_sizes, new_file, list_categories, True)
if len(old_files) == 1 and len(new_files) == 1:
old_base = os.path.basename(old_files[0])
new_base = os.path.basename(new_files[0])
title = old_base
if old_base != new_base:
title += "-" + new_base
else:
title = "old-new"
compare_sizes(old_sizes, new_sizes, "__text", title)
if list_categories:
prev = None
for category_name in sorted(prefixes.values()) + \
sorted(infixes.values()) + ["Unknown"]:
if category_name != prev:
compare_sizes(old_sizes, new_sizes, category_name, "")
prev = category_name
if all_sections:
section_title = " section"
compare_sizes(old_sizes, new_sizes, "__textcoal_nt", section_title)
compare_sizes(old_sizes, new_sizes, "__stubs", section_title)
compare_sizes(old_sizes, new_sizes, "__const", section_title)
compare_sizes(old_sizes, new_sizes, "__cstring", section_title)
compare_sizes(old_sizes, new_sizes, "__objc_methname", section_title)
compare_sizes(old_sizes, new_sizes, "__const", section_title)
compare_sizes(old_sizes, new_sizes, "__objc_const", section_title)
compare_sizes(old_sizes, new_sizes, "__data", section_title)
compare_sizes(old_sizes, new_sizes, "__swift1_proto", section_title)
compare_sizes(old_sizes, new_sizes, "__common", section_title)
compare_sizes(old_sizes, new_sizes, "__bss", section_title)
def list_function_sizes(size_array):
for pair in sorted(size_array, key=itemgetter(1)):
name = pair[0]
size = pair[1]
yield "%8d %s" % (size, name)
def compare_function_sizes(old_files, new_files):
old_sizes = collections.defaultdict(int)
new_sizes = collections.defaultdict(int)
for name in old_files:
read_sizes(old_sizes, name, True, False)
for name in new_files:
read_sizes(new_sizes, name, True, False)
only_in_file1 = []
only_in_file2 = []
in_both = []
only_in_file1size = 0
only_in_file2size = 0
in_both_size = 0
for func, old_size in old_sizes.items():
new_size = new_sizes[func]
if new_size != 0:
in_both.append((func, old_size, new_size))
else:
only_in_file1.append((func, old_size))
only_in_file1size += old_size
for func, new_size in new_sizes.items():
old_size = old_sizes[func]
if old_size == 0:
only_in_file2.append((func, new_size))
only_in_file2size += new_size
if only_in_file1:
print("Only in old file(s)")
print(os.linesep.join(list_function_sizes(only_in_file1)))
print("Total size of functions only in old file: {}".format(
only_in_file1size))
print()
if only_in_file2:
print("Only in new files(s)")
print(os.linesep.join(list_function_sizes(only_in_file2)))
print("Total size of functions only in new file: {}".format(
only_in_file2size))
print()
if in_both:
size_increase = 0
size_decrease = 0
print("%8s %8s %8s" % ("old", "new", "diff"))
for triple in sorted(
in_both,
key=lambda tup: (tup[2] - tup[1], tup[1])):
func = triple[0]
old_size = triple[1]
new_size = triple[2]
diff = new_size - old_size
if diff > 0:
size_increase += diff
else:
size_decrease -= diff
if diff == 0:
in_both_size += new_size
print("%8d %8d %8d %s" %
(old_size, new_size, new_size - old_size, func))
print("Total size of functions " +
"with the same size in both files: {}".format(in_both_size))
print("Total size of functions " +
"that got smaller: {}".format(size_decrease))
print("Total size of functions " +
"that got bigger: {}".format(size_increase))
print("Total size change of functions present " +
"in both files: {}".format(size_increase - size_decrease))
|
evaluation_scripts/python_code.py | yolochai/scisumm-corpus | 198 | 11136075 | # -*- coding: utf-8 -*-
"""
Created on Fri May 06 14:43:15 2016
@author: rustagi
"""
import ast
import os
import csv
csvfile=open('results_task1_last.csv', 'wb')
fieldnames = ['System_Name','Filename','Precision_Task_1a','Recall_Task_1a','F1_Score_Task_1a','Precision_Task_1b','Recall_Task_1b','F1_Score_Task_1b']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
def calculate_values(list_gold,list_comp) :
set_gold=set(list_gold)
set_comp=set(list_comp)
#print set_gold
#print set_comp
TP=len(set_gold.intersection(set_comp))
FP=len(set_comp.difference(set_gold))
FN=len(set_gold.difference(set_comp))
return [TP,FP,FN]
def calculate_metric(TP,FP,FN) :
recall="NA"
precision="NA"
sum_precision_deno=TP+FP
if sum_precision_deno!=0 :
precision=TP/float(sum_precision_deno)
sum_recall_deno=TP+FN
if sum_recall_deno!=0 :
recall=TP/float(sum_recall_deno)
return (precision,recall)
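# Editor's illustrative worked example (not part of the original script): with
# gold ids [1, 2, 3] and system ids [2, 3, 4] there are 2 true positives, 1
# false positive and 1 false negative, so precision = recall = 2/3.
def _example_metric():
    TP, FP, FN = calculate_values([1, 2, 3], [2, 3, 4])  # -> 2, 1, 1
    return calculate_metric(TP, FP, FN)                  # -> (0.666..., 0.666...)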
def make_dict(filename,dict_main) :
gold_file=open(filename,"r")
filename=filename.split(".")[0].split("\\")[-1]
dict_main[filename]={}
for lines in gold_file.readlines():
if lines !="\n":
#print lines
split_line=lines[:-1].split(" | ")
for index in [2,3] :
item_split=split_line[index].split(":")
item_name=item_split[0].strip()
item_value=item_split[1].strip()
if index==2:
if item_value not in dict_main[filename]:
dict_main[filename][item_value]={}
index_item_value_2=item_value
else :
dict_main[filename][index_item_value_2][item_value]={}
index_item_value_3=item_value
for index in range(len(split_line)) :
if split_line[index]!="":
item_split=split_line[index].split(":")
#print item_split
item_name=item_split[0].strip()
item_value=item_split[1].strip()
if index==3 or index==5 or index==7 :
if "-" in item_value :
item_value= ast.literal_eval("['"+item_value+"']")
else :
item_value= ast.literal_eval(item_value)
if index==9 :
if "[" in item_value :
item_value= ast.literal_eval(item_value)
else :
item_value=[item_value]
dict_main[filename][index_item_value_2][index_item_value_3][item_name]=item_value
return dict_main
result_file="results.txt"
folder_read="C:\\Users\\rustagi\\Desktop\\Work_home\\new_system\\"
dict_gold={}
gold_file_path=folder_read+"Gold\\Default\\Task1\\"
for file_2 in os.listdir(gold_file_path) :
print file_2
gold_filename=file_2.split(".")[0]
dict_gold=make_dict(gold_file_path+gold_filename+".annv3.txt",dict_gold)
for systems in os.listdir(folder_read):
if systems!="Gold" and systems!="System3" :
for runs in os.listdir(folder_read+systems):
system_name= systems+"->"+runs
print system_name
dict_comp={}
comp_file_path=folder_read+systems+"\\"+runs+"\\Task1\\"
for file_2 in os.listdir(comp_file_path) :
print file_2
comp_filename=file_2.split(".")[0]
dict_comp=make_dict(comp_file_path+comp_filename+".annv3.txt",dict_comp)
for files in dict_gold :
main_file_values=dict_gold[files]
comp_file_values=dict_comp[files]
[TP_discourse_facet,FP_discourse_facet,FN_discourse_facet]=[0,0,0]
#[TP_citation_marker,FP_citation_marker,FN_citation_marker]=[0,0,0]
#[TP_citation_offset,FP_citation_offset,FN_citation_offset]=[0,0,0]
[TP_reference_offset,FP_reference_offset,FN_reference_offset]=[0,0,0]
for gold_values in main_file_values :
if gold_values in comp_file_values :
for gold_values_2 in main_file_values[gold_values] :
if gold_values_2 in comp_file_values[gold_values] :
gold_value=main_file_values[gold_values][gold_values_2]
comp_value=comp_file_values[gold_values][gold_values_2]
old_value=TP_reference_offset
[TP_reference_offset,FP_reference_offset,FN_reference_offset]=[x+y for x,y in zip([TP_reference_offset,FP_reference_offset,FN_reference_offset], calculate_values(gold_value["Reference Offset"],comp_value["Reference Offset"]))]
if (TP_reference_offset-old_value)>=1:
[TP_discourse_facet,FP_discourse_facet,FN_discourse_facet]=[x+y for x,y in zip([TP_discourse_facet,FP_discourse_facet,FN_discourse_facet], calculate_values(gold_value["Discourse Facet"],comp_value["Discourse Facet"]))]
else :
[TP_discourse_facet,FP_discourse_facet,FN_discourse_facet]=[x+y for x,y in zip([TP_discourse_facet,FP_discourse_facet,FN_discourse_facet], calculate_values(gold_value["Discourse Facet"],[]))]
else :
gold_value=main_file_values[gold_values][gold_values_2]
[TP_reference_offset,FP_reference_offset,FN_reference_offset]=[x+y for x,y in zip([TP_reference_offset,FP_reference_offset,FN_reference_offset], calculate_values(gold_value["Reference Offset"],[]))]
[TP_discourse_facet,FP_discourse_facet,FN_discourse_facet]=[x+y for x,y in zip([TP_discourse_facet,FP_discourse_facet,FN_discourse_facet], calculate_values(gold_value["Discourse Facet"],[]))]
print "2"
else :
for gold_values_2 in main_file_values[gold_values] :
gold_value=main_file_values[gold_values][gold_values_2]
[TP_reference_offset,FP_reference_offset,FN_reference_offset]=[x+y for x,y in zip([TP_reference_offset,FP_reference_offset,FN_reference_offset], calculate_values(gold_value["Reference Offset"],[]))]
[TP_discourse_facet,FP_discourse_facet,FN_discourse_facet]=[x+y for x,y in zip([TP_discourse_facet,FP_discourse_facet,FN_discourse_facet], calculate_values(gold_value["Discourse Facet"],[]))]
print "4"
for comp_values in comp_file_values:
if comp_values in main_file_values :
for comp_values_2 in comp_file_values[comp_values] :
if comp_values_2 not in main_file_values[comp_values] :
comp_value=comp_file_values[comp_values][comp_values_2]
[TP_reference_offset,FP_reference_offset,FN_reference_offset]=[x+y for x,y in zip([TP_reference_offset,FP_reference_offset,FN_reference_offset], calculate_values([],comp_value["Reference Offset"]))]
[TP_discourse_facet,FP_discourse_facet,FN_discourse_facet]=[x+y for x,y in zip([TP_discourse_facet,FP_discourse_facet,FN_discourse_facet], calculate_values([],comp_value["Discourse Facet"]))]
print "1"
else :
for comp_values_2 in comp_file_values[comp_values] :
comp_value=comp_file_values[comp_values][comp_values_2]
[TP_reference_offset,FP_reference_offset,FN_reference_offset]=[x+y for x,y in zip([TP_reference_offset,FP_reference_offset,FN_reference_offset], calculate_values([],comp_value["Reference Offset"]))]
[TP_discourse_facet,FP_discourse_facet,FN_discourse_facet]=[x+y for x,y in zip([TP_discourse_facet,FP_discourse_facet,FN_discourse_facet], calculate_values([],comp_value["Discourse Facet"]))]
print "3"
(precision_discourse_facet,recall_discourse_facet)=calculate_metric(TP_discourse_facet,FP_discourse_facet,FN_discourse_facet)
if precision_discourse_facet!="NA" and recall_discourse_facet!="NA" :
if precision_discourse_facet+recall_discourse_facet!=0 :
f1_score_discourse_facet=2*precision_discourse_facet*recall_discourse_facet/float(precision_discourse_facet+recall_discourse_facet)
else :
f1_score_discourse_facet="NA"
else :
f1_score_discourse_facet="NA"
#(precision_citation_marker,recall_citation_marker)=calculate_metric(TP_citation_marker,FP_citation_marker,FN_citation_marker)
#(precision_citation_offset,recall_citation_offset)=calculate_metric(TP_citation_offset,FP_citation_offset,FN_citation_offset)
(precision_reference_offset,recall_reference_offset)=calculate_metric(TP_reference_offset,FP_reference_offset,FN_reference_offset)
if precision_reference_offset!="NA" and recall_reference_offset!="NA" :
if precision_reference_offset+recall_reference_offset!=0 :
f1_score_reference_offset=2*precision_reference_offset*recall_reference_offset/float(precision_reference_offset+recall_reference_offset)
else :
f1_score_reference_offset="NA"
else :
f1_score_reference_offset="NA"
# writer.writerow({'System_Name': systems, 'Method': runs, 'Filename' :files,'Precision_Task_1a': precision_reference_offset,'Recall_Task_1a': recall_reference_offset,'F1_Score_Task_1a': f1_score_reference_offset,'Precision_Task_1b': precision_discourse_facet,'Recall_Task_1b': recall_discourse_facet,'F1_Score_Task_1b': f1_score_discourse_facet})
writer.writerow({'System_Name': systems+"$"+runs, 'Filename' :files,'Precision_Task_1a': precision_reference_offset,'Recall_Task_1a': recall_reference_offset,'F1_Score_Task_1a': f1_score_reference_offset,'Precision_Task_1b': precision_discourse_facet,'Recall_Task_1b': recall_discourse_facet,'F1_Score_Task_1b': f1_score_discourse_facet})
csvfile.close()
"""
gold_filename="C00-2123"
comp_filename="C00-2123_ref_2"
dict_comp=make_dict(comp_filename+".annv3.txt",dict_comp)
main_file_values=dict_gold[gold_filename]
comp_file_values=dict_comp[comp_filename]
"""
|
examples/01-maps/plot_layer.py | Prithwijit-Chak/simpeg | 358 | 11136078 | """
Maps: Parametrized Layer
========================
Build a model of a parametrized layer in a wholespace. If you want to
build a model of a parametrized layer in a halfspace, also use
maps.InjectActiveCell.
The model is
.. code::
m = [
'background physical property value',
'layer physical property value',
'layer center',
'layer thickness'
]
"""
import discretize
from SimPEG import maps
import numpy as np
import matplotlib.pyplot as plt
def run(plotIt=True):
mesh = discretize.TensorMesh([50, 50], x0="CC") # 2D tensor mesh
mapping = maps.ParametricLayer(mesh) # parametric layer in wholespace
# model
m = np.hstack(
np.r_[
1.0, # background value
2.0, # layer value
-0.1, # layer center
0.2, # layer thickness
]
)
rho = mapping * m # apply the mapping
if plotIt is True:
fig, ax = plt.subplots(1, 1, figsize=(4, 6))
mesh.plotImage(rho, ax=ax)
if __name__ == "__main__":
run()
plt.show()
|
tests/test_activation_count.py | lauragustafson/fvcore | 1,137 | 11136079 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# pyre-ignore-all-errors[2]
import typing
import unittest
from collections import Counter, defaultdict
from typing import Any, Dict, List, Tuple
import torch
import torch.nn as nn
from fvcore.nn.activation_count import ActivationCountAnalysis, activation_count
from fvcore.nn.jit_handles import Handle
from numpy import prod
class SmallConvNet(nn.Module):
"""
A network with three conv layers. This is used for testing convolution
layers for activation count.
"""
def __init__(self, input_dim: int) -> None:
super(SmallConvNet, self).__init__()
conv_dim1 = 8
conv_dim2 = 4
conv_dim3 = 2
self.conv1 = nn.Conv2d(input_dim, conv_dim1, 1, 1)
self.conv2 = nn.Conv2d(conv_dim1, conv_dim2, 1, 2)
self.conv3 = nn.Conv2d(conv_dim2, conv_dim3, 1, 2)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
def get_gt_activation(self, x: torch.Tensor) -> Tuple[int, int, int]:
x = self.conv1(x)
count1 = prod(list(x.size()))
x = self.conv2(x)
count2 = prod(list(x.size()))
x = self.conv3(x)
count3 = prod(list(x.size()))
return (count1, count2, count3)
class TestActivationCountAnalysis(unittest.TestCase):
"""
Unittest for activation_count.
"""
def setUp(self) -> None:
# nn.Linear uses a different operator based on version, so make sure
# we are testing the right thing.
lin = nn.Linear(10, 10)
lin_x: torch.Tensor = torch.randn(10, 10)
trace = torch.jit.trace(lin, (lin_x,))
node_kinds = [node.kind() for node in trace.graph.nodes()]
assert "aten::addmm" in node_kinds or "aten::linear" in node_kinds
if "aten::addmm" in node_kinds:
self.lin_op = "addmm"
else:
self.lin_op = "linear"
def test_conv2d(self) -> None:
"""
Test the activation count for convolutions.
"""
batch_size = 1
input_dim = 3
spatial_dim = 32
x = torch.randn(batch_size, input_dim, spatial_dim, spatial_dim)
convNet = SmallConvNet(input_dim)
ac_dict, _ = activation_count(convNet, (x,))
gt_count = sum(convNet.get_gt_activation(x))
gt_dict = defaultdict(float)
gt_dict["conv"] = gt_count / 1e6
self.assertDictEqual(
gt_dict,
ac_dict,
"ConvNet with 3 layers failed to pass the activation count test.",
)
def test_linear(self) -> None:
"""
Test the activation count for fully connected layer.
"""
batch_size = 1
input_dim = 10
output_dim = 20
netLinear = nn.Linear(input_dim, output_dim)
x = torch.randn(batch_size, input_dim)
ac_dict, _ = activation_count(netLinear, (x,))
gt_count = batch_size * output_dim
gt_dict = defaultdict(float)
gt_dict[self.lin_op] = gt_count / 1e6
self.assertEquals(
gt_dict, ac_dict, "FC layer failed to pass the activation count test."
)
def test_supported_ops(self) -> None:
"""
Test the activation count for user provided handles.
"""
def dummy_handle(inputs: List[Any], outputs: List[Any]) -> typing.Counter[str]:
return Counter({"conv": 100})
batch_size = 1
input_dim = 3
spatial_dim = 32
x = torch.randn(batch_size, input_dim, spatial_dim, spatial_dim)
convNet = SmallConvNet(input_dim)
sp_ops: Dict[str, Handle] = {"aten::_convolution": dummy_handle}
ac_dict, _ = activation_count(convNet, (x,), sp_ops)
gt_dict = defaultdict(float)
conv_layers = 3
gt_dict["conv"] = 100 * conv_layers / 1e6
self.assertDictEqual(
gt_dict,
ac_dict,
"ConvNet with 3 layers failed to pass the activation count test.",
)
def test_activation_count_class(self) -> None:
"""
Tests ActivationCountAnalysis.
"""
batch_size = 1
input_dim = 10
output_dim = 20
netLinear = nn.Linear(input_dim, output_dim)
x = torch.randn(batch_size, input_dim)
gt_count = batch_size * output_dim
gt_dict = Counter(
{
"": gt_count,
}
)
acts_counter = ActivationCountAnalysis(netLinear, (x,))
self.assertEqual(acts_counter.by_module(), gt_dict)
batch_size = 1
input_dim = 3
spatial_dim = 32
x = torch.randn(batch_size, input_dim, spatial_dim, spatial_dim)
convNet = SmallConvNet(input_dim)
acts_counter = ActivationCountAnalysis(convNet, (x,))
gt_counts = convNet.get_gt_activation(x)
gt_dict = Counter(
{
"": sum(gt_counts),
"conv1": gt_counts[0],
"conv2": gt_counts[1],
"conv3": gt_counts[2],
}
)
self.assertDictEqual(gt_dict, acts_counter.by_module())
|
recommends/apps.py | asymness/django-recommends | 142 | 11136096 | import importlib
from django.apps import AppConfig, apps
from .settings import RECOMMENDS_AUTODISCOVER_MODULE
class RecommendsConfig(AppConfig):
name = 'recommends'
def ready(self):
if not RECOMMENDS_AUTODISCOVER_MODULE:
return
for appconfig in apps.get_app_configs():
try:
importlib.import_module('.' + RECOMMENDS_AUTODISCOVER_MODULE, appconfig.name)
except ImportError:
pass
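# Editor's illustrative note (assumption, not part of this file): with
# RECOMMENDS_AUTODISCOVER_MODULE = 'recommendations', ready() above imports
# myapp/recommendations.py for each installed app that has one, so that module
# can register its provider as an import side effect, e.g.:
#
#     # myapp/recommendations.py (hypothetical)
#     from recommends.providers import recommendation_registry, RecommendationProvider
#     from myapp.models import Product, Vote
#
#     class ProductProvider(RecommendationProvider):
#         ...  # get_items / get_ratings / etc.
#
#     recommendation_registry.register(Vote, [Product], ProductProvider)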
|
pybotters/models/ftx.py | maruuuui/pybotters | 176 | 11136103 | from __future__ import annotations
import asyncio
import logging
from typing import Any, Awaitable, Optional
import aiohttp
from ..auth import Auth
from ..store import DataStore, DataStoreManager
from ..typedefs import Item
from ..ws import ClientWebSocketResponse
logger = logging.getLogger(__name__)
class FTXDataStore(DataStoreManager):
"""
    FTX data store manager.
"""
def _init(self) -> None:
self.create('ticker', datastore_class=Ticker)
self.create('markets', datastore_class=Markets)
self.create('trades', datastore_class=Trades)
self.create('orderbook', datastore_class=OrderBook)
self.create('fills', datastore_class=Fills)
self.create('orders', datastore_class=Orders)
self.create('positions', datastore_class=Positions)
async def initialize(self, *aws: Awaitable[aiohttp.ClientResponse]) -> None:
"""
        Supported endpoints:
- GET /orders (DataStore: orders)
- GET /conditional_orders (DataStore: orders)
- GET /positions (DataStore: positions)
        - When a fills message is received, an automatic fetch of GET /positions is enabled.
"""
for f in asyncio.as_completed(aws):
resp = await f
data = await resp.json()
if resp.url.path in (
'/api/orders',
'/api/conditional_orders',
):
self.orders._onresponse(data['result'])
elif resp.url.path in ('/api/positions',):
self.positions._onresponse(data['result'])
self.positions._fetch = True
def _onmessage(self, msg: Any, ws: ClientWebSocketResponse) -> None:
if 'type' in msg:
if msg['type'] == 'error':
logger.warning(msg)
if 'data' in msg:
channel: str = msg['channel']
market: str = msg['market'] if 'market' in msg else ''
data: Any = msg['data']
if channel == 'ticker':
self.ticker._onmessage(market, data)
elif channel == 'markets':
self.markets._onmessage(data)
elif channel == 'trades':
self.trades._onmessage(market, data)
elif channel == 'orderbook':
self.orderbook._onmessage(market, data)
elif channel == 'orderbookGrouped':
data['action'] = msg['type']
self.orderbook._onmessage(market, data)
elif channel == 'fills':
self.fills._onmessage(data)
if self.positions._fetch:
asyncio.create_task(self.positions._onfills(ws._response._session))
elif channel == 'orders':
self.orders._onmessage(data)
@property
def ticker(self) -> 'Ticker':
return self.get('ticker', Ticker)
@property
def markets(self) -> 'Markets':
return self.get('markets', Markets)
@property
def trades(self) -> 'Trades':
return self.get('trades', Trades)
@property
def orderbook(self) -> 'OrderBook':
return self.get('orderbook', OrderBook)
@property
def fills(self) -> 'Fills':
return self.get('fills', Fills)
@property
def orders(self) -> 'Orders':
"""
        Active orders only (filled and cancelled orders are removed).
"""
return self.get('orders', Orders)
@property
def positions(self) -> 'Positions':
return self.get('positions', Positions)
class Ticker(DataStore):
_KEYS = ['market']
def _onmessage(self, market: str, item: Item) -> None:
self._update([{'market': market, **item}])
class Markets(DataStore):
_KEYS = ['name']
def _onmessage(self, item: Item) -> None:
if item['action'] == 'partial':
self._clear()
self._update([item['data'][k] for k in item['data']])
class Trades(DataStore):
_MAXLEN = 99999
def _onmessage(self, market: str, data: list[Item]) -> None:
for item in data:
self._insert([{'market': market, **item}])
class OrderBook(DataStore):
_KEYS = ['market', 'side', 'price']
_BDSIDE = {'sell': 'asks', 'buy': 'bids'}
def sorted(self, query: Optional[Item] = None) -> dict[str, list[float]]:
if query is None:
query = {}
result = {'asks': [], 'bids': []}
for item in self:
if all(k in item and query[k] == item[k] for k in query):
result[self._BDSIDE[item['side']]].append([item['price'], item['size']])
result['asks'].sort(key=lambda x: x[0])
result['bids'].sort(key=lambda x: x[0], reverse=True)
return result
def _onmessage(self, market: str, data: list[Item]) -> None:
if data['action'] == 'partial':
result = self.find({'market': market})
self._delete(result)
for boardside, side in (('bids', 'buy'), ('asks', 'sell')):
for item in data[boardside]:
if item[1]:
self._update(
[
{
'market': market,
'side': side,
'price': item[0],
'size': item[1],
}
]
)
else:
self._delete([{'market': market, 'side': side, 'price': item[0]}])
class Fills(DataStore):
def _onmessage(self, item: Item) -> None:
self._insert([item])
class Orders(DataStore):
_KEYS = ['id']
def _onresponse(self, data: list[Item]) -> None:
if data:
results = self.find({'market': data[0]['market']})
self._delete(results)
self._update(data)
def _onmessage(self, item: Item) -> None:
if item['status'] != 'closed':
self._update([item])
else:
self._delete([item])
class Positions(DataStore):
_KEYS = ['future']
def _init(self) -> None:
self._fetch = False
def _onresponse(self, data: list[Item]) -> None:
self._update(data)
async def _onfills(self, session: aiohttp.ClientSession) -> None:
async with session.get(
'https://ftx.com/api/positions?showAvgPrice=true', auth=Auth
) as resp:
data = await resp.json()
self._onresponse(data['result'])
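# Editor's illustrative sketch (not part of the original module): OrderBook.sorted()
# returns best-first price ladders. Instantiating the store class directly, as
# DataStoreManager.create() does, and feeding a hand-written partial snapshot:
def _orderbook_example():
    book = OrderBook()
    book._onmessage('BTC-PERP', {
        'action': 'partial',
        'bids': [[50000.0, 1.5], [49999.0, 2.0]],
        'asks': [[50002.0, 0.5], [50001.0, 1.0]],
    })
    return book.sorted({'market': 'BTC-PERP'})
    # -> {'asks': [[50001.0, 1.0], [50002.0, 0.5]],
    #     'bids': [[50000.0, 1.5], [49999.0, 2.0]]}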
|
tests/test_scheduler/test_virtualtimescheduler.py | mmpio/RxPY | 4,342 | 11136106 | import pytest
import unittest
from rx.scheduler import VirtualTimeScheduler
from rx.internal import ArgumentOutOfRangeException
from rx.internal.constants import DELTA_ZERO, UTC_ZERO
class VirtualSchedulerTestScheduler(VirtualTimeScheduler):
def add(self, absolute, relative):
return absolute + relative
class TestVirtualTimeScheduler(unittest.TestCase):
def test_virtual_now_noarg(self):
scheduler = VirtualSchedulerTestScheduler()
assert scheduler.clock == 0.0
assert scheduler.now == UTC_ZERO
def test_virtual_now_float(self):
scheduler = VirtualSchedulerTestScheduler(0.0)
assert scheduler.clock == 0.0
assert scheduler.now == UTC_ZERO
def test_virtual_now_timedelta(self):
scheduler = VirtualSchedulerTestScheduler(DELTA_ZERO)
assert scheduler.clock == DELTA_ZERO
assert scheduler.now == UTC_ZERO
def test_virtual_now_datetime(self):
scheduler = VirtualSchedulerTestScheduler(UTC_ZERO)
assert scheduler.clock == UTC_ZERO
assert scheduler.now == UTC_ZERO
def test_virtual_schedule_action(self):
scheduler = VirtualSchedulerTestScheduler()
ran = False
def action(scheduler, state):
nonlocal ran
ran = True
scheduler.schedule(action)
scheduler.start()
assert ran is True
def test_virtual_schedule_action_error(self):
scheduler = VirtualSchedulerTestScheduler()
class MyException(Exception):
pass
def action(scheduler, state):
raise MyException()
with pytest.raises(MyException):
scheduler.schedule(action)
scheduler.start()
def test_virtual_schedule_sleep_error(self):
scheduler = VirtualSchedulerTestScheduler()
with pytest.raises(ArgumentOutOfRangeException):
scheduler.sleep(-1)
def test_virtual_schedule_advance_clock_error(self):
scheduler = VirtualSchedulerTestScheduler()
with pytest.raises(ArgumentOutOfRangeException):
scheduler.advance_to(scheduler._clock - 1)
|
usaspending_api/etl/management/commands/load_multiple_submissions.py | g4brielvs/usaspending-api | 217 | 11136109 | import logging
from datetime import timedelta
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.db import transaction
from django.db.models import Max
from django.utils.crypto import get_random_string
from usaspending_api.common.helpers.date_helper import now, datetime_command_line_argument_type
from usaspending_api.etl.submission_loader_helpers.final_of_fy import populate_final_of_fy
from usaspending_api.etl.submission_loader_helpers.submission_ids import get_new_or_updated_submission_ids
from usaspending_api.submissions import dabs_loader_queue_helpers as dlqh
from usaspending_api.submissions.models import SubmissionAttributes
logger = logging.getLogger("script")
DISPLAY_CAP = 100
class Command(BaseCommand):
help = (
"The goal of this management command is coordinate the loading of multiple submissions "
"simultaneously using the load_submission single submission loader. To load submissions "
"in parallel, kick off multiple runs at the same time. Runs will be coordinated via the "
"dabs_loader_queue table in the database which allows loaders to be run from different "
"machines in different environments. Using the database as the queue sidesteps the AWS "
"SQS 24 hour message lifespan limitation. There is no hard cap on the number of jobs that "
"can be run simultaneously, but certainly there is a soft cap imposed by resource "
"contention. During development, 8 were run in parallel without incident."
)
submission_ids = None
incremental = False
start_datetime = None
report_queue_status_only = False
processor_id = None
heartbeat_timer = None
file_c_chunk_size = 100000
do_not_retry = []
def add_arguments(self, parser):
mutually_exclusive_group = parser.add_mutually_exclusive_group(required=True)
mutually_exclusive_group.add_argument(
"--submission-ids",
help=(
"One or more Broker submission_ids to be reloaded. These submissions are added to "
"the submission queue and processing begins on them immediately. Due to the "
"asynchronous, multiprocessing nature of the submission queue, it is possible that "
"another loader might nab and/or complete one or more of these submissions before "
"we get to them. This is just the nature of the beast. The logs will document "
"when this happens. Submissions loaded in this manner will be fully reloaded unless "
"another process is currently loading the submission."
),
nargs="+",
type=int,
)
mutually_exclusive_group.add_argument(
"--incremental",
action="store_true",
help=(
"Loads new or updated submissions in Broker since the most recently published "
"submission in USAspending. Submissions loaded in this manner will be updated "
"where possible. Otherwise they will be fully reloaded."
),
)
mutually_exclusive_group.add_argument(
"--start-datetime",
type=datetime_command_line_argument_type(naive=True), # Broker date/times are naive.
help=(
"Loads new or updated submissions in Broker since the timestamp provided. This is "
"effectively the same as the --incremental option except the start date/time is "
"specified on the command line."
),
)
mutually_exclusive_group.add_argument(
"--report-queue-status-only",
action="store_true",
help="Just reports the queue status. Nothing is loaded.",
)
parser.add_argument(
"--file-c-chunk-size",
type=int,
default=self.file_c_chunk_size,
help=(
f"Controls the number of File C records processed in a single batch. Theoretically, "
f"bigger should be faster... right up until you run out of memory. Balance carefully. "
f"Default is {self.file_c_chunk_size:,}."
),
)
parser.epilog = (
"And to answer your next question, yes this can be run standalone. The parallelization "
"code is pretty minimal and should not add significant time to the overall run time of "
"serial submission loads."
)
def handle(self, *args, **options):
self.record_options(options)
self.report_queue_status()
if self.report_queue_status_only:
return
self.reset_abandoned_locks()
if self.submission_ids:
self.add_specific_submissions_to_queue()
processed_count = self.load_specific_submissions()
else:
since_datetime = self.start_datetime or self.calculate_load_submissions_since_datetime()
self.add_submissions_since_datetime_to_queue(since_datetime)
processed_count = self.load_incremental_submissions()
ready, in_progress, abandoned, failed, unrecognized = dlqh.get_queue_status()
failed_unrecognized_and_abandoned_count = len(failed) + len(unrecognized) + len(abandoned)
in_progress_count = len(in_progress)
self.update_final_of_fy(processed_count, in_progress_count)
# Only return unstable state if something's in a bad state and we're the last one standing.
# Should cut down on Slack noise a bit.
if failed_unrecognized_and_abandoned_count > 0 and in_progress_count == 0:
raise SystemExit(3)
def record_options(self, options):
self.submission_ids = options.get("submission_ids")
self.incremental = options.get("incremental")
self.start_datetime = options.get("start_datetime")
self.report_queue_status_only = options.get("report_queue_status_only")
self.file_c_chunk_size = options.get("file_c_chunk_size")
self.processor_id = f"{now()}/{get_random_string()}"
logger.info(f'processor_id = "{self.processor_id}"')
@staticmethod
def report_queue_status():
"""
Logs various information about the state of the submission queue. Returns a count of failed
and unrecognized submissions so the caller can whine about it if they're so inclined.
"""
ready, in_progress, abandoned, failed, unrecognized = dlqh.get_queue_status()
overall_count = sum(len(s) for s in (ready, in_progress, abandoned, failed, unrecognized))
msg = [
"The current queue status is as follows:\n",
f"There are {overall_count:,} total submissions in the queue.",
f" {len(ready):,} are ready but have not yet started processing.",
f" {len(in_progress):,} are in progress.",
f" {len(abandoned):,} have been abandoned.",
f" {len(failed):,} have FAILED.",
f" {len(unrecognized):,} are in an unrecognized state.",
]
def log_submission_ids(submissions, message):
if submissions:
caveat = f" (first {DISPLAY_CAP:,} shown)" if len(submissions) > DISPLAY_CAP else ""
submissions = ", ".join(str(s) for s in submissions[:DISPLAY_CAP])
msg.extend(["", f"The following submissions {message}{caveat}: {submissions}"])
log_submission_ids(in_progress, "are in progress")
log_submission_ids(abandoned, "have been abandoned")
log_submission_ids(failed, "have failed")
log_submission_ids(unrecognized, "are in an unrecognized state")
logger.info("\n".join(msg) + "\n")
@staticmethod
def reset_abandoned_locks():
count = dlqh.reset_abandoned_locks()
if count > 0:
logger.info(f"Reset {count:,} abandoned locks.")
return count
def add_specific_submissions_to_queue(self):
with transaction.atomic():
added = dlqh.add_submission_ids(self.submission_ids)
dlqh.mark_force_reload(self.submission_ids)
count = len(self.submission_ids)
logger.info(
f"Received {count:,} submission ids on the command line. {added:,} were "
f"added to the queue. {count - added:,} already existed."
)
def load_specific_submissions(self):
processed_count = 0
for submission_id in self.submission_ids:
count = dlqh.start_processing(submission_id, self.processor_id)
if count == 0:
logger.info(f"Submission {submission_id} has already been picked up by another processor. Skipping.")
else:
self.load_submission(submission_id, force_reload=True)
processed_count += 1
return processed_count
@staticmethod
def add_submissions_since_datetime_to_queue(since_datetime):
if since_datetime is None:
logger.info("No records found in submission_attributes. Performing a full load.")
else:
logger.info(f"Performing incremental load starting from {since_datetime}.")
submission_ids = get_new_or_updated_submission_ids(since_datetime)
added = dlqh.add_submission_ids(submission_ids)
count = len(submission_ids)
logger.info(
f"Identified {count:,} new or updated submission ids in Broker. {added:,} were "
f"added to the queue. {count - added:,} already existed."
)
def load_incremental_submissions(self):
processed_count = 0
while True:
submission_id, force_reload = dlqh.claim_next_available_submission(self.processor_id, self.do_not_retry)
if submission_id is None:
logger.info("No more available submissions in the queue. Exiting.")
break
self.load_submission(submission_id, force_reload)
processed_count += 1
return processed_count
def cancel_heartbeat_timer(self):
if self.heartbeat_timer:
self.heartbeat_timer.cancel()
self.heartbeat_timer = None
def start_heartbeat_timer(self, submission_id):
if self.heartbeat_timer:
self.cancel_heartbeat_timer()
self.heartbeat_timer = dlqh.HeartbeatTimer(submission_id, self.processor_id)
self.heartbeat_timer.start()
def load_submission(self, submission_id, force_reload):
"""
        Accepts a locked/claimed submission id, spins up a heartbeat thread, loads the
        submission, and returns True on success or False on failure.
"""
args = ["--file-c-chunk-size", self.file_c_chunk_size, "--skip-final-of-fy-calculation"]
if force_reload:
args.append("--force-reload")
self.start_heartbeat_timer(submission_id)
try:
call_command("load_submission", submission_id, *args)
except (Exception, SystemExit) as e:
self.cancel_heartbeat_timer()
logger.exception(f"Submission {submission_id} failed to load")
dlqh.fail_processing(submission_id, self.processor_id, e)
self.do_not_retry.append(submission_id)
self.report_queue_status()
return False
self.cancel_heartbeat_timer()
dlqh.complete_processing(submission_id, self.processor_id)
self.report_queue_status()
return True
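    # Illustrative sketch (added comment; hypothetical straight-line calls) of the lifecycle the
    # methods above implement for a single submission:
    #
    #   if dlqh.start_processing(submission_id, self.processor_id):   # claim the queue row
    #       self.load_submission(submission_id, force_reload=False)
    #
    # load_submission() keeps a HeartbeatTimer running while the load executes and then calls
    # dlqh.complete_processing() on success or dlqh.fail_processing() on error, presumably so
    # that other processors never mistake an active submission for an abandoned one.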
@staticmethod
def calculate_load_submissions_since_datetime():
since = SubmissionAttributes.objects.all().aggregate(Max("published_date"))["published_date__max"]
if since:
            # In order to prevent skips, we always look back an extra 30 days. Since this is a
            # relatively low-volume table, the overlap should not cause any noticeable performance issues.
since -= timedelta(days=30)
return since
@staticmethod
def update_final_of_fy(processed_count, in_progress_count):
"""
For performance and deadlocking reasons, we only update final_of_fy once the last
submission is processed. To this end, only update final_of_fy if any loads were
performed and there's nothing processable left in the queue.
"""
if processed_count < 1:
logger.info("No work performed. Not updating final_of_fy.")
return
if in_progress_count > 0:
logger.info("Submissions still in progress. Not updating final_of_fy.")
return
logger.info("Updating final_of_fy")
populate_final_of_fy()
        logger.info("Finished updating final_of_fy.")
|
code/trainer.py | kkanshul/finegan | 288 | 11136124 | from __future__ import print_function
from six.moves import range
import sys
import numpy as np
import os
import random
import time
from PIL import Image
from copy import deepcopy
import torch.backends.cudnn as cudnn
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torchvision.utils as vutils
from torch.nn.functional import softmax, log_softmax
from torch.nn.functional import cosine_similarity
from tensorboardX import summary
from tensorboardX import FileWriter
from miscc.config import cfg
from miscc.utils import mkdir_p
from model import G_NET, D_NET
# ################## Shared functions ###################
def child_to_parent(child_c_code, classes_child, classes_parent):
    ratio = classes_child // classes_parent
    arg_parent = torch.argmax(child_c_code, dim=1) // ratio
parent_c_code = torch.zeros([child_c_code.size(0), classes_parent]).cuda()
for i in range(child_c_code.size(0)):
parent_c_code[i][arg_parent[i]] = 1
return parent_c_code
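def _example_child_to_parent():
    # Added illustration (not part of the original FineGAN repo); requires a CUDA device
    # because child_to_parent allocates the parent code on the GPU. Consecutive blocks of
    # `classes_child // classes_parent` child classes share one parent class.
    child_c_code = torch.zeros(1, 20).cuda()
    child_c_code[0, 7] = 1
    parent_c_code = child_to_parent(child_c_code, classes_child=20, classes_parent=10)
    assert parent_c_code[0, 3] == 1  # index 7 // 2 == 3
    return parent_c_code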
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.orthogonal(m.weight.data, 1.0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
nn.init.orthogonal(m.weight.data, 1.0)
if m.bias is not None:
m.bias.data.fill_(0.0)
def load_params(model, new_param):
for p, new_p in zip(model.parameters(), new_param):
p.data.copy_(new_p)
def copy_G_params(model):
flatten = deepcopy(list(p.data for p in model.parameters()))
return flatten
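# Note (added comment): copy_G_params() and load_params() are used together to maintain an
# exponential moving average of the generator weights during training -- see the
# `avg_p.mul_(0.999).add_(0.001, p.data)` updates and the temporary swap
# `backup_para = copy_G_params(self.netG); load_params(self.netG, avg_param_G)` in
# FineGAN_trainer.train() below.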
def load_network(gpus):
netG = G_NET()
netG.apply(weights_init)
netG = torch.nn.DataParallel(netG, device_ids=gpus)
print(netG)
netsD = []
for i in range(3): # 3 discriminators for background, parent and child stage
netsD.append(D_NET(i))
for i in range(len(netsD)):
netsD[i].apply(weights_init)
netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus)
count = 0
if cfg.TRAIN.NET_G != '':
state_dict = torch.load(cfg.TRAIN.NET_G)
netG.load_state_dict(state_dict)
print('Load ', cfg.TRAIN.NET_G)
istart = cfg.TRAIN.NET_G.rfind('_') + 1
iend = cfg.TRAIN.NET_G.rfind('.')
count = cfg.TRAIN.NET_G[istart:iend]
count = int(count) + 1
if cfg.TRAIN.NET_D != '':
for i in range(len(netsD)):
print('Load %s_%d.pth' % (cfg.TRAIN.NET_D, i))
state_dict = torch.load('%s_%d.pth' % (cfg.TRAIN.NET_D, i))
netsD[i].load_state_dict(state_dict)
if cfg.CUDA:
netG.cuda()
for i in range(len(netsD)):
netsD[i].cuda()
return netG, netsD, len(netsD), count
def define_optimizers(netG, netsD):
optimizersD = []
num_Ds = len(netsD)
for i in range(num_Ds):
opt = optim.Adam(netsD[i].parameters(),
lr=cfg.TRAIN.DISCRIMINATOR_LR,
betas=(0.5, 0.999))
optimizersD.append(opt)
optimizerG = []
optimizerG.append(optim.Adam(netG.parameters(),
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999)))
for i in range(num_Ds):
if i==1:
opt = optim.Adam(netsD[i].parameters(),
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999))
optimizerG.append(opt)
elif i==2:
opt = optim.Adam([{'params':netsD[i].module.jointConv.parameters()},{'params':netsD[i].module.logits.parameters()}],
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999))
optimizerG.append(opt)
return optimizerG, optimizersD
def save_model(netG, avg_param_G, netsD, epoch, model_dir):
load_params(netG, avg_param_G)
torch.save(
netG.state_dict(),
'%s/netG_%d.pth' % (model_dir, epoch))
for i in range(len(netsD)):
netD = netsD[i]
torch.save(
netD.state_dict(),
'%s/netD%d.pth' % (model_dir, i))
print('Save G/Ds models.')
def save_img_results(imgs_tcpu, fake_imgs, num_imgs,
count, image_dir, summary_writer):
num = cfg.TRAIN.VIS_COUNT
real_img = imgs_tcpu[-1][0:num]
vutils.save_image(
real_img, '%s/real_samples%09d.png' % (image_dir,count),
normalize=True)
real_img_set = vutils.make_grid(real_img).numpy()
real_img_set = np.transpose(real_img_set, (1, 2, 0))
real_img_set = real_img_set * 255
real_img_set = real_img_set.astype(np.uint8)
for i in range(len(fake_imgs)):
fake_img = fake_imgs[i][0:num]
vutils.save_image(
fake_img.data, '%s/count_%09d_fake_samples%d.png' %
(image_dir, count, i), normalize=True)
fake_img_set = vutils.make_grid(fake_img.data).cpu().numpy()
fake_img_set = np.transpose(fake_img_set, (1, 2, 0))
fake_img_set = (fake_img_set + 1) * 255 / 2
fake_img_set = fake_img_set.astype(np.uint8)
summary_writer.flush()
class FineGAN_trainer(object):
def __init__(self, output_dir, data_loader, imsize):
if cfg.TRAIN.FLAG:
self.model_dir = os.path.join(output_dir, 'Model')
self.image_dir = os.path.join(output_dir, 'Image')
self.log_dir = os.path.join(output_dir, 'Log')
mkdir_p(self.model_dir)
mkdir_p(self.image_dir)
mkdir_p(self.log_dir)
self.summary_writer = FileWriter(self.log_dir)
s_gpus = cfg.GPU_ID.split(',')
self.gpus = [int(ix) for ix in s_gpus]
self.num_gpus = len(self.gpus)
torch.cuda.set_device(self.gpus[0])
cudnn.benchmark = True
self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus
self.max_epoch = cfg.TRAIN.MAX_EPOCH
self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL
self.data_loader = data_loader
self.num_batches = len(self.data_loader)
def prepare_data(self, data):
fimgs, cimgs, c_code, _, warped_bbox = data
real_vfimgs, real_vcimgs = [], []
if cfg.CUDA:
vc_code = Variable(c_code).cuda()
for i in range(len(warped_bbox)):
warped_bbox[i] = Variable(warped_bbox[i]).float().cuda()
else:
vc_code = Variable(c_code)
for i in range(len(warped_bbox)):
warped_bbox[i] = Variable(warped_bbox[i])
if cfg.CUDA:
real_vfimgs.append(Variable(fimgs[0]).cuda())
real_vcimgs.append(Variable(cimgs[0]).cuda())
else:
real_vfimgs.append(Variable(fimgs[0]))
real_vcimgs.append(Variable(cimgs[0]))
return fimgs, real_vfimgs, real_vcimgs, vc_code, warped_bbox
def train_Dnet(self, idx, count):
if idx == 0 or idx == 2: # Discriminator is only trained in background and child stage. (NOT in parent stage)
flag = count % 100
batch_size = self.real_fimgs[0].size(0)
criterion, criterion_one = self.criterion, self.criterion_one
netD, optD = self.netsD[idx], self.optimizersD[idx]
if idx == 0:
real_imgs = self.real_fimgs[0]
elif idx == 2:
real_imgs = self.real_cimgs[0]
fake_imgs = self.fake_imgs[idx]
netD.zero_grad()
real_logits = netD(real_imgs)
if idx == 2:
fake_labels = torch.zeros_like(real_logits[1])
real_labels = torch.ones_like(real_logits[1])
elif idx == 0:
fake_labels = torch.zeros_like(real_logits[1])
ext, output = real_logits
weights_real = torch.ones_like(output)
real_labels = torch.ones_like(output)
for i in range(batch_size):
x1 = self.warped_bbox[0][i]
x2 = self.warped_bbox[2][i]
y1 = self.warped_bbox[1][i]
y2 = self.warped_bbox[3][i]
a1 = max(torch.tensor(0).float().cuda(), torch.ceil((x1 - self.recp_field)/self.patch_stride))
a2 = min(torch.tensor(self.n_out - 1).float().cuda(), torch.floor((self.n_out - 1) - ((126 - self.recp_field) - x2)/self.patch_stride)) + 1
b1 = max(torch.tensor(0).float().cuda(), torch.ceil((y1 - self.recp_field)/self.patch_stride))
b2 = min(torch.tensor(self.n_out - 1).float().cuda(), torch.floor((self.n_out - 1) - ((126 - self.recp_field) - y2)/self.patch_stride)) + 1
if (x1 != x2 and y1 != y2):
weights_real[i, :, a1.type(torch.int) : a2.type(torch.int) , b1.type(torch.int) : b2.type(torch.int)] = 0.0
norm_fact_real = weights_real.sum()
norm_fact_fake = weights_real.shape[0]*weights_real.shape[1]*weights_real.shape[2]*weights_real.shape[3]
real_logits = ext, output
fake_logits = netD(fake_imgs.detach())
if idx == 0: # Background stage
errD_real_uncond = criterion(real_logits[1], real_labels) # Real/Fake loss for 'real background' (on patch level)
            errD_real_uncond = torch.mul(errD_real_uncond, weights_real) # Masking output units which correspond to receptive fields which lie within the bounding box
errD_real_uncond = errD_real_uncond.mean()
errD_real_uncond_classi = criterion(real_logits[0], weights_real) # Background/foreground classification loss
errD_real_uncond_classi = errD_real_uncond_classi.mean()
errD_fake_uncond = criterion(fake_logits[1], fake_labels) # Real/Fake loss for 'fake background' (on patch level)
errD_fake_uncond = errD_fake_uncond.mean()
            if (norm_fact_real > 0): # Normalizing the real/fake loss for background after accounting for the number of masked members in the output.
errD_real = errD_real_uncond * ((norm_fact_fake * 1.0) /(norm_fact_real * 1.0))
else:
errD_real = errD_real_uncond
errD_fake = errD_fake_uncond
errD = ((errD_real + errD_fake) * cfg.TRAIN.BG_LOSS_WT) + errD_real_uncond_classi
if idx == 2:
errD_real = criterion_one(real_logits[1], real_labels) # Real/Fake loss for the real image
errD_fake = criterion_one(fake_logits[1], fake_labels) # Real/Fake loss for the fake image
errD = errD_real + errD_fake
if (idx == 0 or idx == 2):
errD.backward()
optD.step()
if (flag == 0):
summary_D = summary.scalar('D_loss%d' % idx, errD.data[0])
self.summary_writer.add_summary(summary_D, count)
summary_D_real = summary.scalar('D_loss_real_%d' % idx, errD_real.data[0])
self.summary_writer.add_summary(summary_D_real, count)
summary_D_fake = summary.scalar('D_loss_fake_%d' % idx, errD_fake.data[0])
self.summary_writer.add_summary(summary_D_fake, count)
return errD
def train_Gnet(self, count):
self.netG.zero_grad()
for myit in range(len(self.netsD)):
self.netsD[myit].zero_grad()
errG_total = 0
flag = count % 100
batch_size = self.real_fimgs[0].size(0)
criterion_one, criterion_class, c_code, p_code = self.criterion_one, self.criterion_class, self.c_code, self.p_code
for i in range(self.num_Ds):
outputs = self.netsD[i](self.fake_imgs[i])
if i == 0 or i == 2: # real/fake loss for background (0) and child (2) stage
real_labels = torch.ones_like(outputs[1])
errG = criterion_one(outputs[1], real_labels)
if i==0:
errG = errG * cfg.TRAIN.BG_LOSS_WT
errG_classi = criterion_one(outputs[0], real_labels) # Background/Foreground classification loss for the fake background image (on patch level)
errG = errG + errG_classi
errG_total = errG_total + errG
if i == 1: # Mutual information loss for the parent stage (1)
pred_p = self.netsD[i](self.fg_mk[i-1])
errG_info = criterion_class(pred_p[0], torch.nonzero(p_code.long())[:,1])
elif i == 2: # Mutual information loss for the child stage (2)
pred_c = self.netsD[i](self.fg_mk[i-1])
errG_info = criterion_class(pred_c[0], torch.nonzero(c_code.long())[:,1])
if(i>0):
errG_total = errG_total + errG_info
if flag == 0:
if i>0:
summary_D_class = summary.scalar('Information_loss_%d' % i, errG_info.data[0])
self.summary_writer.add_summary(summary_D_class, count)
if i == 0 or i == 2:
summary_D = summary.scalar('G_loss%d' % i, errG.data[0])
self.summary_writer.add_summary(summary_D, count)
errG_total.backward()
for myit in range(len(self.netsD)):
self.optimizerG[myit].step()
return errG_total
def train(self):
self.netG, self.netsD, self.num_Ds, start_count = load_network(self.gpus)
avg_param_G = copy_G_params(self.netG)
self.optimizerG, self.optimizersD = \
define_optimizers(self.netG, self.netsD)
self.criterion = nn.BCELoss(reduce=False)
self.criterion_one = nn.BCELoss()
self.criterion_class = nn.CrossEntropyLoss()
self.real_labels = \
Variable(torch.FloatTensor(self.batch_size).fill_(1))
self.fake_labels = \
Variable(torch.FloatTensor(self.batch_size).fill_(0))
nz = cfg.GAN.Z_DIM
noise = Variable(torch.FloatTensor(self.batch_size, nz))
fixed_noise = \
Variable(torch.FloatTensor(self.batch_size, nz).normal_(0, 1))
hard_noise = \
Variable(torch.FloatTensor(self.batch_size, nz).normal_(0, 1)).cuda()
self.patch_stride = float(4) # Receptive field stride given the current discriminator architecture for background stage
self.n_out = 24 # Output size of the discriminator at the background stage; N X N where N = 24
self.recp_field = 34 # Receptive field of each of the member of N X N
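        # Added sanity check: for the 126-pixel inputs referenced in train_Dnet(),
        # (126 - recp_field) / patch_stride + 1 = (126 - 34) / 4 + 1 = 24, which matches n_out.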
if cfg.CUDA:
self.criterion.cuda()
self.criterion_one.cuda()
self.criterion_class.cuda()
self.real_labels = self.real_labels.cuda()
self.fake_labels = self.fake_labels.cuda()
noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
print ("Starting normal FineGAN training..")
count = start_count
start_epoch = start_count // (self.num_batches)
for epoch in range(start_epoch, self.max_epoch):
start_t = time.time()
for step, data in enumerate(self.data_loader, 0):
self.imgs_tcpu, self.real_fimgs, self.real_cimgs, \
self.c_code, self.warped_bbox = self.prepare_data(data)
# Feedforward through Generator. Obtain stagewise fake images
noise.data.normal_(0, 1)
self.fake_imgs, self.fg_imgs, self.mk_imgs, self.fg_mk = \
self.netG(noise, self.c_code)
# Obtain the parent code given the child code
self.p_code = child_to_parent(self.c_code, cfg.FINE_GRAINED_CATEGORIES, cfg.SUPER_CATEGORIES)
# Update Discriminator networks
errD_total = 0
for i in range(self.num_Ds):
if i == 0 or i == 2: # only at parent and child stage
errD = self.train_Dnet(i, count)
errD_total += errD
# Update the Generator networks
errG_total = self.train_Gnet(count)
for p, avg_p in zip(self.netG.parameters(), avg_param_G):
avg_p.mul_(0.999).add_(0.001, p.data)
count = count + 1
if count % cfg.TRAIN.SNAPSHOT_INTERVAL == 0:
backup_para = copy_G_params(self.netG)
save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir)
# Save images
load_params(self.netG, avg_param_G)
self.netG.eval()
with torch.set_grad_enabled(False):
self.fake_imgs, self.fg_imgs, self.mk_imgs, self.fg_mk = \
self.netG(fixed_noise, self.c_code)
save_img_results(self.imgs_tcpu, (self.fake_imgs + self.fg_imgs + self.mk_imgs + self.fg_mk), self.num_Ds,
count, self.image_dir, self.summary_writer)
self.netG.train()
load_params(self.netG, backup_para)
end_t = time.time()
print('''[%d/%d][%d]
Loss_D: %.2f Loss_G: %.2f Time: %.2fs
'''
% (epoch, self.max_epoch, self.num_batches,
errD_total.data[0], errG_total.data[0],
end_t - start_t))
save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir)
print ("Done with the normal training. Now performing hard negative training..")
count = 0
start_t = time.time()
for step, data in enumerate(self.data_loader, 0):
self.imgs_tcpu, self.real_fimgs, self.real_cimgs, \
self.c_code, self.warped_bbox = self.prepare_data(data)
if (count % 2) == 0: # Train on normal batch of images
# Feedforward through Generator. Obtain stagewise fake images
noise.data.normal_(0, 1)
self.fake_imgs, self.fg_imgs, self.mk_imgs, self.fg_mk = \
self.netG(noise, self.c_code)
self.p_code = child_to_parent(self.c_code, cfg.FINE_GRAINED_CATEGORIES, cfg.SUPER_CATEGORIES)
# Update discriminator networks
errD_total = 0
for i in range(self.num_Ds):
if i == 0 or i == 2:
errD = self.train_Dnet(i, count)
errD_total += errD
# Update the generator network
errG_total = self.train_Gnet(count)
else: # Train on degenerate images
repeat_times=10
all_hard_z = Variable(torch.zeros(self.batch_size * repeat_times, nz)).cuda()
all_hard_class = Variable(torch.zeros(self.batch_size * repeat_times, cfg.FINE_GRAINED_CATEGORIES)).cuda()
all_logits = Variable(torch.zeros(self.batch_size * repeat_times,)).cuda()
for hard_it in range(repeat_times):
hard_noise = hard_noise.data.normal_(0,1)
hard_class = Variable(torch.zeros([self.batch_size, cfg.FINE_GRAINED_CATEGORIES])).cuda()
my_rand_id=[]
for c_it in range(self.batch_size):
rand_class = random.sample(range(cfg.FINE_GRAINED_CATEGORIES),1);
hard_class[c_it][rand_class] = 1
my_rand_id.append(rand_class)
all_hard_z[self.batch_size * hard_it : self.batch_size * (hard_it + 1)] = hard_noise.data
all_hard_class[self.batch_size * hard_it : self.batch_size * (hard_it + 1)] = hard_class.data
self.fake_imgs, self.fg_imgs, self.mk_imgs, self.fg_mk = self.netG(hard_noise.detach(), hard_class.detach())
fake_logits = self.netsD[2](self.fg_mk[1].detach())
smax_class = softmax(fake_logits[0], dim = 1)
for b_it in range(self.batch_size):
all_logits[(self.batch_size * hard_it) + b_it] = smax_class[b_it][my_rand_id[b_it]]
sorted_val, indices_hard = torch.sort(all_logits)
noise = all_hard_z[indices_hard[0 : self.batch_size]]
self.c_code = all_hard_class[indices_hard[0 : self.batch_size]]
self.fake_imgs, self.fg_imgs, self.mk_imgs, self.fg_mk = \
self.netG(noise, self.c_code)
self.p_code = child_to_parent(self.c_code, cfg.FINE_GRAINED_CATEGORIES, cfg.SUPER_CATEGORIES)
# Update Discriminator networks
errD_total = 0
for i in range(self.num_Ds):
if i == 0 or i == 2:
errD = self.train_Dnet(i, count)
errD_total += errD
# Update generator network
errG_total = self.train_Gnet(count)
for p, avg_p in zip(self.netG.parameters(), avg_param_G):
avg_p.mul_(0.999).add_(0.001, p.data)
count = count + 1
if count % cfg.TRAIN.SNAPSHOT_INTERVAL_HARDNEG == 0:
backup_para = copy_G_params(self.netG)
save_model(self.netG, avg_param_G, self.netsD, count+500000, self.model_dir)
load_params(self.netG, avg_param_G)
self.netG.eval()
with torch.set_grad_enabled(False):
self.fake_imgs, self.fg_imgs, self.mk_imgs, self.fg_mk = \
self.netG(fixed_noise, self.c_code)
save_img_results(self.imgs_tcpu, (self.fake_imgs + self.fg_imgs + self.mk_imgs + self.fg_mk), self.num_Ds,
count, self.image_dir, self.summary_writer)
self.netG.train()
load_params(self.netG, backup_para)
end_t = time.time()
if (count % 100) == 0:
print('''[%d/%d][%d]
Loss_D: %.2f Loss_G: %.2f Time: %.2fs
'''
% (count, cfg.TRAIN.HARDNEG_MAX_ITER, self.num_batches,
errD_total.data[0], errG_total.data[0],
end_t - start_t))
if (count == cfg.TRAIN.HARDNEG_MAX_ITER): # Hard negative training complete
break
save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir)
self.summary_writer.close()
class FineGAN_evaluator(object):
def __init__(self):
self.save_dir = os.path.join(cfg.SAVE_DIR, 'images')
mkdir_p(self.save_dir)
s_gpus = cfg.GPU_ID.split(',')
self.gpus = [int(ix) for ix in s_gpus]
self.num_gpus = len(self.gpus)
torch.cuda.set_device(self.gpus[0])
cudnn.benchmark = True
self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus
def evaluate_finegan(self):
if cfg.TRAIN.NET_G == '':
print('Error: the path for model not found!')
else:
# Build and load the generator
netG = G_NET()
netG.apply(weights_init)
netG = torch.nn.DataParallel(netG, device_ids=self.gpus)
model_dict = netG.state_dict()
state_dict = \
torch.load(cfg.TRAIN.NET_G,
map_location=lambda storage, loc: storage)
state_dict = {k: v for k, v in state_dict.items() if k in model_dict}
model_dict.update(state_dict)
netG.load_state_dict(model_dict)
print('Load ', cfg.TRAIN.NET_G)
# Uncomment this to print Generator layers
# print(netG)
nz = cfg.GAN.Z_DIM
noise = torch.FloatTensor(self.batch_size, nz)
noise.data.normal_(0, 1)
if cfg.CUDA:
netG.cuda()
noise = noise.cuda()
netG.eval()
background_class = cfg.TEST_BACKGROUND_CLASS
parent_class = cfg.TEST_PARENT_CLASS
child_class = cfg.TEST_CHILD_CLASS
bg_code = torch.zeros([self.batch_size, cfg.FINE_GRAINED_CATEGORIES])
p_code = torch.zeros([self.batch_size, cfg.SUPER_CATEGORIES])
c_code = torch.zeros([self.batch_size, cfg.FINE_GRAINED_CATEGORIES])
for j in range(self.batch_size):
bg_code[j][background_class] = 1
p_code[j][parent_class] = 1
c_code[j][child_class] = 1
fake_imgs, fg_imgs, mk_imgs, fgmk_imgs = netG(noise, c_code, p_code, bg_code) # Forward pass through the generator
self.save_image(fake_imgs[0][0], self.save_dir, 'background')
self.save_image(fake_imgs[1][0], self.save_dir, 'parent_final')
self.save_image(fake_imgs[2][0], self.save_dir, 'child_final')
self.save_image(fg_imgs[0][0], self.save_dir, 'parent_foreground')
self.save_image(fg_imgs[1][0], self.save_dir, 'child_foreground')
self.save_image(mk_imgs[0][0], self.save_dir, 'parent_mask')
self.save_image(mk_imgs[1][0], self.save_dir, 'child_mask')
self.save_image(fgmk_imgs[0][0], self.save_dir, 'parent_foreground_masked')
self.save_image(fgmk_imgs[1][0], self.save_dir, 'child_foreground_masked')
def save_image(self, images, save_dir, iname):
img_name = '%s.png' % (iname)
full_path = os.path.join(save_dir, img_name)
if (iname.find('mask') == -1) or (iname.find('foreground') != -1):
img = images.add(1).div(2).mul(255).clamp(0, 255).byte()
ndarr = img.permute(1, 2, 0).data.cpu().numpy()
im = Image.fromarray(ndarr)
im.save(full_path)
else:
img = images.mul(255).clamp(0, 255).byte()
ndarr = img.data.cpu().numpy()
ndarr = np.reshape(ndarr, (ndarr.shape[-1], ndarr.shape[-1], 1))
ndarr = np.repeat(ndarr, 3, axis=2)
im = Image.fromarray(ndarr)
im.save(full_path)
|
tests/decorators/test_parameter.py | slobinger/connexion | 4,084 | 11136131 | from unittest.mock import MagicMock
from connexion.decorators.parameter import parameter_to_arg
def test_injection():
request = MagicMock(name='request', path_params={'p1': '123'})
request.args = {}
request.headers = {}
request.params = {}
func = MagicMock()
def handler(**kwargs):
func(**kwargs)
class Op:
consumes = ['application/json']
def get_arguments(self, *args, **kwargs):
return {"p1": "123"}
parameter_to_arg(Op(), handler)(request)
func.assert_called_with(p1='123')
parameter_to_arg(Op(), handler, pass_context_arg_name='framework_request_ctx')(request)
func.assert_called_with(p1='123', framework_request_ctx=request.context)
|
QUICK_START/NODE_VOXELNET/src/script/voxelnet_ros.py | Hqss/DINK | 189 | 11136147 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2019, FPAI
# Copyright (c) 2019, SeriouslyHAO
# Copyright (c) 2019, xcj2019
# Copyright (c) 2019, Leonfirst
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pc2
from std_msgs.msg import Header
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray
import sys
import glob
import argparse
import os
import time
import tensorflow as tf
sys.path.append("voxelnet/")
from model import RPN3D
from config import cfg
from utils import *
from utils.preprocess import process_pointcloud
from utils.kitti_loader import build_input
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# code from /opt/ros/kinetic/lib/python2.7/dist-packages/tf/transformations.py
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> np.allclose(q, [0.310622, -0.718287, 0.444435, 0.435953])
True
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_ = _TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i + parity]
k = _NEXT_AXIS[i - parity + 1]
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
# print("ak : {}".format(type(ak)))
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci * ck
cs = ci * sk
sc = si * ck
ss = si * sk
quaternion = np.empty((4,), dtype=np.float64)
if repetition:
quaternion[i] = cj * (cs + sc)
quaternion[j] = sj * (cc + ss)
quaternion[k] = sj * (cs - sc)
quaternion[3] = cj * (cc - ss)
else:
quaternion[i] = cj * sc - sj * cs
quaternion[j] = cj * ss + sj * cc
quaternion[k] = cj * cs - sj * sc
quaternion[3] = cj * cc + sj * ss
if parity:
quaternion[j] *= -1
return quaternion
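def _example_yaw_to_quaternion(yaw=0.5):
    # Added illustration (not part of the original node): for the default 'sxyz' axes and a
    # pure yaw rotation, the result reduces to (0, 0, sin(yaw/2), cos(yaw/2)), which is how
    # velo_callback() below turns the predicted rotation rz into bbox.pose.orientation.
    import math
    q = quaternion_from_euler(0, 0, yaw)
    assert abs(q[2] - math.sin(yaw / 2.0)) < 1e-9
    assert abs(q[3] - math.cos(yaw / 2.0)) < 1e-9
    return q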
class Processor_ROS:
def __init__(self, np_p_ranged):
self.np_p_ranged = np_p_ranged
def run(self):
raw_lidar = self.np_p_ranged
# print(raw_lidar.shape) # DEBUG
voxel = process_pointcloud(raw_lidar)
return raw_lidar, voxel
def dataset_generator(np_p_ranged, batch_size=1, multi_gpu_sum=1):
proc = Processor_ROS(np_p_ranged)
raw_lidar, voxel = proc.run()
# print("feature_buffer: {}".format(voxel['feature_buffer'].shape)) # DEBUG [----, 35, 7]
# print("coordinate_buffer: {}".format(voxel['coordinate_buffer'].shape)) # DEBUG [----, 3]
# print("number_buffer: {}".format(voxel['number_buffer'].shape)) # DEBUG [----]
# only for voxel -> [gpu, k_single_batch, ...]
vox_feature, vox_number, vox_coordinate = [], [], []
single_batch_size = int(batch_size / multi_gpu_sum)
_, per_vox_feature, per_vox_number, per_vox_coordinate = build_input_ros(
voxel)
vox_feature.append(per_vox_feature)
vox_number.append(per_vox_number)
vox_coordinate.append(per_vox_coordinate)
ret = (
np.array(vox_feature),
np.array(vox_number),
np.array(vox_coordinate),
np.array(raw_lidar)
)
return ret
# point cloud topic's subscriber callback function
def velo_callback(msg):
global sess, model
arr_bbox = BoundingBoxArray()
pcl_msg = pc2.read_points(msg, skip_nans=False, field_names=(
"x", "y", "z", "intensity","ring"))
np_p = np.array(list(pcl_msg), dtype=np.float32)
# np_p = np.delete(np_p, -1, 1) # delete "ring" field
print(np_p.shape)
dataset = dataset_generator(np_p, batch_size=1, multi_gpu_sum=1)
# print("{} {} {} {}".format(dataset[0],dataset[1],dataset[2],dataset[3])) # DEBUG
results = model.predict_step_ros(sess, dataset)
# publish to /velodyne_poitns_modified
publish_test(np_p, msg.header.frame_id)
# results: (N, N') (class, x, y, z, h, w, l, rz, score)
if len(results[0]) != 0:
# print("len(results[0]) : {} ".format(len(results[0])))
for result in results[0]:
# print("[+] result: {}".format(result)) # DEBUG
bbox = BoundingBox()
bbox.header.frame_id = msg.header.frame_id
# bbox.header.stamp = rospy.Time.now()
# print("result[7] : {} ".format(result[7]))
q = quaternion_from_euler(0, 0, float(result[7]))
bbox.pose.orientation.x = q[0]
bbox.pose.orientation.y = q[1]
bbox.pose.orientation.z = q[2]
bbox.pose.orientation.w = q[3]
bbox.pose.position.x = float(result[1])
bbox.pose.position.y = float(result[2])
bbox.pose.position.z = float(result[3])
bbox.dimensions.x = float(result[6])
bbox.dimensions.y = float(result[5])
bbox.dimensions.z = float(result[4])
arr_bbox.boxes.append(bbox)
arr_bbox.header.frame_id = msg.header.frame_id
# arr_bbox.header.stamp = rospy.Time.now()
# print("arr_bbox.boxes.size() : {} ".format(len(arr_bbox.boxes)))
    if len(arr_bbox.boxes) != 0:
pub_arr_bbox.publish(arr_bbox)
arr_bbox.boxes.clear()
# publishing function for DEBUG
def publish_test(np_p_ranged, frame_id):
header = Header()
header.stamp = rospy.Time()
header.frame_id = frame_id
x = np_p_ranged[:, 0].reshape(-1)
y = np_p_ranged[:, 1].reshape(-1)
z = np_p_ranged[:, 2].reshape(-1)
# if intensity field exists
if np_p_ranged.shape[1] == 4:
i = np_p_ranged[:, 3].reshape(-1)
else:
i = np.zeros((np_p_ranged.shape[0], 1)).reshape(-1)
cloud = np.stack((x, y, z, i))
# point cloud segments
# 4 PointFields as channel description
msg_segment = pc2.create_cloud(header=header,
fields=_make_point_field(4),
points=cloud.T)
# publish to /velodyne_points_modified
pub_velo.publish(msg_segment) # DEBUG
# voxelnet initializer
def voxelnet_init():
global sess, model
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=cfg.GPU_MEMORY_FRACTION,
visible_device_list=cfg.GPU_AVAILABLE,
allow_growth=True)
config = tf.ConfigProto(
gpu_options=gpu_options,
device_count={
"GPU": cfg.GPU_USE_COUNT,
},
allow_soft_placement=True,
)
sess = tf.Session(config=config)
model = RPN3D(
cls=cfg.DETECT_OBJ,
single_batch_size=args.single_batch_size,
avail_gpus=cfg.GPU_AVAILABLE.split(',')
)
if tf.train.get_checkpoint_state(save_model_dir):
print("Reading model parameters from %s" % save_model_dir)
model.saver.restore(sess, tf.train.latest_checkpoint(save_model_dir))
# code from SqueezeSeg (inspired from Durant35)
def hv_in_range(x, y, z, fov, fov_type='h'):
"""
Extract filtered in-range velodyne coordinates based on azimuth & elevation angle limit
Args:
`x`:velodyne points x array
`y`:velodyne points y array
`z`:velodyne points z array
`fov`:a two element list, e.g.[-45,45]
        `fov_type`: the fov type, either `h` or `v`; default is `h`
Return:
`cond`:condition of points within fov or not
Raise:
`NameError`:"fov type must be set between 'h' and 'v' "
"""
d = np.sqrt(x ** 2 + y ** 2 + z ** 2)
if fov_type == 'h':
return np.logical_and(np.arctan2(y, x) > (-fov[1] * np.pi / 180), np.arctan2(y, x) < (-fov[0] * np.pi / 180))
elif fov_type == 'v':
return np.logical_and(np.arctan2(z, d) < (fov[1] * np.pi / 180), np.arctan2(z, d) > (fov[0] * np.pi / 180))
else:
raise NameError("fov type must be set between 'h' and 'v' ")
def _make_point_field(num_field):
msg_pf1 = pc2.PointField()
msg_pf1.name = np.str('x')
msg_pf1.offset = np.uint32(0)
msg_pf1.datatype = np.uint8(7)
msg_pf1.count = np.uint32(1)
msg_pf2 = pc2.PointField()
msg_pf2.name = np.str('y')
msg_pf2.offset = np.uint32(4)
msg_pf2.datatype = np.uint8(7)
msg_pf2.count = np.uint32(1)
msg_pf3 = pc2.PointField()
msg_pf3.name = np.str('z')
msg_pf3.offset = np.uint32(8)
msg_pf3.datatype = np.uint8(7)
msg_pf3.count = np.uint32(1)
msg_pf4 = pc2.PointField()
msg_pf4.name = np.str('intensity')
msg_pf4.offset = np.uint32(16)
msg_pf4.datatype = np.uint8(7)
msg_pf4.count = np.uint32(1)
if num_field == 4:
return [msg_pf1, msg_pf2, msg_pf3, msg_pf4]
msg_pf5 = pc2.PointField()
msg_pf5.name = np.str('label')
msg_pf5.offset = np.uint32(20)
msg_pf5.datatype = np.uint8(4)
msg_pf5.count = np.uint32(1)
return [msg_pf1, msg_pf2, msg_pf3, msg_pf4, msg_pf5]
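# Note (added comment): the datatype codes above follow sensor_msgs/PointField, where
# 7 == PointField.FLOAT32 (used for x/y/z/intensity) and 4 == PointField.UINT16 (used for the
# optional 'label' field); the offsets are byte offsets within each packed point record.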
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='testing')
parser.add_argument('-n', '--tag', type=str, nargs='?', default='pre_trained_car',
help='set log tag')
parser.add_argument('-b', '--single-batch-size', type=int, nargs='?', default=1, # def: 2
help='set batch size for each gpu')
args = parser.parse_args()
save_model_dir = os.path.join('voxelnet/save_model', args.tag)
# initializing voxelnet
voxelnet_init()
# code added for using ROS
rospy.init_node('voxelnet_ros_node')
#print('hahah')
sub_ = rospy.Subscriber("points_raw", PointCloud2,
velo_callback, queue_size=1)
pub_velo = rospy.Publisher(
"velodyne_points_modified", PointCloud2, queue_size=1)
pub_arr_bbox = rospy.Publisher(
"voxelnet_arr_bbox", BoundingBoxArray, queue_size=10)
pub_bbox = rospy.Publisher("voxelnet_bbox", BoundingBox, queue_size=1)
print("[+] voxelnet_ros_node has started!")
rospy.spin()
|
gans/gan_training_tf_fns.py | garyxcheng/federated | 330 | 11136152 | # Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow training code for Federated GANs.
This code is intended to only use vanilla TensorFlow (no TFF dependency); it is
wired together into a federated computation in gan_training_tff_fns.py. The one
exception is some handling for conversion from Struct, which should go
away when b/130724878 is fixed.
"""
import collections
import attr
import tensorflow as tf
from gans import gan_losses
from utils import tensor_utils
def assert_no_anon_tuples(x):
"""Checks that a nested structure has no Structs at the leaves."""
def check_anon(t):
if 'Struct' in str(type(t)):
raise ValueError('Found Struct:\n', t)
return None
tf.nest.map_structure(check_anon, x)
return x
# Set cmp=False to get a default hash function for tf.function.
@attr.s(eq=False, frozen=True)
class FromServer(object):
"""Container for data that is broadcast from the server to clients.
Attributes:
generator_weights: Weights for the generator model, in the order of
`tf.keras.Model.weights`.
discriminator_weights: Weights for the discriminator model, in the order of
`tf.keras.Model.weights`.
"""
generator_weights = attr.ib()
discriminator_weights = attr.ib()
# Set cmp=False to get a default hash function for tf.function.
@attr.s(eq=False, frozen=False)
class ServerState(object):
"""Container for all server state that must pass from round to round.
Attributes:
generator_weights: Weights for the generator model, in the order of
`tf.keras.Model.weights`.
discriminator_weights: Weights for the discriminator model, in the order of
`tf.keras.Model.weights`.
counters: Aggregated training counters.
aggregation_state: State of the aggregation process. This aggregation
process could be used to handle Differential Privacy aggregation, or could
be set to a simple stateless mean for a non-Differentially Private
approach.
"""
generator_weights = attr.ib()
discriminator_weights = attr.ib()
counters = attr.ib()
aggregation_state = attr.ib(default=())
# Set cmp=False to get a default hash function for tf.function.
@attr.s(eq=False, frozen=True)
class ClientOutput(object):
"""Container for data that is sent from clients back to the server..
Attributes:
discriminator_weights_delta: Update for the discriminator model, in the
      order of `tf.keras.Model.weights`.
update_weight: Weight to be associated with the update.
counters: Metrics that are summed across clients.
"""
discriminator_weights_delta = attr.ib()
update_weight = attr.ib()
counters = attr.ib()
def _weights(model):
"""Returns tensors of model weights, in the order of the variables."""
return [v.read_value() for v in model.weights]
@tf.function
def client_computation(
# Tensor/Dataset arguments that will be supplied by TFF:
gen_inputs_ds: tf.data.Dataset,
real_data_ds: tf.data.Dataset,
from_server: FromServer,
# Python arguments bound to be bound at TFF computation construction time:
generator: tf.keras.Model,
discriminator: tf.keras.Model,
train_discriminator_fn) -> ClientOutput:
"""The computation to run on the client, training the discriminator.
Args:
gen_inputs_ds: A `tf.data.Dataset` of generator_inputs.
real_data_ds: A `tf.data.Dataset` of data from the real distribution.
from_server: A `FromServer` object, including the current model weights.
generator: The generator.
discriminator: The discriminator.
train_discriminator_fn: A function which takes the two networks, generator
input, and real data and trains the discriminator.
Returns:
A `ClientOutput` object.
"""
tf.nest.map_structure(lambda a, b: a.assign(b), generator.weights,
from_server.generator_weights)
tf.nest.map_structure(lambda a, b: a.assign(b), discriminator.weights,
from_server.discriminator_weights)
num_examples = tf.constant(0)
gen_inputs_and_real_data = tf.data.Dataset.zip((gen_inputs_ds, real_data_ds))
for gen_inputs, real_data in gen_inputs_and_real_data:
# It's possible that real_data and gen_inputs have different batch sizes.
# For calculating the discriminator loss, it's desirable to have equal-sized
# contributions from both the real and fake data. Also, it's necessary if
# using the Wasserstein gradient penalty (where a difference is taken b/w
# the real and fake data). So here we reduce to the min batch size. This
# also ensures num_examples properly reflects the amount of data trained on.
min_batch_size = tf.minimum(tf.shape(real_data)[0], tf.shape(gen_inputs)[0])
real_data = real_data[0:min_batch_size]
gen_inputs = gen_inputs[0:min_batch_size]
num_examples += train_discriminator_fn(generator, discriminator, gen_inputs,
real_data)
weights_delta = tf.nest.map_structure(tf.subtract, discriminator.weights,
from_server.discriminator_weights)
weights_delta, has_non_finite_delta = (
tensor_utils.zero_all_if_any_non_finite(weights_delta))
update_weight = tf.cast(num_examples, tf.float32)
# Zero out the weight if there are any non-finite values.
# TODO(b/122071074): federated_mean might not do the right thing if
# all clients have zero weight.
update_weight = tf.cond(
tf.equal(has_non_finite_delta, 0), lambda: update_weight,
lambda: tf.constant(0.0))
return ClientOutput(
discriminator_weights_delta=weights_delta,
update_weight=update_weight,
counters=collections.OrderedDict(
num_discriminator_train_examples=num_examples))
def server_initial_state(generator, discriminator):
"""Returns the initial state of the server."""
return ServerState(
generator_weights=_weights(generator),
discriminator_weights=_weights(discriminator),
counters=collections.OrderedDict(
num_discriminator_train_examples=tf.constant(0),
num_generator_train_examples=tf.constant(0),
num_rounds=tf.constant(0)),
aggregation_state=())
@tf.function
def server_computation(
# Tensor/Dataset arguments that will be supplied by TFF:
server_state: ServerState,
gen_inputs_ds: tf.data.Dataset,
client_output: ClientOutput,
# Python arguments to be bound at TFF computation construction time:
generator: tf.keras.Model,
discriminator: tf.keras.Model,
server_disc_update_optimizer: tf.keras.optimizers.Optimizer,
train_generator_fn,
new_aggregation_state=()) -> ServerState:
"""The computation to run on the server, training the generator.
Args:
server_state: The initial `ServerState` for the round.
gen_inputs_ds: An infinite `tf.data.Dataset` of inputs to the `generator`.
client_output: The (possibly aggregated) `ClientOutput`.
generator: The generator.
discriminator: The discriminator.
server_disc_update_optimizer: Optimizer used to `apply_gradients` based on
the client_output delta.
train_generator_fn: A function which takes the two networks and generator
input and trains the generator.
new_aggregation_state: The updated state of the (possibly DP) averaging
aggregator.
Returns:
An updated `ServerState` object.
"""
# A tf.function can't modify the structure of its input arguments,
# so we make a semi-shallow copy:
server_state = attr.evolve(
server_state, counters=collections.OrderedDict(server_state.counters))
tf.nest.map_structure(lambda a, b: a.assign(b), generator.weights,
server_state.generator_weights)
tf.nest.map_structure(lambda a, b: a.assign(b), discriminator.weights,
server_state.discriminator_weights)
# Update the server discriminator.
delta = client_output.discriminator_weights_delta
tf.nest.assert_same_structure(delta, discriminator.weights)
grads_and_vars = tf.nest.map_structure(lambda x, v: (-1.0 * x, v), delta,
discriminator.weights)
server_disc_update_optimizer.apply_gradients(
grads_and_vars, name='server_update')
for k, v in client_output.counters.items():
server_state.counters[k] += v
# Update the state of the (possibly DP) averaging aggregator.
server_state.aggregation_state = new_aggregation_state
gen_examples_this_round = tf.constant(0)
for gen_inputs in gen_inputs_ds: # Compiled by autograph.
gen_examples_this_round += train_generator_fn(generator, discriminator,
gen_inputs)
server_state.counters[
'num_generator_train_examples'] += gen_examples_this_round
server_state.counters['num_rounds'] += 1
server_state.generator_weights = _weights(generator)
server_state.discriminator_weights = _weights(discriminator)
return server_state
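def _example_single_round(server_state, gen_inputs_ds, real_data_ds, server_gen_inputs_ds,
                          generator, discriminator, server_disc_update_optimizer,
                          train_generator_fn, train_discriminator_fn):
  """Added sketch (not part of the original library): one client + one server step, run
  eagerly without TFF. In the real federated setup, gan_training_tff_fns.py aggregates many
  ClientOutputs (weighted by `update_weight`) before server_computation is invoked.
  """
  from_server = FromServer(
      generator_weights=server_state.generator_weights,
      discriminator_weights=server_state.discriminator_weights)
  client_output = client_computation(gen_inputs_ds, real_data_ds, from_server, generator,
                                     discriminator, train_discriminator_fn)
  return server_computation(server_state, server_gen_inputs_ds, client_output, generator,
                            discriminator, server_disc_update_optimizer, train_generator_fn)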
def create_train_generator_fn(gan_loss_fns: gan_losses.AbstractGanLossFns,
gen_optimizer: tf.keras.optimizers.Optimizer):
"""Create a function that trains generator, binding loss and optimizer.
Args:
gan_loss_fns: Instance of gan_losses.AbstractGanLossFns interface,
specifying the generator/discriminator training losses.
gen_optimizer: Optimizer for training the generator.
Returns:
Function that executes one step of generator training.
"""
# We check that the optimizer has not been used previously, which ensures
# that when it is bound the train fn isn't holding onto a different copy of
  # the optimizer variables than the copy that is being exchanged b/w server and
# clients.
if gen_optimizer.variables():
raise ValueError(
'Expected gen_optimizer to not have been used previously, but '
'variables were already initialized.')
@tf.function
def train_generator_fn(generator: tf.keras.Model,
discriminator: tf.keras.Model, generator_inputs):
"""Trains the generator on a single batch.
Args:
generator: The generator.
discriminator: The discriminator.
generator_inputs: A batch of inputs (usually noise) for the generator.
Returns:
The number of examples trained on.
"""
def gen_loss():
"""Does the forward pass and computes losses for the generator."""
# N.B. The complete pass must be inside loss() for gradient tracing.
return gan_loss_fns.generator_loss(generator, discriminator,
generator_inputs)
gen_optimizer.minimize(gen_loss, var_list=generator.trainable_variables)
return tf.shape(generator_inputs)[0]
return train_generator_fn
def create_train_discriminator_fn(
gan_loss_fns: gan_losses.AbstractGanLossFns,
disc_optimizer: tf.keras.optimizers.Optimizer):
"""Create a function that trains discriminator, binding loss and optimizer.
Args:
gan_loss_fns: Instance of gan_losses.AbstractGanLossFns interface,
specifying the generator/discriminator training losses.
disc_optimizer: Optimizer for training the discriminator.
Returns:
Function that executes one step of discriminator training.
"""
# We assert that the optimizer has not been used previously, which ensures
# that when it is bound the train fn isn't holding onto a different copy of
  # the optimizer variables than the copy that is being exchanged b/w server and
# clients.
if disc_optimizer.variables():
raise ValueError(
'Expected disc_optimizer to not have been used previously, but '
'variables were already initialized.')
@tf.function
def train_discriminator_fn(generator: tf.keras.Model,
discriminator: tf.keras.Model, generator_inputs,
real_data):
"""Trains the discriminator on a single batch.
Args:
generator: The generator.
discriminator: The discriminator.
generator_inputs: A batch of inputs (usually noise) for the generator.
real_data: A batch of real data for the discriminator.
Returns:
The size of the batch.
"""
def disc_loss():
"""Does the forward pass and computes losses for the discriminator."""
# N.B. The complete pass must be inside loss() for gradient tracing.
return gan_loss_fns.discriminator_loss(generator, discriminator,
generator_inputs, real_data)
disc_optimizer.minimize(
disc_loss, var_list=discriminator.trainable_variables)
return tf.shape(real_data)[0]
return train_discriminator_fn
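def _example_bind_train_fns(gan_loss_fns: gan_losses.AbstractGanLossFns):
  """Added usage sketch (not part of the original library).
  `gan_loss_fns` must be a concrete implementation of gan_losses.AbstractGanLossFns, and each
  factory must receive a freshly constructed optimizer (see the checks in the factories above);
  the learning rates below are illustrative only.
  """
  train_generator_fn = create_train_generator_fn(
      gan_loss_fns, tf.keras.optimizers.Adam(learning_rate=1e-4))
  train_discriminator_fn = create_train_discriminator_fn(
      gan_loss_fns, tf.keras.optimizers.Adam(learning_rate=4e-4))
  return train_generator_fn, train_discriminator_fn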
|
nengo/spa/bind.py | Michaeljurado24/nengo | 762 | 11136154 | import nengo
from nengo.exceptions import ValidationError
from nengo.spa.module import Module
class Bind(Module):
"""A module for binding together two inputs.
Binding is done with circular convolution. For more details on how
this is computed, see the underlying `~.networks.CircularConvolution`
network.
Parameters
----------
dimensions : int
Number of dimensions for the two vectors to be compared.
vocab : Vocabulary, optional
The vocabulary to use to interpret the vectors. If None,
the default vocabulary for the given dimensionality is used.
n_neurons : int, optional
Number of neurons to use in each product computation.
invert_a, invert_b : bool, optional
Whether to reverse the order of elements in either
the first input (``invert_a``) or the second input (``invert_b``).
Flipping the second input will make the network perform circular
correlation instead of circular convolution.
input_magnitude : float, optional
The expected magnitude of the vectors to be convolved.
This value is used to determine the radius of the ensembles
computing the element-wise product.
label : str, optional
A name for the ensemble. Used for debugging and visualization.
seed : int, optional
The seed used for random number generation.
add_to_container : bool, optional
Determines if this Network will be added to the current container.
If None, will be true if currently within a Network.
"""
def __init__(
self,
dimensions,
vocab=None,
n_neurons=200,
invert_a=False,
invert_b=False,
input_magnitude=1.0,
label=None,
seed=None,
add_to_container=None,
):
super().__init__(label, seed, add_to_container)
if vocab is None:
# use the default vocab for this number of dimensions
vocab = dimensions
elif vocab.dimensions != dimensions:
raise ValidationError(
"Dimensionality of given vocabulary (%d) does "
"not match dimensionality of buffer (%d)"
% (vocab.dimensions, dimensions),
attr="dimensions",
obj=self,
)
with self:
self.cc = nengo.networks.CircularConvolution(
n_neurons,
dimensions,
invert_a,
invert_b,
input_magnitude=input_magnitude,
)
self.A = self.cc.input_a
self.B = self.cc.input_b
self.output = self.cc.output
self.inputs = dict(A=(self.A, vocab), B=(self.B, vocab))
self.outputs = dict(default=(self.output, vocab))
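def _example_bind_usage(dimensions=32):
    """Added usage sketch (not from the nengo documentation): inside a ``nengo.spa.SPA``
    model, Bind exposes ``A``, ``B`` and ``output``, so two semantic-pointer states can be
    bound with circular convolution roughly as follows. The use of ``spa.State`` here is an
    assumption about the surrounding model, not a requirement of Bind itself.
    """
    import nengo.spa as spa
    with spa.SPA() as model:
        model.a = spa.State(dimensions)
        model.b = spa.State(dimensions)
        model.bind = Bind(dimensions)
        nengo.Connection(model.a.output, model.bind.A)
        nengo.Connection(model.b.output, model.bind.B)
    return model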
|
networks/mlp_mse.py | google-research/ibc | 180 | 11136157 | # coding=utf-8
# Copyright 2022 The Reach ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a tf_agents compatible mlp-mse."""
import gin
from ibc.networks.layers import mlp_dropout
from ibc.networks.layers import resnet
import tensorflow as tf
from tf_agents.networks import network
@gin.configurable
class MLPMSE(network.Network):
"""MLP-MSE compatible with tfagents."""
def __init__(self,
obs_spec,
action_spec,
width=512,
depth=2,
rate=0.1,
act_denorm_layer=None,
name='MLPMSE',
layers='MLPDropout'):
super(MLPMSE, self).__init__(
input_tensor_spec=obs_spec, state_spec=(), name=name)
    # Used at inference time to denormalize the MSE action output.
self._act_denorm_layer = act_denorm_layer
# Define MLP.
hidden_sizes = [width for _ in range(depth)]
dense = tf.keras.layers.Dense
if layers == 'MLPDropout':
self._mlp = mlp_dropout.MLPDropoutLayer(
hidden_sizes, rate, kernel_initializer='normal',
bias_initializer='normal', dense=dense)
elif layers == 'ResNetOrig':
self._mlp = resnet.ResNetOrigLayer(
hidden_sizes, rate, kernel_initializer='normal',
bias_initializer='normal', dense=dense)
elif layers == 'ResNetPreActivation':
self._mlp = resnet.ResNetPreActivationLayer(
hidden_sizes, rate, kernel_initializer='normal',
bias_initializer='normal', dense=dense)
# Define projection to action.
self._project_action = tf.keras.layers.Dense(
action_spec.shape[-1],
kernel_initializer='normal',
bias_initializer='normal')
def call(self, obs, training, step_type=(), network_state=()):
# Combine dict of observations to concatenated tensor. [B x T x obs_spec]
obs = tf.concat(tf.nest.flatten(obs), axis=-1)
# Flatten obs across time: [B x T * obs_spec]
batch_size = tf.shape(obs)[0]
x = tf.reshape(obs, [batch_size, -1])
# Forward mlp.
x = self._mlp(x, training=training)
# Project to action.
x = self._project_action(x, training=training)
if not training:
x = self._act_denorm_layer(x)
return x, network_state
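# Shape walkthrough (added note, with an assumed observation spec): if `obs` is a dict of two
# tensors shaped [B, T, d1] and [B, T, d2], call() concatenates them to [B, T, d1 + d2],
# flattens across time to [B, T * (d1 + d2)], runs the MLP, and projects to
# [B, action_spec.shape[-1]]; when training=False the projected action is additionally passed
# through `act_denorm_layer` to undo action normalization.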
|
tests/test_main_inline.py | shmuelamar/cbox | 164 | 11136181 | from io import StringIO
from os import linesep
import pytest
from cbox.__main__ import main
DATA1 = linesep.join(['hello world', '123 456', 'zzz xxx'])
DATA2 = linesep.join(['abc.py', 'def.pyc'])
def run_inline(in_data, argv):
"""runs inline func and returns its stdout, stderr and exitcode"""
outstream = StringIO()
errstream = StringIO()
instream = StringIO(in_data)
exitcode = main(argv, instream, outstream, errstream)
outstream.seek(0)
errstream.seek(0)
return outstream.read(), errstream.read(), exitcode
def test_main_inline():
argv = ['s.split()[0]']
out, err, code = run_inline(DATA1, argv)
assert code == 0
assert not err
assert out.splitlines() == ['hello', '123', 'zzz']
def test_main_inline_modules():
argv = ['-m', 'os,re', 're.findall(r"\.py[cx]?", os.path.splitext(s)[-1])']
out, err, code = run_inline(DATA2, argv)
assert code == 0
assert not err
assert out.splitlines() == ['.py', '.pyc']
assert 're' not in globals(), 'inline imports affect global scope'
assert 'os' not in globals(), 'inline imports affect global scope'
def test_main_inline_error():
argv = ['s.split(']
with pytest.raises(ValueError):
run_inline(DATA1, argv)
def test_main_inline_asyncio():
argv = ['-w', 'asyncio', 's.split()[0]']
out, err, code = run_inline(DATA1, argv)
assert code == 0
assert not err
assert out.splitlines() == ['hello', '123', 'zzz']
|
ferminet/utils/tests/base_scf_test.py | llxlr/ferminet | 469 | 11136198 | # Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ferminet.utils.scf."""
from typing import List, Tuple
from absl.testing import absltest
from absl.testing import parameterized
from ferminet.utils import scf
from ferminet.utils import system
import numpy as np
import pyscf
class ScfTest(parameterized.TestCase):
def setUp(self):
super(ScfTest, self).setUp()
# disable use of temp directory in pyscf.
# Test calculations are small enough to fit in RAM and we don't need
# checkpoint files.
pyscf.lib.param.TMPDIR = None
@parameterized.parameters(
{
'molecule': [system.Atom('He', (0, 0, 0))],
'nelectrons': (1, 1)
},
{
'molecule': [system.Atom('N', (0, 0, 0))],
'nelectrons': (5, 2)
},
{
'molecule': [system.Atom('N', (0, 0, 0))],
'nelectrons': (5, 3)
},
{
'molecule': [system.Atom('N', (0, 0, 0))],
'nelectrons': (4, 2)
},
{
'molecule': [system.Atom('O', (0, 0, 0))],
'nelectrons': (5, 3),
'restricted': False,
},
{
'molecule': [
system.Atom('N', (0, 0, 0)),
system.Atom('N', (0, 0, 1.4))
],
'nelectrons': (7, 7)
},
{
'molecule': [
system.Atom('O', (0, 0, 0)),
system.Atom('O', (0, 0, 1.4))
],
'nelectrons': (9, 7),
'restricted': False,
},
)
def test_scf_interface(self,
molecule: List[system.Atom],
nelectrons: Tuple[int, int],
restricted: bool = True):
"""Tests SCF interface to a pyscf calculation.
pyscf has its own tests so only check that we can run calculations over
atoms and simple diatomics using the interface in ferminet.scf.
Args:
molecule: List of system.Atom objects giving atoms in the molecule.
nelectrons: Tuple containing number of alpha and beta electrons.
restricted: If true, run a restricted Hartree-Fock calculation, otherwise
run an unrestricted Hartree-Fock calculation.
"""
npts = 100
xs = np.random.randn(npts, 3)
hf = scf.Scf(molecule=molecule,
nelectrons=nelectrons,
restricted=restricted)
hf.run()
mo_vals = hf.eval_mos(xs)
self.assertLen(mo_vals, 2) # alpha-spin orbitals and beta-spin orbitals.
for spin_mo_vals in mo_vals:
      # Evaluate npts points on M orbitals/functions - (npts, M) array.
self.assertEqual(spin_mo_vals.shape, (npts, hf._mol.nao_nr()))
if __name__ == '__main__':
absltest.main()
|
src/textacy/datasets/oxford_text_archive.py | austinjp/textacy | 1,929 | 11136200 | """
Oxford Text Archive literary works
----------------------------------
A collection of ~2.7k Creative Commons literary works from the Oxford Text Archive,
containing primarily English-language 16th-20th century literature and history.
Records include the following data:
- ``text``: Full text of the literary work.
- ``title``: Title of the literary work.
- ``author``: Author(s) of the literary work.
- ``year``: Year that the literary work was published.
- ``url``: URL at which literary work can be found online via the OTA.
- ``id``: Unique identifier of the literary work within the OTA.
This dataset was compiled by <NAME> from the Oxford Text Archive and
stored in his GitHub repo to avoid unnecessary scraping of the OTA site. It is
downloaded from that repo and, aside from some light cleaning of its metadata,
is reproduced exactly here.
"""
import csv
import io
import itertools
import logging
import pathlib
import os
import re
from typing import Iterable, Optional, Set, Tuple, Union
from .. import constants, types, utils
from .. import io as tio
from .base import Dataset
LOGGER = logging.getLogger(__name__)
NAME = "oxford_text_archive"
META = {
"site_url": "https://ota.ox.ac.uk/",
"description": (
"Collection of ~2.7k Creative Commons texts from the Oxford Text "
"Archive, containing primarily English-language 16th-20th century "
"literature and history."
),
}
DOWNLOAD_URL = "https://github.com/mimno/ota/archive/master.zip"
class OxfordTextArchive(Dataset):
"""
Stream a collection of English-language literary works from text files on disk,
either as texts or text + metadata pairs.
Download the data (one time only!), saving and extracting its contents to disk::
>>> import textacy.datasets
>>> ds = textacy.datasets.OxfordTextArchive()
>>> ds.download()
>>> ds.info
{'name': 'oxford_text_archive',
'site_url': 'https://ota.ox.ac.uk/',
'description': 'Collection of ~2.7k Creative Commons texts from the Oxford Text Archive, containing primarily English-language 16th-20th century literature and history.'}
Iterate over literary works as texts or records with both text and metadata::
>>> for text in ds.texts(limit=3):
... print(text[:200])
>>> for text, meta in ds.records(limit=3):
... print("\\n{}, {}".format(meta["title"], meta["year"]))
... print(text[:300])
Filter literary works by a variety of metadata fields and text length::
>>> for text, meta in ds.records(author="<NAME>", limit=1):
... print("{}\\n{}".format(meta["title"], text[:500]))
>>> for text, meta in ds.records(date_range=("1900-01-01", "1990-01-01"), limit=5):
... print(meta["year"], meta["author"])
>>> for text in ds.texts(min_len=4000000):
... print(len(text))
Stream literary works into a :class:`textacy.Corpus <textacy.corpus.Corpus>`::
>>> textacy.Corpus("en", data=ds.records(limit=5))
Corpus(5 docs; 182289 tokens)
Args:
data_dir (str or :class:`pathlib.Path`): Path to directory on disk
under which dataset is stored, i.e. ``/path/to/data_dir/oxford_text_archive``.
Attributes:
full_date_range: First and last dates for which works are available,
each as an ISO-formatted string (YYYY-MM-DD).
authors (Set[str]): Full names of all distinct authors included in this
dataset, e.g. "<NAME>".
"""
full_date_range: Tuple[str, str] = ("0018-01-01", "1990-01-01")
def __init__(
self,
data_dir: Union[str, pathlib.Path] = constants.DEFAULT_DATA_DIR.joinpath(NAME),
):
super().__init__(NAME, meta=META)
self.data_dir = utils.to_path(data_dir).resolve()
self._text_dirpath = self.data_dir.joinpath("master", "text")
self._metadata_filepath = self.data_dir.joinpath("master", "metadata.tsv")
self._metadata = None
def download(self, *, force: bool = False) -> None:
"""
Download the data as a zip archive file, then save it to disk and
extract its contents under the :attr:`OxfordTextArchive.data_dir` directory.
Args:
force: If True, download the dataset, even if it already exists
on disk under ``data_dir``.
"""
filepath = tio.download_file(
DOWNLOAD_URL, filename=None, dirpath=self.data_dir, force=force,
)
if filepath:
tio.unpack_archive(filepath, extract_dir=None)
@property
def metadata(self):
"""Dict[str, dict]"""
if not self._metadata:
try:
self._metadata = self._load_and_parse_metadata()
except OSError as e:
LOGGER.error(e)
return self._metadata
def _load_and_parse_metadata(self):
"""
        Read in the ``metadata.tsv`` file at :attr:`OxfordTextArchive._metadata_filepath`
        (extracted from the downloaded zip archive); convert it into a dictionary keyed by
        record ID; clean up some of the fields; and remove a couple of fields that are
        identical throughout.
"""
if not self._metadata_filepath.is_file():
raise OSError(
f"metadata file {self._metadata_filepath} not found;\n"
"has the dataset been downloaded yet?"
)
re_extract_year = re.compile(r"(\d{4})")
re_extract_authors = re.compile(
r"(\D+)"
r"(?:, "
r"(?:[bdf]l?\. )?(?:ca. )?\d{4}(?:\?| or \d{1,2})?(?:-(?:[bdf]l?\. )?(?:ca. )?\d{4}(?:\?| or \d{1,2})?)?|"
r"(?:\d{2}th(?:/\d{2}th)? cent\.)"
r"\.?)"
)
re_clean_authors = re.compile(r"^[,;. ]+|[,.]+\s*?$")
metadata = {}
with self._metadata_filepath.open(mode="rb") as f:
subf = io.StringIO(f.read().decode("utf-8"))
for row in csv.DictReader(subf, delimiter="\t"):
# only include English-language works (99.9% of all works)
if not row["Language"].startswith("English"):
continue
# clean up years
year_match = re_extract_year.search(row["Year"])
if year_match:
row["Year"] = year_match.group()
else:
row["Year"] = None
# extract and clean up authors
authors = re_extract_authors.findall(row["Author"]) or [row["Author"]]
row["Author"] = tuple(
re_clean_authors.sub("", author) for author in authors
)
row["Title"] = row["Title"].strip()
# get rid of uniform "Language" and "License" fields
del row["Language"]
del row["License"]
metadata[row["ID"]] = {key.lower(): val for key, val in row.items()}
# set authors attribute for user convenience / to validate author filtering
self.authors = {
author
for value in metadata.values()
for author in value["author"]
if value.get("author")
}
return metadata
def __iter__(self):
if not self._text_dirpath.is_dir():
raise OSError(
f"text directory {self._text_dirpath} not found;\n"
"has the dataset been downloaded yet?"
)
_metadata = self.metadata # for performance
for filepath in sorted(tio.get_filepaths(self._text_dirpath, extension=".txt")):
id_, _ = os.path.splitext(os.path.basename(filepath))
record = _metadata.get(id_, {}).copy()
if not record:
LOGGER.debug(
"no metadata found for record %s; probably non-English text...", id_
)
continue
with io.open(filepath, mode="rt", encoding="utf-8") as f:
record["text"] = f.read()
yield record
def _get_filters(self, author, date_range, min_len):
filters = []
if min_len is not None:
if min_len < 1:
raise ValueError("`min_len` must be at least 1")
filters.append(lambda record: len(record.get("text", "")) >= min_len)
if author is not None:
author = utils.validate_set_members(
author, (str, bytes), valid_vals=self.authors
)
filters.append(
lambda record: record.get("author")
and any(athr in author for athr in record["author"])
)
if date_range is not None:
date_range = utils.validate_and_clip_range(
date_range, self.full_date_range, val_type=(str, bytes)
)
filters.append(
lambda record: record.get("year")
and date_range[0] <= record["year"] < date_range[1]
)
return filters
def _filtered_iter(self, filters):
if filters:
for record in self:
if all(filter_(record) for filter_ in filters):
yield record
else:
for record in self:
yield record
def texts(
self,
*,
author: Optional[Union[str, Set[str]]] = None,
date_range: Optional[Tuple[Optional[str], Optional[str]]] = None,
min_len: Optional[int] = None,
limit: Optional[int] = None,
) -> Iterable[str]:
"""
Iterate over works in this dataset, optionally filtering by a variety
of metadata and/or text length, and yield texts only.
Args:
            author: Filter texts by the authors' name. For multiple values (Set[str]),
                ANY rather than ALL of the authors must be found among a given work's authors.
            date_range: Filter texts by the date on which they were published;
both start and end date must be specified, but a null value for either
will be replaced by the min/max date available in the dataset.
min_len: Filter texts by the length (# characters) of their text content.
limit: Yield no more than ``limit`` texts that match all specified filters.
Yields:
Text of the next work in dataset passing all filters.
Raises:
ValueError: If any filtering options are invalid.
"""
filters = self._get_filters(author, date_range, min_len)
for record in itertools.islice(self._filtered_iter(filters), limit):
yield record["text"]
def records(
self,
*,
author: Optional[Union[str, Set[str]]] = None,
date_range: Optional[Tuple[Optional[str], Optional[str]]] = None,
min_len: Optional[int] = None,
limit: Optional[int] = None,
) -> Iterable[types.Record]:
"""
Iterate over works in this dataset, optionally filtering by a variety
of metadata and/or text length, and yield text + metadata pairs.
Args:
            author: Filter texts by the authors' name. For multiple values (Set[str]),
                ANY rather than ALL of the authors must be found among a given work's authors.
            date_range: Filter texts by the date on which they were published;
both start and end date must be specified, but a null value for either
will be replaced by the min/max date available in the dataset.
min_len: Filter texts by the length (# characters) of their text content.
limit: Yield no more than ``limit`` texts that match all specified filters.
Yields:
Text of the next work in dataset passing all filters,
and its corresponding metadata.
Raises:
ValueError: If any filtering options are invalid.
"""
filters = self._get_filters(author, date_range, min_len)
for record in itertools.islice(self._filtered_iter(filters), limit):
yield types.Record(text=record.pop("text"), meta=record)
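# Hedged usage sketch (not part of the original module): combines the download() and
# records() calls documented in the class docstring above; the limit is illustrative only.
def _example_stream_titles(limit=3):
    ds = OxfordTextArchive()
    ds.download()
    return [meta["title"] for _, meta in ds.records(limit=limit)]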
|
tests/components/rfxtrx/test_device_action.py | andersop91/core | 22,481 | 11136209 | <gh_stars>1000+
"""The tests for RFXCOM RFXtrx device actions."""
from __future__ import annotations
from typing import Any, NamedTuple
import RFXtrx
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.rfxtrx import DOMAIN
from homeassistant.helpers.device_registry import DeviceRegistry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
mock_device_registry,
mock_registry,
)
from tests.components.rfxtrx.conftest import create_rfx_test_cfg
@pytest.fixture(name="device_reg")
def device_reg_fixture(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture(name="entity_reg")
def entity_reg_fixture(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
class DeviceTestData(NamedTuple):
"""Test data linked to a device."""
code: str
device_identifiers: set[tuple[str, str, str, str]]
DEVICE_LIGHTING_1 = DeviceTestData("0710002a45050170", {("rfxtrx", "10", "0", "E5")})
DEVICE_BLINDS_1 = DeviceTestData(
"09190000009ba8010100", {("rfxtrx", "19", "0", "009ba8:1")}
)
DEVICE_TEMPHUM_1 = DeviceTestData(
"0a52080705020095220269", {("rfxtrx", "52", "8", "05:02")}
)
@pytest.mark.parametrize("device", [DEVICE_LIGHTING_1, DEVICE_TEMPHUM_1])
async def test_device_test_data(rfxtrx, device: DeviceTestData):
"""Verify that our testing data remains correct."""
pkt: RFXtrx.lowlevel.Packet = RFXtrx.lowlevel.parse(bytearray.fromhex(device.code))
assert device.device_identifiers == {
("rfxtrx", f"{pkt.packettype:x}", f"{pkt.subtype:x}", pkt.id_string)
}
async def setup_entry(hass, devices):
"""Construct a config setup."""
entry_data = create_rfx_test_cfg(devices=devices)
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
await hass.async_start()
def _get_expected_actions(data):
for value in data.values():
yield {"type": "send_command", "subtype": value}
@pytest.mark.parametrize(
"device,expected",
[
[
DEVICE_LIGHTING_1,
list(_get_expected_actions(RFXtrx.lowlevel.Lighting1.COMMANDS)),
],
[
DEVICE_BLINDS_1,
list(_get_expected_actions(RFXtrx.lowlevel.RollerTrol.COMMANDS)),
],
[DEVICE_TEMPHUM_1, []],
],
)
async def test_get_actions(hass, device_reg: DeviceRegistry, device, expected):
"""Test we get the expected actions from a rfxtrx."""
await setup_entry(hass, {device.code: {"signal_repetitions": 1}})
device_entry = device_reg.async_get_device(device.device_identifiers, set())
assert device_entry
actions = await async_get_device_automations(
hass, DeviceAutomationType.ACTION, device_entry.id
)
actions = [action for action in actions if action["domain"] == DOMAIN]
expected_actions = [
{"domain": DOMAIN, "device_id": device_entry.id, **action_type}
for action_type in expected
]
assert_lists_same(actions, expected_actions)
@pytest.mark.parametrize(
"device,config,expected",
[
[
DEVICE_LIGHTING_1,
{"type": "send_command", "subtype": "On"},
"0710000045050100",
],
[
DEVICE_LIGHTING_1,
{"type": "send_command", "subtype": "Off"},
"0710000045050000",
],
[
DEVICE_BLINDS_1,
{"type": "send_command", "subtype": "Stop"},
"09190000009ba8010200",
],
],
)
async def test_action(
hass, device_reg: DeviceRegistry, rfxtrx: RFXtrx.Connect, device, config, expected
):
"""Test for actions."""
await setup_entry(hass, {device.code: {"signal_repetitions": 1}})
device_entry = device_reg.async_get_device(device.device_identifiers, set())
assert device_entry
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "event",
"event_type": "test_event",
},
"action": {
"domain": DOMAIN,
"device_id": device_entry.id,
**config,
},
},
]
},
)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
rfxtrx.transport.send.assert_called_once_with(bytearray.fromhex(expected))
async def test_invalid_action(hass, device_reg: DeviceRegistry):
"""Test for invalid actions."""
device = DEVICE_LIGHTING_1
await setup_entry(hass, {device.code: {"signal_repetitions": 1}})
    device_identifiers: Any = device.device_identifiers
    device_entry = device_reg.async_get_device(device_identifiers, set())
assert device_entry
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "event",
"event_type": "test_event",
},
"action": {
"domain": DOMAIN,
"device_id": device_entry.id,
"type": "send_command",
"subtype": "invalid",
},
},
]
},
)
await hass.async_block_till_done()
assert len(notifications := hass.states.async_all("persistent_notification")) == 1
assert (
"The following integrations and platforms could not be set up"
in notifications[0].attributes["message"]
)
|
tests/backends/oracle/test_operations.py | ni-ning/django | 61,676 | 11136217 | <reponame>ni-ning/django
import unittest
from django.core.management.color import no_style
from django.db import connection
from django.test import TransactionTestCase
from ..models import Person, Tag
@unittest.skipUnless(connection.vendor == 'oracle', 'Oracle tests')
class OperationsTests(TransactionTestCase):
available_apps = ['backends']
def test_sequence_name_truncation(self):
seq_name = connection.ops._get_no_autofield_sequence_name('schema_authorwithevenlongee869')
self.assertEqual(seq_name, 'SCHEMA_AUTHORWITHEVENLOB0B8_SQ')
def test_bulk_batch_size(self):
# Oracle restricts the number of parameters in a query.
objects = range(2**16)
self.assertEqual(connection.ops.bulk_batch_size([], objects), len(objects))
# Each field is a parameter for each object.
self.assertEqual(
connection.ops.bulk_batch_size(['id'], objects),
connection.features.max_query_params,
)
self.assertEqual(
connection.ops.bulk_batch_size(['id', 'other'], objects),
connection.features.max_query_params // 2,
)
def test_sql_flush(self):
statements = connection.ops.sql_flush(
no_style(),
[Person._meta.db_table, Tag._meta.db_table],
)
# The tables and constraints are processed in an unordered set.
self.assertEqual(
statements[0],
'ALTER TABLE "BACKENDS_TAG" DISABLE CONSTRAINT '
'"BACKENDS__CONTENT_T_FD9D7A85_F" KEEP INDEX;',
)
self.assertEqual(
sorted(statements[1:-1]),
[
'TRUNCATE TABLE "BACKENDS_PERSON";',
'TRUNCATE TABLE "BACKENDS_TAG";',
],
)
self.assertEqual(
statements[-1],
'ALTER TABLE "BACKENDS_TAG" ENABLE CONSTRAINT '
'"BACKENDS__CONTENT_T_FD9D7A85_F";',
)
def test_sql_flush_allow_cascade(self):
statements = connection.ops.sql_flush(
no_style(),
[Person._meta.db_table, Tag._meta.db_table],
allow_cascade=True,
)
# The tables and constraints are processed in an unordered set.
self.assertEqual(
statements[0],
'ALTER TABLE "BACKENDS_VERYLONGMODELNAME540F" DISABLE CONSTRAINT '
'"BACKENDS__PERSON_ID_1DD5E829_F" KEEP INDEX;',
)
self.assertEqual(
sorted(statements[1:-1]),
[
'TRUNCATE TABLE "BACKENDS_PERSON";',
'TRUNCATE TABLE "BACKENDS_TAG";',
'TRUNCATE TABLE "BACKENDS_VERYLONGMODELNAME540F";',
],
)
self.assertEqual(
statements[-1],
'ALTER TABLE "BACKENDS_VERYLONGMODELNAME540F" ENABLE CONSTRAINT '
'"BACKENDS__PERSON_ID_1DD5E829_F";',
)
def test_sql_flush_sequences(self):
statements = connection.ops.sql_flush(
no_style(),
[Person._meta.db_table, Tag._meta.db_table],
reset_sequences=True,
)
# The tables and constraints are processed in an unordered set.
self.assertEqual(
statements[0],
'ALTER TABLE "BACKENDS_TAG" DISABLE CONSTRAINT '
'"BACKENDS__CONTENT_T_FD9D7A85_F" KEEP INDEX;',
)
self.assertEqual(
sorted(statements[1:3]),
[
'TRUNCATE TABLE "BACKENDS_PERSON";',
'TRUNCATE TABLE "BACKENDS_TAG";',
],
)
self.assertEqual(
statements[3],
'ALTER TABLE "BACKENDS_TAG" ENABLE CONSTRAINT '
'"BACKENDS__CONTENT_T_FD9D7A85_F";',
)
# Sequences.
self.assertEqual(len(statements[4:]), 2)
self.assertIn('BACKENDS_PERSON_SQ', statements[4])
self.assertIn('BACKENDS_TAG_SQ', statements[5])
def test_sql_flush_sequences_allow_cascade(self):
statements = connection.ops.sql_flush(
no_style(),
[Person._meta.db_table, Tag._meta.db_table],
reset_sequences=True,
allow_cascade=True,
)
# The tables and constraints are processed in an unordered set.
self.assertEqual(
statements[0],
'ALTER TABLE "BACKENDS_VERYLONGMODELNAME540F" DISABLE CONSTRAINT '
'"BACKENDS__PERSON_ID_1DD5E829_F" KEEP INDEX;',
)
self.assertEqual(
sorted(statements[1:4]),
[
'TRUNCATE TABLE "BACKENDS_PERSON";',
'TRUNCATE TABLE "BACKENDS_TAG";',
'TRUNCATE TABLE "BACKENDS_VERYLONGMODELNAME540F";',
],
)
self.assertEqual(
statements[4],
'ALTER TABLE "BACKENDS_VERYLONGMODELNAME540F" ENABLE CONSTRAINT '
'"BACKENDS__PERSON_ID_1DD5E829_F";',
)
# Sequences.
self.assertEqual(len(statements[5:]), 3)
self.assertIn('BACKENDS_PERSON_SQ', statements[5])
self.assertIn('BACKENDS_VERYLONGMODELN7BE2_SQ', statements[6])
self.assertIn('BACKENDS_TAG_SQ', statements[7])
|
misc/markov.py | erayon/cycli | 290 | 11136224 | <filename>misc/markov.py
from cycli.cypher import Cypher
from misc.graphgist import get_all_queries
import re
cypher = Cypher()
queries = get_all_queries()
# Each Cypher word is a state in the Markov model. We're also adding a "" state; this means there are no previous words
# (we're at the beginning of the query).
cypher_words = [""] + cypher.words()
# Store the model in a dictionary of dictionaries.
markov = {i: {j:0.0 for j in cypher_words} for i in cypher_words}
for query in queries:
# Find the indices of Cypher functions and keywords separately. This results in a list of tuples for each word and its
# index, e.g. [('MATCH', 0), ('WHERE', 13), ('RETURN', 29)].
function_indices = []
for word in cypher.FUNCTIONS:
idx = [m.start() for m in re.finditer(" " + word + "\s+\(", query, re.IGNORECASE)]
for i in idx:
function_indices.append((word, i))
# Find the keywords. Make sure they're surrounded by spaces so that we don't grab words within words.
keyword_indices = []
for word in cypher.KEYWORDS:
idx = [m.start() for m in re.finditer(" " + word + " ", query, re.IGNORECASE)]
for i in idx:
keyword_indices.append((word, i))
# Combine the indexes of the functions and keywords.
indices = function_indices + keyword_indices
# Sort the words by the order of their indexes, i.e. the order in which they were found in the query.
indices.sort(key=lambda tup: tup[1])
# Drop the indexes so that we just have a list of words ordered by their position in the query.
indices = [x[0] for x in indices]
# Append the empty string state to the beginning; this state means there are no previous keywords.
indices = [""] + indices
for i in range(len(indices) - 1):
keyword = indices[i]
next_keyword = indices[i + 1]
# Build the Markov model. Given that the previous keyword is keyword[i], find the probability that the next keyword is
# keyword[j].
markov[keyword][next_keyword] += 1
# Divide each value in a row by the sum of all the values in the row to convert to a probability.
for key, value in markov.items():
denominator = sum(value.values())
if denominator == 0:
# Absorbing state.
markov[key] = {i: 1.0 if i == key else 0.0 for i in value.keys()}
else:
markov[key] = {i:j / denominator for i, j in value.items()}
# Convert dictionaries to list of tuples so that they can be stored in order.
for word in markov.keys():
ordered = sorted(markov[word].items(), key=lambda x:x[1], reverse=True)
markov[word] = ordered
# Write the Markov model to a file.
with open("markov.txt", "w") as file:
file.write(repr(markov))
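# Hedged companion sketch (not part of the original script): once markov.txt has been
# written above, the ordered (keyword, probability) pairs can be reloaded and used to
# suggest likely next keywords; ast.literal_eval is assumed here purely for illustration.
def suggest_next_keywords(previous_keyword="MATCH", top_n=3, path="markov.txt"):
    import ast
    with open(path) as f:
        model = ast.literal_eval(f.read())
    return [word for word, _ in model.get(previous_keyword, [])[:top_n]]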
|
sarpy/io/general/nitf_elements/tres/unclass/OBJCTA.py | bombaci-vsc/sarpy | 119 | 11136225 | <filename>sarpy/io/general/nitf_elements/tres/unclass/OBJCTA.py
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
class OBJ(TREElement):
def __init__(self, value):
super(OBJ, self).__init__()
self.add_field('OBJ_TY', 's', 20, value)
self.add_field('OBJ_NM', 's', 15, value)
self.add_field('OBJ_POS', 's', 2, value)
self.add_field('OBJ_SN', 's', 10, value)
self.add_field('OBJ_LL', 's', 21, value)
self.add_field('OBJ_ELEV', 's', 8, value)
self.add_field('OBJ_ROW', 's', 8, value)
self.add_field('OBJ_COL', 's', 8, value)
self.add_field('OBJ_PROW', 's', 8, value)
self.add_field('OBJ_PCOL', 's', 8, value)
self.add_field('OBJ_ATTR', 's', 20, value)
self.add_field('OBJ_SEN', 's', 2, value)
if self.OBJ_SEN == 'R':
self.add_field('OBJ_AZ_3DB_WIDTH', 's', 7, value)
self.add_field('OBJ_RNG_3DB_WIDTH', 's', 7, value)
self.add_field('OBJ_AZ_18DB_WIDTH', 's', 7, value)
self.add_field('OBJ_RNG_18DB_WIDTH', 's', 7, value)
self.add_field('OBJ_AZ_3_18DB_RATIO', 's', 8, value)
self.add_field('OBJ_RNG_3_18DB_RATIO', 's', 8, value)
self.add_field('OBJ_AZ_PK_SL_RATIO', 's', 8, value)
self.add_field('OBJ_RNG_PK_SL_RATIO', 's', 8, value)
self.add_field('OBJ_AZ_INT_SL_RATIO', 's', 8, value)
self.add_field('OBJ_RNGINT_SL_RATIO', 's', 8, value)
elif self.OBJ_SEN in ['EO', 'IR']:
self.add_field('OBJ_CAL_TEMP', 's', 6, value)
class OBJCTAType(TREElement):
def __init__(self, value):
super(OBJCTAType, self).__init__()
self.add_field('VERNUM', 's', 4, value)
self.add_field('NUM_OBJ', 'd', 3, value)
self.add_field('OBJ_REF', 's', 10, value)
self.add_field('NUM_SCENE_OBJ', 'd', 3, value)
self.add_loop('OBJs', self.NUM_OBJ, OBJ, value)
class OBJCTA(TREExtension):
_tag_value = 'OBJCTA'
_data_type = OBJCTAType
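# Hedged illustration (not part of sarpy): the add_field/add_loop pattern used above could
# describe a hypothetical two-field TRE as below; the field names and lengths are invented.
class ExampleTREType(TREElement):
    def __init__(self, value):
        super(ExampleTREType, self).__init__()
        self.add_field('EX_NAME', 's', 10, value)  # 10-character string field
        self.add_field('EX_COUNT', 'd', 3, value)  # 3-digit integer field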
|
rules/private/proto_repository_tools_srcs.bzl | heartless-clown/rules_proto | 249 | 11136237 | <gh_stars>100-1000
""" Code generated by list_repository_tools_srcs.go; DO NOT EDIT."""
PROTO_REPOSITORY_TOOLS_SRCS = [
"@build_stack_rules_proto//:BUILD.bazel",
"@build_stack_rules_proto//cmd/depsgen:BUILD.bazel",
"@build_stack_rules_proto//cmd/depsgen:config.go",
"@build_stack_rules_proto//cmd/depsgen:depsgen.go",
"@build_stack_rules_proto//cmd/depsgen:generator.go",
"@build_stack_rules_proto//cmd/depsgen:template.go",
"@build_stack_rules_proto//cmd/examplegen:BUILD.bazel",
"@build_stack_rules_proto//cmd/examplegen:config.go",
"@build_stack_rules_proto//cmd/examplegen:examplegen.go",
"@build_stack_rules_proto//cmd/examplegen:generator.go",
"@build_stack_rules_proto//cmd/examplegen:linewriter.go",
"@build_stack_rules_proto//cmd/examplegen:template.go",
"@build_stack_rules_proto//cmd/gazelle:BUILD.bazel",
"@build_stack_rules_proto//cmd/gazelle:diff.go",
"@build_stack_rules_proto//cmd/gazelle:fix-update.go",
"@build_stack_rules_proto//cmd/gazelle:fix.go",
"@build_stack_rules_proto//cmd/gazelle:gazelle.go",
"@build_stack_rules_proto//cmd/gazelle:langs.go",
"@build_stack_rules_proto//cmd/gazelle:metaresolver.go",
"@build_stack_rules_proto//cmd/gazelle:print.go",
"@build_stack_rules_proto//cmd/gazelle:update-repos.go",
"@build_stack_rules_proto//cmd/gazelle:wspace.go",
"@build_stack_rules_proto//cmd/gencopy:BUILD.bazel",
"@build_stack_rules_proto//cmd/gencopy:gencopy.go",
"@build_stack_rules_proto//deps:BUILD.bazel",
"@build_stack_rules_proto//docs:BUILD.bazel",
"@build_stack_rules_proto//docs/_site:BUILD.bazel",
"@build_stack_rules_proto//example:BUILD.bazel",
"@build_stack_rules_proto//example/golden:BUILD.bazel",
"@build_stack_rules_proto//example/person:BUILD.bazel",
"@build_stack_rules_proto//example/place:BUILD.bazel",
"@build_stack_rules_proto//example/routeguide:BUILD.bazel",
"@build_stack_rules_proto//example/routeguide/cc:BUILD.bazel",
"@build_stack_rules_proto//example/routeguide/closure:BUILD.bazel",
"@build_stack_rules_proto//example/routeguide/java:BUILD.bazel",
"@build_stack_rules_proto//example/routeguide/nodejs:BUILD.bazel",
"@build_stack_rules_proto//example/routeguide/scala:BUILD.bazel",
"@build_stack_rules_proto//example/thing:BUILD.bazel",
"@build_stack_rules_proto//example/toolchain/prebuilt:BUILD.bazel",
"@build_stack_rules_proto//example/toolchain/standard:BUILD.bazel",
"@build_stack_rules_proto//language/example:BUILD.bazel",
"@build_stack_rules_proto//language/example:example.go",
"@build_stack_rules_proto//language/protobuf:BUILD.bazel",
"@build_stack_rules_proto//language/protobuf/oldtestdata/gogo:BUILD.bazel",
"@build_stack_rules_proto//language/protobuf/oldtestdata/java:BUILD.bazel",
"@build_stack_rules_proto//language/protobuf:protobuf.go",
"@build_stack_rules_proto//pkg:BUILD.bazel",
"@build_stack_rules_proto//pkg/goldentest:BUILD.bazel",
"@build_stack_rules_proto//pkg/goldentest:cases.go",
"@build_stack_rules_proto//pkg/language/noop:BUILD.bazel",
"@build_stack_rules_proto//pkg/language/noop:noop.go",
"@build_stack_rules_proto//pkg/language/protobuf:BUILD.bazel",
"@build_stack_rules_proto//pkg/language/protobuf:config.go",
"@build_stack_rules_proto//pkg/language/protobuf:fix.go",
"@build_stack_rules_proto//pkg/language/protobuf:generate.go",
"@build_stack_rules_proto//pkg/language/protobuf:kinds.go",
"@build_stack_rules_proto//pkg/language/protobuf:lang.go",
"@build_stack_rules_proto//pkg/language/protobuf:override.go",
"@build_stack_rules_proto//pkg/language/protobuf:resolve.go",
"@build_stack_rules_proto//pkg/plugin/builtin:BUILD.bazel",
"@build_stack_rules_proto//pkg/plugin/builtin:cpp_plugin.go",
"@build_stack_rules_proto//pkg/plugin/builtin:csharp_plugin.go",
"@build_stack_rules_proto//pkg/plugin/builtin:doc.go",
"@build_stack_rules_proto//pkg/plugin/builtin:grpc_grpc_cpp.go",
"@build_stack_rules_proto//pkg/plugin/builtin:java_plugin.go",
"@build_stack_rules_proto//pkg/plugin/builtin:js_closure_plugin.go",
"@build_stack_rules_proto//pkg/plugin/builtin:js_common_plugin.go",
"@build_stack_rules_proto//pkg/plugin/builtin:objc_plugin.go",
"@build_stack_rules_proto//pkg/plugin/builtin:php_plugin.go",
"@build_stack_rules_proto//pkg/plugin/builtin:python_plugin.go",
"@build_stack_rules_proto//pkg/plugin/builtin:ruby_plugin.go",
"@build_stack_rules_proto//pkg/plugin/gogo/protobuf:BUILD.bazel",
"@build_stack_rules_proto//pkg/plugin/gogo/protobuf:protoc-gen-gogo.go",
"@build_stack_rules_proto//pkg/plugin/golang/protobuf:BUILD.bazel",
"@build_stack_rules_proto//pkg/plugin/golang/protobuf:protoc-gen-go.go",
"@build_stack_rules_proto//pkg/plugin/grpc/grpc:BUILD.bazel",
"@build_stack_rules_proto//pkg/plugin/grpc/grpc:protoc-gen-grpc-python.go",
"@build_stack_rules_proto//pkg/plugin/grpc/grpcgo:BUILD.bazel",
"@build_stack_rules_proto//pkg/plugin/grpc/grpcgo:protoc-gen-go-grpc.go",
"@build_stack_rules_proto//pkg/plugin/grpc/grpcjava:BUILD.bazel",
"@build_stack_rules_proto//pkg/plugin/grpc/grpcjava:protoc-gen-grpc-java.go",
"@build_stack_rules_proto//pkg/plugin/grpc/grpcnode:BUILD.bazel",
"@build_stack_rules_proto//pkg/plugin/grpc/grpcnode:protoc-gen-grpc-node.go",
"@build_stack_rules_proto//pkg/plugin/grpcecosystem/grpcgateway:BUILD.bazel",
"@build_stack_rules_proto//pkg/plugin/grpcecosystem/grpcgateway:protoc-gen-grpc-gateway.go",
"@build_stack_rules_proto//pkg/plugin/scalapb/scalapb:BUILD.bazel",
"@build_stack_rules_proto//pkg/plugin/scalapb/scalapb:protoc_gen_scala.go",
"@build_stack_rules_proto//pkg/plugin/stackb/grpc_js:BUILD.bazel",
"@build_stack_rules_proto//pkg/plugin/stackb/grpc_js:protoc-gen-grpc-js.go",
"@build_stack_rules_proto//pkg/plugin/stephenh/ts-proto:BUILD.bazel",
"@build_stack_rules_proto//pkg/plugin/stephenh/ts-proto:protoc-gen-ts-proto.go",
"@build_stack_rules_proto//pkg/plugintest:BUILD.bazel",
"@build_stack_rules_proto//pkg/plugintest:case.go",
"@build_stack_rules_proto//pkg/plugintest:doc.go",
"@build_stack_rules_proto//pkg/plugintest:utils.go",
"@build_stack_rules_proto//pkg/protoc:BUILD.bazel",
"@build_stack_rules_proto//pkg/protoc:depsresolver.go",
"@build_stack_rules_proto//pkg/protoc:file.go",
"@build_stack_rules_proto//pkg/protoc:intent.go",
"@build_stack_rules_proto//pkg/protoc:language_config.go",
"@build_stack_rules_proto//pkg/protoc:language_plugin_config.go",
"@build_stack_rules_proto//pkg/protoc:language_rule.go",
"@build_stack_rules_proto//pkg/protoc:language_rule_config.go",
"@build_stack_rules_proto//pkg/protoc:other_proto_library.go",
"@build_stack_rules_proto//pkg/protoc:package.go",
"@build_stack_rules_proto//pkg/protoc:package_config.go",
"@build_stack_rules_proto//pkg/protoc:plugin.go",
"@build_stack_rules_proto//pkg/protoc:plugin_configuration.go",
"@build_stack_rules_proto//pkg/protoc:plugin_context.go",
"@build_stack_rules_proto//pkg/protoc:plugin_registry.go",
"@build_stack_rules_proto//pkg/protoc:proto_compile.go",
"@build_stack_rules_proto//pkg/protoc:proto_compiled_sources.go",
"@build_stack_rules_proto//pkg/protoc:proto_descriptor_set.go",
"@build_stack_rules_proto//pkg/protoc:proto_enum_option_collector.go",
"@build_stack_rules_proto//pkg/protoc:proto_library.go",
"@build_stack_rules_proto//pkg/protoc:protoc_configuration.go",
"@build_stack_rules_proto//pkg/protoc:registry.go",
"@build_stack_rules_proto//pkg/protoc:resolver.go",
"@build_stack_rules_proto//pkg/protoc:rewrite.go",
"@build_stack_rules_proto//pkg/protoc:rule_provider.go",
"@build_stack_rules_proto//pkg/protoc:rule_registry.go",
"@build_stack_rules_proto//pkg/protoc:ruleindex.go",
"@build_stack_rules_proto//pkg/protoc:syntaxutil.go",
"@build_stack_rules_proto//pkg/protoc:yconfig.go",
"@build_stack_rules_proto//pkg/rule/rules_cc:BUILD.bazel",
"@build_stack_rules_proto//pkg/rule/rules_cc:cc_library.go",
"@build_stack_rules_proto//pkg/rule/rules_cc:grpc_cc_library.go",
"@build_stack_rules_proto//pkg/rule/rules_cc:proto_cc_library.go",
"@build_stack_rules_proto//pkg/rule/rules_closure:BUILD.bazel",
"@build_stack_rules_proto//pkg/rule/rules_closure:closure_js_library.go",
"@build_stack_rules_proto//pkg/rule/rules_closure:grpc_closure_js_library.go",
"@build_stack_rules_proto//pkg/rule/rules_closure:proto_closure_js_library.go",
"@build_stack_rules_proto//pkg/rule/rules_go:BUILD.bazel",
"@build_stack_rules_proto//pkg/rule/rules_go:go_library.go",
"@build_stack_rules_proto//pkg/rule/rules_java:BUILD.bazel",
"@build_stack_rules_proto//pkg/rule/rules_java:grpc_java_library.go",
"@build_stack_rules_proto//pkg/rule/rules_java:java_library.go",
"@build_stack_rules_proto//pkg/rule/rules_java:proto_java_library.go",
"@build_stack_rules_proto//pkg/rule/rules_nodejs:BUILD.bazel",
"@build_stack_rules_proto//pkg/rule/rules_nodejs:grpc_nodejs_library.go",
"@build_stack_rules_proto//pkg/rule/rules_nodejs:js_library.go",
"@build_stack_rules_proto//pkg/rule/rules_nodejs:proto_nodejs_library.go",
"@build_stack_rules_proto//pkg/rule/rules_nodejs:proto_ts_library.go",
"@build_stack_rules_proto//pkg/rule/rules_python:BUILD.bazel",
"@build_stack_rules_proto//pkg/rule/rules_python:grpc_py_library.go",
"@build_stack_rules_proto//pkg/rule/rules_python:proto_py_library.go",
"@build_stack_rules_proto//pkg/rule/rules_python:py_library.go",
"@build_stack_rules_proto//pkg/rule/rules_scala:BUILD.bazel",
"@build_stack_rules_proto//pkg/rule/rules_scala:scala_library.go",
"@build_stack_rules_proto//pkg/rule/rules_scala:scala_proto_library.go",
"@build_stack_rules_proto//plugin:BUILD.bazel",
"@build_stack_rules_proto//plugin/builtin:BUILD.bazel",
"@build_stack_rules_proto//plugin/gogo/protobuf:BUILD.bazel",
"@build_stack_rules_proto//plugin/golang/protobuf:BUILD.bazel",
"@build_stack_rules_proto//plugin/grpc/grpc:BUILD.bazel",
"@build_stack_rules_proto//plugin/grpc/grpc-go:BUILD.bazel",
"@build_stack_rules_proto//plugin/grpc/grpc-java:BUILD.bazel",
"@build_stack_rules_proto//plugin/grpc/grpc-node:BUILD.bazel",
"@build_stack_rules_proto//plugin/grpc-ecosystem/grpc-gateway:BUILD.bazel",
"@build_stack_rules_proto//plugin/scalapb/scalapb:BUILD.bazel",
"@build_stack_rules_proto//plugin/stackb/grpc_js:BUILD.bazel",
"@build_stack_rules_proto//plugin/stephenh/ts-proto:BUILD.bazel",
"@build_stack_rules_proto//rules:BUILD.bazel",
"@build_stack_rules_proto//rules/cc:BUILD.bazel",
"@build_stack_rules_proto//rules/closure:BUILD.bazel",
"@build_stack_rules_proto//rules/go:BUILD.bazel",
"@build_stack_rules_proto//rules/java:BUILD.bazel",
"@build_stack_rules_proto//rules/nodejs:BUILD.bazel",
"@build_stack_rules_proto//rules/private:BUILD.bazel",
"@build_stack_rules_proto//rules/private:list_repository_tools_srcs.go",
"@build_stack_rules_proto//rules/proto:BUILD.bazel",
"@build_stack_rules_proto//rules/py:BUILD.bazel",
"@build_stack_rules_proto//rules/scala:BUILD.bazel",
"@build_stack_rules_proto//rules/ts:BUILD.bazel",
"@build_stack_rules_proto//toolchain:BUILD.bazel",
]
|
examples/quickflat/plot_make_svg.py | mvdoc/pycortex | 423 | 11136246 | """
===============================
Save a 2D static flatmap as SVG
===============================
Plot a 2D static flatmap and save it as SVG file.
**Some words on the `rechache` parameter before we begin:**
Setting the `recache=True` parameter recaches the flatmap cache located in
<filestore>/<subject>/cache. By default intermediate steps for a flatmap are
cached after the first generation to speed up the process for the future. If
any of the intermediate steps changes, the flatmap generation may fail.
`recache=True` will load these intermediate steps new.
This can be helpful if you think there is no reason that the
`quickflat.make_figure` to fail but it nevertheless fails. Try it, it's magic!
The default background is set to be a transparent image. If you want to change
that use the parameter `bgcolor`.
"""
import cortex
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(1234)
# Create a random pycortex Volume
volume = cortex.Volume.random(subject='S1', xfmname='fullhead')
# Plot a flatmap with the data projected onto the surface
_ = cortex.quickflat.make_figure(volume)
plt.show()
# Save this flatmap
filename = "./my_flatmap.svg"
_ = cortex.quickflat.make_png(filename, volume, recache=False)
|
example/viewer.py | animuku/pyk4a | 186 | 11136252 | import cv2
import numpy as np
import pyk4a
from pyk4a import Config, PyK4A
def main():
k4a = PyK4A(
Config(
color_resolution=pyk4a.ColorResolution.RES_720P,
depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
synchronized_images_only=True,
)
)
k4a.start()
# getters and setters directly get and set on device
k4a.whitebalance = 4500
assert k4a.whitebalance == 4500
k4a.whitebalance = 4510
assert k4a.whitebalance == 4510
while 1:
capture = k4a.get_capture()
if np.any(capture.color):
cv2.imshow("k4a", capture.color[:, :, :3])
key = cv2.waitKey(10)
if key != -1:
cv2.destroyAllWindows()
break
k4a.stop()
if __name__ == "__main__":
main()
|
data/covering_grammar/lib/covering_grammar.py | wannaphong/wikipron | 111 | 11136271 | #!/usr/bin/env python
"""Creates covering grammar FST from TSV of correspondences."""
import argparse
import pynini
TOKEN_TYPES = ["byte", "utf8"]
def main(args: argparse.Namespace) -> None:
input_token_type = (
args.input_token_type
if args.input_token_type in TOKEN_TYPES
else pynini.SymbolTable.read_text(args.input_token_type)
)
output_token_type = (
args.output_token_type
if args.output_token_type in TOKEN_TYPES
else pynini.SymbolTable.read_text(args.output_token_type)
)
cg = pynini.string_file(
args.tsv_path,
input_token_type=input_token_type,
output_token_type=output_token_type,
)
cg.closure().optimize()
cg.write(args.fst_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--input_token_type",
default="utf8",
help="input token type or path to symbol table (default: %(default)s)",
)
parser.add_argument(
"--output_token_type",
default="utf8",
help="output token type or path to symbol table "
"(default: %(default)s)",
)
parser.add_argument("tsv_path", help="path to input TSV")
parser.add_argument("fst_path", help="path to output FST")
main(parser.parse_args())
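# Hedged usage sketch (not part of the original script): the TSV is expected to hold one
# input/output correspondence per line, which pynini.string_file compiles into the
# covering grammar; the file names below are invented for demonstration.
#
#   $ cat correspondences.tsv
#   a<TAB>a
#   b<TAB>b
#   $ python covering_grammar.py correspondences.tsv covering.fst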
|
videoanalyst/data/utils/misc.py | TragedyN/SiamFCpp | 737 | 11136283 | # -*- coding: utf-8 -*-
from typing import Dict
def index_data(data: Dict, idx: int):
r"""
Arguments
data: Dict
data to be indexed
idx: int
index used for indexing in data's first dimension
"""
ret = dict()
for k in data:
ret[k] = data[k][idx]
return ret
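# Hedged usage sketch (not part of the original module): index_data simply picks the
# idx-th element from every value in the dict, e.g. one sample out of a batched dict.
def _example_index_data():
    batch = {"image": ["img0", "img1"], "anno": [[0, 0, 10, 10], [5, 5, 20, 20]]}
    return index_data(batch, 1)  # -> {"image": "img1", "anno": [5, 5, 20, 20]}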
|
skyline/panorama/agent.py | datastreaming/skyline-1 | 396 | 11136288 | import logging
import sys
import traceback
from os import getpid
# from os.path import dirname, abspath, isdir
from os.path import isdir
from daemon import runner
from time import sleep
from sys import version_info
import mysql.connector
from mysql.connector import errorcode
from logging.handlers import TimedRotatingFileHandler, MemoryHandler
import os.path
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
sys.path.insert(0, os.path.dirname(__file__))
# @modified 20191115 - Branch #3262: py3
# This prevents flake8 E402 - module level import not at top of file
if True:
import settings
from panorama import Panorama
from validate_settings import validate_settings_variables
skyline_app = 'panorama'
skyline_app_logger = skyline_app + 'Log'
logger = logging.getLogger(skyline_app_logger)
logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
python_version = int(version_info[0])
# Database configuration
try:
config = {'user': settings.PANORAMA_DBUSER,
'password': settings.<PASSWORD>,
'host': settings.PANORAMA_DBHOST,
'port': settings.PANORAMA_DBPORT,
'database': settings.PANORAMA_DATABASE,
'raise_on_warnings': True}
except:
print('error: failed to determine database settings from settings.py')
sys.exit(1)
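# Hedged illustration (not part of this agent): the settings consumed above would look
# roughly like the snippet below in settings.py; the values are placeholders only and the
# password setting is omitted here.
#
#   PANORAMA_DBUSER = 'skyline'
#   PANORAMA_DBHOST = '127.0.0.1'
#   PANORAMA_DBPORT = '3306'
#   PANORAMA_DATABASE = 'skyline'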
class PanoramaAgent():
def __init__(self):
self.stdin_path = '/dev/null'
self.stdout_path = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
self.stderr_path = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
self.pidfile_path = '%s/%s.pid' % (settings.PID_PATH, skyline_app)
self.pidfile_timeout = 5
def run(self):
logger.info('starting skyline panorama')
Panorama(getpid()).start()
while 1:
sleep(100)
if __name__ == "__main__":
"""
Start the Panorama agent.
"""
if not isdir(settings.PID_PATH):
print('pid directory does not exist at %s' % settings.PID_PATH)
sys.exit(1)
if not isdir(settings.LOG_PATH):
print('log directory does not exist at %s' % settings.LOG_PATH)
sys.exit(1)
if not isdir(settings.PANORAMA_CHECK_PATH):
print('Panorama check directory does not exist at %s' % settings.PANORAMA_CHECK_PATH)
sys.exit(1)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s :: %(process)s :: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
handler = logging.handlers.TimedRotatingFileHandler(
logfile,
when="midnight",
interval=1,
backupCount=5)
memory_handler = logging.handlers.MemoryHandler(256,
flushLevel=logging.DEBUG,
target=handler)
handler.setFormatter(formatter)
logger.addHandler(memory_handler)
# Validate settings variables
valid_settings = validate_settings_variables(skyline_app)
if not valid_settings:
print('error :: invalid variables in settings.py - cannot start')
sys.exit(1)
# @added 20191031 - Feature #3310: gracefully handle db failure
# Branch 3262: py3
try:
start_if_no_db = settings.START_IF_NO_DB
except:
start_if_no_db = False
# Make sure mysql is available
mysql_up = False
try:
configuration_error = True
# Try connect to mysql
try:
cnx = mysql.connector.connect(**config)
configuration_error = False
mysql_up = True
cnx.close()
logger.info('database user name or password - connected OK')
except mysql.connector.Error as err:
logger.error(traceback.format_exc())
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
logger.error('error :: something is wrong with your database user name or password')
elif err.errno == errorcode.ER_BAD_DB_ERROR:
logger.error('error :: the %s database does not exist' % settings.PANORAMA_DATABASE)
else:
logger.error('error :: mysql error - %s' % str(err))
except:
try:
if configuration_error:
logger.error(traceback.format_exc())
logger.error('error :: The database is not available')
print('The database is not available')
except:
logger.error(traceback.format_exc())
logger.error('error :: The database is not available')
print('The database is not available')
# @modified 20191031 - Feature #3310: gracefully handle db failure
# Branch 3262: py3
# sys.exit(1)
if start_if_no_db:
logger.warn('warning :: mysql_up is %s but START_IF_NO_DB is %s, so starting' % (
str(mysql_up), str(start_if_no_db)))
mysql_up = True
else:
sys.exit(1)
# @added 20191031 - Feature #3310: gracefully handle db failure
# Branch 3262: py3
if start_if_no_db:
if not mysql_up:
logger.warn('warning :: mysql_up is %s but START_IF_NO_DB is %s, so starting' % (
str(mysql_up), str(start_if_no_db)))
mysql_up = True
if not mysql_up:
sys.exit(1)
panorama = PanoramaAgent()
if len(sys.argv) > 1 and sys.argv[1] == 'run':
panorama.run()
else:
daemon_runner = runner.DaemonRunner(panorama)
daemon_runner.daemon_context.files_preserve = [handler.stream]
daemon_runner.do_action()
|
crypto/storagespace/test.py | cclauss/fbctf-2019-challenges | 213 | 11136292 | <reponame>cclauss/fbctf-2019-challenges<gh_stars>100-1000
#!/usr/bin/env sage
from typing import Any, Callable, Dict, Optional, Tuple
import base64
import socket
import json
from solve import Connection
"""
Tests functionality of the server and a few sanity checks
"""
JSON = Dict[str, Any]
TestFunction = Callable[[Connection], None]
def do_request(
store: Connection,
request: JSON,
) -> str:
store.sendline(json.dumps(request, sort_keys=True))
r = store.recvuntil('\n> ')
return r[:-3]
def sign_request(
store: Connection,
request: JSON,
) -> JSON:
sign_request = {
'command': 'sign',
'params': {
'command': request['command'],
'params': request['params'],
}
}
result = do_request(store, sign_request)
signed_request = json.loads(result)
return signed_request
def do_signed(
store: Connection,
request: JSON,
) -> str:
signed_request = sign_request(store, request)
return do_request(store, signed_request)
def request(
__command: str,
**params,
) -> JSON:
return {
'command': __command,
'params': params,
}
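# Hedged illustration (not part of the original tests): request('save', name='x', flag='y')
# produces the plain JSON command below, which sign_request()/do_signed() then wrap by
# submitting it to the server's 'sign' command:
#   {"command": "save", "params": {"name": "x", "flag": "y"}}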
def test_help(store: Connection) -> None:
r = do_request(store, request('help'))
assert 'help(command' in r, 'no help'
assert 'list of commands' in r, 'no list'
assert '- flag' in r, 'no flag'
def test_sign(store: Connection) -> None:
r = do_request(store, request('sign', command='help'))
try:
json.loads(r)
except json.decoder.JSONDecodeError:
assert False, 'json decode fail'
def test_flag(store: Connection) -> None:
req = request('flag')
r = do_request(store, req)
assert r == 'signature required', 'sig required'
r = do_signed(store, req)
assert 'bad command' in r
def test_info(store: Connection) -> None:
req = request('info')
r = do_request(store, req)
assert r == 'signature required', 'sig required'
r = do_signed(store, req)
assert 'curve' in r, 'no curve'
assert 'generator' in r, 'no generator'
def test_spec(store: Connection) -> None:
req = request('spec', mode='sign')
r = do_request(store, req)
assert r == 'signature required', 'sig required'
r = do_signed(store, req)
assert 'def sign' in r, 'mode=sign'
req = request('spec', mode='verify')
r = do_signed(store, req)
assert 'def verify' in r, 'mode=verify'
req = request('spec', mode='request')
r = do_signed(store, req)
assert 'base64' in r, 'mode=request'
req = request('spec', mode='all')
all_r = do_signed(store, req)
req = request('spec')
r = do_signed(store, req)
assert all_r == r, 'default behavior'
req = request('spec', mode='asdf')
r = do_signed(store, req)
assert 'please use' in r
def test_save_and_list(store: Connection) -> None:
list_req = request('list')
r = do_request(store, list_req)
assert r == 'signature required', 'sig required (list)'
r = do_signed(store, list_req)
assert r == 'fbctf', 'fbctf only'
save_req = request('save')
r = do_request(store, save_req)
assert r == 'signature required', 'sig required (save)'
r = do_signed(store, save_req)
assert 'missing' in r, 'missing both'
save_req = request('save', name='myflag')
r = do_signed(store, save_req)
assert 'missing' in r, 'missing flag'
save_req = request('save', flag='asdf')
r = do_signed(store, save_req)
assert 'missing' in r, 'missing name'
save_req = request('save', name='myflag', flag='asdf')
r = do_signed(store, save_req)
assert 'stored' in r, 'stored: '
r = do_signed(store, save_req)
assert 'overwritten' in r, 'overwrite'
r = do_signed(store, list_req)
assert r == 'fbctf\nmyflag' or r == 'myflag\nfbctf', 'list both'
def test_bad(store: Connection) -> None:
req = request('bad')
r = do_request(store, req)
assert r == 'bad command!', 'bad'
def test_caps(store: Connection) -> None:
req = request('FLAG')
r = do_request(store, req)
assert 'invalid' in r, 'FLAG (nosig): ' + r
r = do_signed(store, req)
assert 'bad' in r, 'FLAG: ' + r
req = request('FlAg')
r = do_signed(store, req)
assert 'bad' in r, 'FlAg: ' + r
req = request('iNfO')
r = do_signed(store, req)
assert 'bad' in r, 'iNfO: ' + r
def test_badsig(store: Connection) -> None:
req = request('info')
signed = sign_request(store, req)
sig = base64.b64decode(signed['sig']).decode('utf-8').split('|')
r, s = list(map(int, sig))[:2]
modified_request = request('info')
modified_request['sig'] = base64.b64encode(bytes(str(r) + '|' + str(s), 'utf-8')).decode('utf-8')
res = do_request(store, modified_request)
assert 'curve' in res, 'normal failed'
modified_request = request('info')
modified_request['sig'] = base64.b64encode(bytes(str(r) + '|' + str(s+1), 'utf-8')).decode('utf-8')
res = do_request(store, modified_request)
assert 'invalid sig' in res, 'modified s'
modified_request = request('info')
modified_request['sig'] = base64.b64encode(bytes(str(r) + '|' + str(0), 'utf-8')).decode('utf-8')
res = do_request(store, modified_request)
assert 'invalid sig' in res, 's = 0'
modified_request = request('info')
modified_request['sig'] = base64.b64encode(bytes(str(r+1) + '|' + str(s), 'utf-8')).decode('utf-8')
res = do_request(store, modified_request)
assert 'invalid sig' in res, 'modified r'
modified_request = request('info')
modified_request['sig'] = base64.b64encode(bytes(str(0) + '|' + str(s), 'utf-8')).decode('utf-8')
res = do_request(store, modified_request)
assert 'invalid sig' in res, 'r = 0'
modified_request = request('info')
modified_request['sig'] = base64.b64encode(bytes(str(0) + '|' + str(0), 'utf-8')).decode('utf-8')
res = do_request(store, modified_request)
assert 'invalid sig' in res, 'r = s = 0'
modified_request = request('info')
modified_request['sig'] = base64.b64encode(b'asdf').decode('utf-8')
res = do_request(store, modified_request)
assert 'invalid sig' in res, 'sig=asdf'
def do_test(
test: TestFunction,
store: Connection,
) -> bool:
try:
test(store)
return True
except AssertionError as e:
print(e)
return False
def do_all_tests() -> Tuple[int, int]:
store = Connection()
prompt = store.recvuntil('\n> ')
if 'please provide S' in prompt:
# Need to do proof of work
from collections import namedtuple
from proof import cmd_solve
lines = prompt.split('\n')
prefix = lines[1].split('"')[1]
challenge = lines[4].split('"')[1]
if len(lines) > 5 and 'len(S)' in lines[5]:
length = int(lines[5].split(' == ')[-1])
else:
length = 20 # rando default
print(f'doing proof of work, {prefix} -> {challenge} (len {length})')
Args = namedtuple('Args', ['prefix', 'challenge', 'length'])
proof = cmd_solve(Args(prefix, challenge, length))
print(f'solved : {proof}')
store.sendline(proof)
check = store.recvline()
if 'invalid' in check or 'timeout' in check:
print('proof of work failed!')
exit(1)
prompt = check + store.recvuntil('\n> ')
success = 0
failure = 0
test_names = [g for g in globals().keys() if g.startswith('test_')]
for name in test_names:
func = globals()[name]
if callable(func):
if do_test(func, store): # type: ignore
print(f'{name}: pass')
success += 1
else:
print(f'{name}: fail')
failure += 1
return (success, failure)
if __name__ == '__main__':
success, failure = do_all_tests()
total = success + failure
print(f'{success} / {total} tests pass')
|
Hackerrank_problems/kangaroo/solution3.py | gbrls/CompetitiveCode | 205 | 11136295 | '''
LOGIC:
First, we compare the first kangaroo's position with the second one's at each jump;
if they are ever equal, the kangaroos meet at that point.
Second, if the first kangaroo's starting position and jump rate are both higher
(or both lower), then there is no chance of them meeting at the same place.
Likewise, if their positions differ but their jump rates are the same,
then there is no chance of them meeting at the same place.
(A non-looping alternative is sketched right after this docstring.)
'''
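# Hedged alternative sketch (not part of the submitted solution): the same answer follows
# without looping, since the kangaroos meet exactly when the gap in starting positions is
# a non-negative whole multiple of the gap in jump rates.
def meet_closed_form(x1, v1, x2, v2):
    if v1 == v2:
        return "YES" if x1 == x2 else "NO"
    jumps, remainder = divmod(x2 - x1, v1 - v2)
    return "YES" if remainder == 0 and jumps >= 0 else "NO"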
x1,v1,x2,v2 = map(int,input().split())
a = 0
while True:
k = x1 + a*v1
m = x2 + a*v2
if k==m:
print("YES")
break
elif k>m and v1>v2:
print("NO")
break
elif k<m and v2>v1:
print("NO")
break
    elif k!=m and v1==v2:  # different positions but equal jump rates -> they never meet
print("NO")
break
    a=a+1
|
addons/pythonscript_repl/input_box.py | blueflamegames/godot-python | 1,323 | 11136302 | <reponame>blueflamegames/godot-python
from godot import exposed, InputEventKey, KEY_UP, KEY_DOWN, LineEdit
@exposed(tool=True)
class InputBox(LineEdit):
def _enter_tree(self):
self.repl_node = self.get_parent().get_parent()
def _gui_input(self, event):
if isinstance(event, InputEventKey) and event.pressed:
if event.scancode == KEY_UP:
self.repl_node.up_pressed()
self.accept_event()
elif event.scancode == KEY_DOWN:
self.repl_node.down_pressed()
self.accept_event()
|
scipy/linalg/_decomp_svd.py | jake-is-ESD-protected/scipy | 9,095 | 11136313 | <filename>scipy/linalg/_decomp_svd.py
"""SVD decomposition functions."""
import numpy
from numpy import zeros, r_, diag, dot, arccos, arcsin, where, clip
# Local imports.
from ._misc import LinAlgError, _datacopied
from .lapack import get_lapack_funcs, _compute_lwork
from ._decomp import _asarray_validated
__all__ = ['svd', 'svdvals', 'diagsvd', 'orth', 'subspace_angles', 'null_space']
def svd(a, full_matrices=True, compute_uv=True, overwrite_a=False,
check_finite=True, lapack_driver='gesdd'):
"""
Singular Value Decomposition.
Factorizes the matrix `a` into two unitary matrices ``U`` and ``Vh``, and
a 1-D array ``s`` of singular values (real, non-negative) such that
``a == U @ S @ Vh``, where ``S`` is a suitably shaped matrix of zeros with
main diagonal ``s``.
Parameters
----------
a : (M, N) array_like
Matrix to decompose.
full_matrices : bool, optional
If True (default), `U` and `Vh` are of shape ``(M, M)``, ``(N, N)``.
If False, the shapes are ``(M, K)`` and ``(K, N)``, where
``K = min(M, N)``.
compute_uv : bool, optional
Whether to compute also ``U`` and ``Vh`` in addition to ``s``.
Default is True.
overwrite_a : bool, optional
Whether to overwrite `a`; may improve performance.
Default is False.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
lapack_driver : {'gesdd', 'gesvd'}, optional
Whether to use the more efficient divide-and-conquer approach
(``'gesdd'``) or general rectangular approach (``'gesvd'``)
to compute the SVD. MATLAB and Octave use the ``'gesvd'`` approach.
Default is ``'gesdd'``.
.. versionadded:: 0.18
Returns
-------
U : ndarray
Unitary matrix having left singular vectors as columns.
Of shape ``(M, M)`` or ``(M, K)``, depending on `full_matrices`.
s : ndarray
The singular values, sorted in non-increasing order.
Of shape (K,), with ``K = min(M, N)``.
Vh : ndarray
Unitary matrix having right singular vectors as rows.
Of shape ``(N, N)`` or ``(K, N)`` depending on `full_matrices`.
For ``compute_uv=False``, only ``s`` is returned.
Raises
------
LinAlgError
If SVD computation does not converge.
See Also
--------
svdvals : Compute singular values of a matrix.
diagsvd : Construct the Sigma matrix, given the vector s.
Examples
--------
>>> from scipy import linalg
>>> from numpy.random import default_rng
>>> rng = default_rng()
>>> m, n = 9, 6
>>> a = rng.standard_normal((m, n)) + 1.j*rng.standard_normal((m, n))
>>> U, s, Vh = linalg.svd(a)
>>> U.shape, s.shape, Vh.shape
((9, 9), (6,), (6, 6))
Reconstruct the original matrix from the decomposition:
>>> sigma = np.zeros((m, n))
>>> for i in range(min(m, n)):
... sigma[i, i] = s[i]
>>> a1 = np.dot(U, np.dot(sigma, Vh))
>>> np.allclose(a, a1)
True
Alternatively, use ``full_matrices=False`` (notice that the shape of
``U`` is then ``(m, n)`` instead of ``(m, m)``):
>>> U, s, Vh = linalg.svd(a, full_matrices=False)
>>> U.shape, s.shape, Vh.shape
((9, 6), (6,), (6, 6))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, Vh)))
True
>>> s2 = linalg.svd(a, compute_uv=False)
>>> np.allclose(s, s2)
True
"""
a1 = _asarray_validated(a, check_finite=check_finite)
if len(a1.shape) != 2:
raise ValueError('expected matrix')
m, n = a1.shape
overwrite_a = overwrite_a or (_datacopied(a1, a))
if not isinstance(lapack_driver, str):
raise TypeError('lapack_driver must be a string')
if lapack_driver not in ('gesdd', 'gesvd'):
raise ValueError('lapack_driver must be "gesdd" or "gesvd", not "%s"'
% (lapack_driver,))
funcs = (lapack_driver, lapack_driver + '_lwork')
gesXd, gesXd_lwork = get_lapack_funcs(funcs, (a1,), ilp64='preferred')
# compute optimal lwork
lwork = _compute_lwork(gesXd_lwork, a1.shape[0], a1.shape[1],
compute_uv=compute_uv, full_matrices=full_matrices)
# perform decomposition
u, s, v, info = gesXd(a1, compute_uv=compute_uv, lwork=lwork,
full_matrices=full_matrices, overwrite_a=overwrite_a)
if info > 0:
raise LinAlgError("SVD did not converge")
if info < 0:
        raise ValueError('illegal value in %dth argument of internal %s'
                         % (-info, lapack_driver))
if compute_uv:
return u, s, v
else:
return s
def svdvals(a, overwrite_a=False, check_finite=True):
"""
Compute singular values of a matrix.
Parameters
----------
a : (M, N) array_like
Matrix to decompose.
overwrite_a : bool, optional
Whether to overwrite `a`; may improve performance.
Default is False.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
s : (min(M, N),) ndarray
The singular values, sorted in decreasing order.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
``svdvals(a)`` only differs from ``svd(a, compute_uv=False)`` by its
handling of the edge case of empty ``a``, where it returns an
empty sequence:
>>> a = np.empty((0, 2))
>>> from scipy.linalg import svdvals
>>> svdvals(a)
array([], dtype=float64)
See Also
--------
svd : Compute the full singular value decomposition of a matrix.
diagsvd : Construct the Sigma matrix, given the vector s.
Examples
--------
>>> from scipy.linalg import svdvals
>>> m = np.array([[1.0, 0.0],
... [2.0, 3.0],
... [1.0, 1.0],
... [0.0, 2.0],
... [1.0, 0.0]])
>>> svdvals(m)
array([ 4.28091555, 1.63516424])
We can verify the maximum singular value of `m` by computing the maximum
length of `m.dot(u)` over all the unit vectors `u` in the (x,y) plane.
We approximate "all" the unit vectors with a large sample. Because
of linearity, we only need the unit vectors with angles in [0, pi].
>>> t = np.linspace(0, np.pi, 2000)
>>> u = np.array([np.cos(t), np.sin(t)])
>>> np.linalg.norm(m.dot(u), axis=0).max()
4.2809152422538475
`p` is a projection matrix with rank 1. With exact arithmetic,
its singular values would be [1, 0, 0, 0].
>>> v = np.array([0.1, 0.3, 0.9, 0.3])
>>> p = np.outer(v, v)
>>> svdvals(p)
array([ 1.00000000e+00, 2.02021698e-17, 1.56692500e-17,
8.15115104e-34])
The singular values of an orthogonal matrix are all 1. Here, we
create a random orthogonal matrix by using the `rvs()` method of
`scipy.stats.ortho_group`.
>>> from scipy.stats import ortho_group
>>> orth = ortho_group.rvs(4)
>>> svdvals(orth)
array([ 1., 1., 1., 1.])
"""
a = _asarray_validated(a, check_finite=check_finite)
if a.size:
return svd(a, compute_uv=0, overwrite_a=overwrite_a,
check_finite=False)
elif len(a.shape) != 2:
raise ValueError('expected matrix')
else:
return numpy.empty(0)
def diagsvd(s, M, N):
"""
Construct the sigma matrix in SVD from singular values and size M, N.
Parameters
----------
s : (M,) or (N,) array_like
Singular values
M : int
Size of the matrix whose singular values are `s`.
N : int
Size of the matrix whose singular values are `s`.
Returns
-------
S : (M, N) ndarray
The S-matrix in the singular value decomposition
See Also
--------
svd : Singular value decomposition of a matrix
svdvals : Compute singular values of a matrix.
Examples
--------
>>> from scipy.linalg import diagsvd
>>> vals = np.array([1, 2, 3])  # The singular values of some matrix
>>> diagsvd(vals, 3, 4)
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0]])
>>> diagsvd(vals, 4, 3)
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3],
[0, 0, 0]])
"""
part = diag(s)
typ = part.dtype.char
MorN = len(s)
if MorN == M:
return r_['-1', part, zeros((M, N-M), typ)]
elif MorN == N:
return r_[part, zeros((M-N, N), typ)]
else:
raise ValueError("Length of s must be M or N.")
# Orthonormal decomposition
def orth(A, rcond=None):
"""
Construct an orthonormal basis for the range of A using SVD
Parameters
----------
A : (M, N) array_like
Input array
rcond : float, optional
Relative condition number. Singular values ``s`` smaller than
``rcond * max(s)`` are considered zero.
Default: floating point eps * max(M,N).
Returns
-------
Q : (M, K) ndarray
Orthonormal basis for the range of A.
K = effective rank of A, as determined by rcond
See Also
--------
svd : Singular value decomposition of a matrix
null_space : Matrix null space
Examples
--------
>>> from scipy.linalg import orth
>>> A = np.array([[2, 0, 0], [0, 5, 0]]) # rank 2 array
>>> orth(A)
array([[0., 1.],
[1., 0.]])
>>> orth(A.T)
array([[0., 1.],
[1., 0.],
[0., 0.]])
"""
u, s, vh = svd(A, full_matrices=False)
M, N = u.shape[0], vh.shape[1]
if rcond is None:
rcond = numpy.finfo(s.dtype).eps * max(M, N)
tol = numpy.amax(s) * rcond
num = numpy.sum(s > tol, dtype=int)
Q = u[:, :num]
return Q
def null_space(A, rcond=None):
"""
Construct an orthonormal basis for the null space of A using SVD
Parameters
----------
A : (M, N) array_like
Input array
rcond : float, optional
Relative condition number. Singular values ``s`` smaller than
``rcond * max(s)`` are considered zero.
Default: floating point eps * max(M,N).
Returns
-------
Z : (N, K) ndarray
Orthonormal basis for the null space of A.
K = dimension of effective null space, as determined by rcond
See Also
--------
svd : Singular value decomposition of a matrix
orth : Matrix range
Examples
--------
1-D null space:
>>> from scipy.linalg import null_space
>>> A = np.array([[1, 1], [1, 1]])
>>> ns = null_space(A)
>>> ns * np.sign(ns[0,0]) # Remove the sign ambiguity of the vector
array([[ 0.70710678],
[-0.70710678]])
2-D null space:
>>> from numpy.random import default_rng
>>> rng = default_rng()
>>> B = rng.random((3, 5))
>>> Z = null_space(B)
>>> Z.shape
(5, 2)
>>> np.allclose(B.dot(Z), 0)
True
The basis vectors are orthonormal (up to rounding error):
>>> Z.T.dot(Z)
array([[ 1.00000000e+00, 6.92087741e-17],
[ 6.92087741e-17, 1.00000000e+00]])
"""
u, s, vh = svd(A, full_matrices=True)
M, N = u.shape[0], vh.shape[1]
if rcond is None:
rcond = numpy.finfo(s.dtype).eps * max(M, N)
tol = numpy.amax(s) * rcond
num = numpy.sum(s > tol, dtype=int)
Q = vh[num:,:].T.conj()
return Q
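# Complementarity sketch (illustrative, not part of the library code): for a
# real matrix ``A``, the row space and the null space are orthogonal
# complements, so the columns of ``orth(A.T)`` and ``null_space(A)`` together
# form an orthonormal basis of R**N:
#
#     >>> A = np.array([[1., 1.], [1., 1.]])
#     >>> Q = np.hstack([orth(A.T), null_space(A)])
#     >>> np.allclose(Q.T.dot(Q), np.eye(2))
#     True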
def subspace_angles(A, B):
r"""
Compute the subspace angles between two matrices.
Parameters
----------
A : (M, N) array_like
The first input array.
B : (M, K) array_like
The second input array.
Returns
-------
angles : ndarray, shape (min(N, K),)
The subspace angles between the column spaces of `A` and `B` in
descending order.
See Also
--------
orth
svd
Notes
-----
This computes the subspace angles according to the formula
provided in [1]_. For equivalence with MATLAB and Octave behavior,
use ``angles[0]``.
.. versionadded:: 1.0
References
----------
.. [1] <NAME>, <NAME> (2002) Principal Angles between Subspaces
in an A-Based Scalar Product: Algorithms and Perturbation
Estimates. SIAM J. Sci. Comput. 23:2008-2040.
Examples
--------
A Hadamard matrix has orthogonal columns, so we expect the subspace
angle between its column blocks to be :math:`\frac{\pi}{2}`:
>>> from numpy.random import default_rng
>>> from scipy.linalg import hadamard, subspace_angles
>>> rng = default_rng()
>>> H = hadamard(4)
>>> print(H)
[[ 1 1 1 1]
[ 1 -1 1 -1]
[ 1 1 -1 -1]
[ 1 -1 -1 1]]
>>> np.rad2deg(subspace_angles(H[:, :2], H[:, 2:]))
array([ 90., 90.])
And the subspace angle of a matrix to itself should be zero:
>>> subspace_angles(H[:, :2], H[:, :2]) <= 2 * np.finfo(float).eps
array([ True, True], dtype=bool)
The angles between non-orthogonal subspaces are in between these extremes:
>>> x = rng.standard_normal((4, 3))
>>> np.rad2deg(subspace_angles(x[:, :2], x[:, [2]]))
array([ 55.832]) # random
"""
# Steps here omit the U and V calculation steps from the paper
# 1. Compute orthonormal bases of column-spaces
A = _asarray_validated(A, check_finite=True)
if len(A.shape) != 2:
raise ValueError('expected 2D array, got shape %s' % (A.shape,))
QA = orth(A)
del A
B = _asarray_validated(B, check_finite=True)
if len(B.shape) != 2:
raise ValueError('expected 2D array, got shape %s' % (B.shape,))
if len(B) != len(QA):
raise ValueError('A and B must have the same number of rows, got '
'%s and %s' % (QA.shape[0], B.shape[0]))
QB = orth(B)
del B
# 2. Compute SVD for cosine
QA_H_QB = dot(QA.T.conj(), QB)
sigma = svdvals(QA_H_QB)
# 3. Compute matrix B
if QA.shape[1] >= QB.shape[1]:
B = QB - dot(QA, QA_H_QB)
else:
B = QA - dot(QB, QA_H_QB.T.conj())
del QA, QB, QA_H_QB
# 4. Compute SVD for sine
mask = sigma ** 2 >= 0.5
if mask.any():
mu_arcsin = arcsin(clip(svdvals(B, overwrite_a=True), -1., 1.))
else:
mu_arcsin = 0.
# 5. Compute the principal angles
# with reverse ordering of sigma because smallest sigma belongs to largest
# angle theta
theta = where(mask, mu_arcsin, arccos(clip(sigma[::-1], -1., 1.)))
return theta
|
Algo and DSA/LeetCode-Solutions-master/Python/graph-connectivity-with-threshold.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 11136364 | <reponame>Sourav692/FAANG-Interview-Preparation<filename>Algo and DSA/LeetCode-Solutions-master/Python/graph-connectivity-with-threshold.py<gh_stars>1000+
# Time: O((nlogn + q) * α(n)) ~= O(nlogn + q)
# Space: O(n)
class UnionFind(object): # Time: O(n * α(n)), Space: O(n)
def __init__(self, n):
self.set = range(n)
self.rank = [0]*n
def find_set(self, x):
stk = []
while self.set[x] != x: # path compression
stk.append(x)
x = self.set[x]
while stk:
self.set[stk.pop()] = x
return x
def union_set(self, x, y):
x_root, y_root = map(self.find_set, (x, y))
if x_root == y_root:
return False
if self.rank[x_root] < self.rank[y_root]: # union by rank
self.set[x_root] = y_root
elif self.rank[x_root] > self.rank[y_root]:
self.set[y_root] = x_root
else:
self.set[y_root] = x_root
self.rank[x_root] += 1
return True
class Solution(object):
def areConnected(self, n, threshold, queries):
"""
:type n: int
:type threshold: int
:type queries: List[List[int]]
:rtype: List[bool]
"""
union_find = UnionFind(n)
for i in xrange(threshold+1, n+1):
# https://stackoverflow.com/questions/25905118/finding-big-o-of-the-harmonic-series
# sum of harmonic series is O(logn)
for j in xrange(2*i, n+1, i): # step by i
union_find.union_set(i-1, j-1)
return [union_find.find_set(q[0]-1) == union_find.find_set(q[1]-1) for q in queries]
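# Usage sketch (illustrative, not part of the original solution). With n = 6
# and threshold = 2, only the shared divisor 3 (> threshold) links a pair of
# cities, namely 3 and 6, so:
#
#   print Solution().areConnected(6, 2, [[1, 4], [2, 5], [3, 6]])
#   # -> [False, False, True]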
|
uproot3/interp/objects.py | NiclasEich/uproot3 | 320 | 11136380 | <reponame>NiclasEich/uproot3
#!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot3/blob/master/LICENSE
from __future__ import absolute_import
import copy
import struct
import numbers
import numpy
import uproot3.rootio
import uproot3.interp.interp
import uproot3.interp.numerical
import uproot3.interp.jagged
from uproot3._util import _tobytes
class SimpleArray(object):
def __init__(self, cls):
self.cls = cls
@property
def __name__(self):
return "SimpleArray"
def __repr__(self):
if isinstance(self.cls, type):
return "SimpleArray({0})".format(self.cls.__name__)
else:
return "SimpleArray({0})".format(repr(self.cls))
def read(self, source, cursor, context, parent):
out = []
while True:
if hasattr(source, "_source") and cursor.index >= len(source._source):
return out
try:
out.append(self.cls.read(source, cursor, context, parent))
except IndexError:
return out
class STLVector(object):
def __init__(self, cls):
self.cls = cls
@property
def __name__(self):
return "STLVector"
def __repr__(self):
if isinstance(self.cls, type):
return "STLVector({0})".format(self.cls.__name__)
else:
return "STLVector({0})".format(repr(self.cls))
_format1 = struct.Struct(">i")
def read(self, source, cursor, context, parent):
if hasattr(source, "_source") and len(source._source) == 0:
return []
numitems = cursor.field(source, self._format1)
if isinstance(self.cls, uproot3.interp.numerical.asdtype):
out = cursor.array(source, numitems, self.cls.fromdtype)
if out.dtype != self.cls.todtype:
out = out.astype(self.cls.todtype)
return list(out)
else:
out = [None] * numitems
for i in range(numitems):
out[i] = self.cls.read(source, cursor, context, parent)
return out
class STLMap(object):
def __init__(self, keycls, valcls):
self.keycls = keycls
self.valcls = valcls
@property
def __name__(self):
return "STLMap"
def __repr__(self):
key = self.keycls.__name__ if isinstance(self.keycls, type) else repr(self.keycls)
val = self.valcls.__name__ if isinstance(self.valcls, type) else repr(self.valcls)
return "STLMap({0}, {1})".format(key, val)
_format1 = struct.Struct(">i")
def read(self, source, cursor, context, parent):
numitems = cursor.field(source, self._format1)
out = {}
for i in range(numitems):
if isinstance(self.keycls, uproot3.interp.numerical.asdtype):
key = cursor.array(source, 1, self.keycls.fromdtype)
if key.dtype != self.keycls.todtype:
key = key.astype(self.keycls.todtype)
key = key[0]
else:
key = self.keycls.read(source, cursor, context, parent)
if isinstance(self.valcls, uproot3.interp.numerical.asdtype):
val = cursor.array(source, 1, self.valcls.fromdtype)
if val.dtype != self.valcls.todtype:
val = val.astype(self.valcls.todtype)
val = val[0]
else:
val = self.valcls.read(source, cursor, context, parent)
out[key] = val
return out
class STLString(object):
def __init__(self, awkward0=None):
if awkward0 is None:
awkward0 = uproot3.interp.interp.Interpretation.awkward0
self.awkward0 = awkward0
@property
def __name__(self):
return "STLString"
def __repr__(self):
return "STLString()"
_format1 = struct.Struct("B")
_format2 = struct.Struct(">i")
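# ROOT string encoding: a 1-byte length prefix; the escape value 255 means a
# 4-byte big-endian length follows, then the raw characters are read.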
def read(self, source, cursor, context, parent):
numitems = cursor.field(source, self._format1)
if numitems == 255:
numitems = cursor.field(source, self._format2)
return _tobytes(cursor.array(source, numitems, self.awkward0.ObjectArray.CHARTYPE))
class Pointer(object):
def __init__(self, cls):
self.cls = cls
@property
def __name__(self):
return "Pointer"
def __repr__(self):
if isinstance(self.cls, type):
return "Pointer({0})".format(self.cls.__name__)
else:
return "Pointer({0})".format(repr(self.cls))
def read(self, source, cursor, context, parent):
return uproot3.rootio._readobjany(source, cursor, context, parent)
_format1 = struct.Struct(">II")
class astable(uproot3.interp.interp.Interpretation):
# makes __doc__ attribute mutable before Python 3.3
__metaclass__ = type.__new__(type, "type", (uproot3.interp.interp.Interpretation.__metaclass__,), {})
def __init__(self, content):
if not isinstance(content, uproot3.interp.numerical.asdtype) or content.todtype.names is None or len(content.todtype.names) == 0:
raise TypeError("astable must be given a recarray dtype")
self.content = content
@property
def itemsize(self):
return self.content.itemsize
def __repr__(self):
dtype, shape = uproot3.interp.numerical._dtypeshape(self.content.todtype)
return "astable({0})".format(repr(self.content.to(self.awkward0.util.numpy.dtype([(n, dtype[n]) for n in dtype.names if not n.startswith(" ")]), shape)))
def tonumpy(self):
return self.content
@property
def identifier(self):
dtype, shape = uproot3.interp.numerical._dtypeshape(self.content.todtype)
return "astable({0})".format(self.content.identifier)
@property
def type(self):
dtype, shape = uproot3.interp.numerical._dtypeshape(self.content.todtype)
fields = None
for n in dtype.names:
if fields is None:
fields = self.awkward0.type.ArrayType(n, dtype[n])
else:
fields = fields & self.awkward0.type.ArrayType(n, dtype[n])
if shape == ():
return fields
else:
return self.awkward0.type.ArrayType(*(shape + (fields,)))
def empty(self):
return self.awkward0.Table.fromrec(self.content.empty())
def compatible(self, other):
return isinstance(other, astable) and self.content.compatible(other.content)
def numitems(self, numbytes, numentries):
return self.content.numitems(numbytes, numentries)
def source_numitems(self, source):
return self.content.source_numitems(source)
def fromroot(self, data, byteoffsets, local_entrystart, local_entrystop, keylen):
return self.content.fromroot(data, byteoffsets, local_entrystart, local_entrystop, keylen)
def destination(self, numitems, numentries):
return self.content.destination(numitems, numentries)
def fill(self, source, destination, itemstart, itemstop, entrystart, entrystop):
return self.content.fill(source, destination, itemstart, itemstop, entrystart, entrystop)
def clip(self, destination, itemstart, itemstop, entrystart, entrystop):
return self.content.clip(destination, itemstart, itemstop, entrystart, entrystop)
def finalize(self, destination, branch):
out = self.awkward0.Table.fromrec(self.content.finalize(destination, branch))
if self.debug_reading:
print("reading {0}".format(repr(out)))
return out
class asobj(uproot3.interp.interp.Interpretation):
# makes __doc__ attribute mutable before Python 3.3
__metaclass__ = type.__new__(type, "type", (uproot3.interp.interp.Interpretation.__metaclass__,), {})
def __init__(self, content, cls):
self.content = content
self.cls = cls
@property
def itemsize(self):
return self.content.itemsize
def __repr__(self):
return "asobj(<{0}.{1}>)".format(self.cls.__module__, self.cls.__name__)
@property
def identifier(self):
return "asobj({0},{1}.{2})".format(self.content.identifier, self.cls.__module__, self.cls.__name__)
@property
def type(self):
return self.cls
def empty(self):
return self.content.empty()
def compatible(self, other):
return isinstance(other, asobj) and self.cls.__name__ == other.cls.__name__
def numitems(self, numbytes, numentries):
return self.content.numitems(numbytes, numentries)
def source_numitems(self, source):
return self.content.source_numitems(source)
def fromroot(self, data, byteoffsets, local_entrystart, local_entrystop, keylen):
return self.content.fromroot(data, byteoffsets, local_entrystart, local_entrystop, keylen)
def destination(self, numitems, numentries):
return self.content.destination(numitems, numentries)
def fill(self, source, destination, itemstart, itemstop, entrystart, entrystop):
return self.content.fill(source, destination, itemstart, itemstop, entrystart, entrystop)
def clip(self, destination, itemstart, itemstop, entrystart, entrystop):
return self.content.clip(destination, itemstart, itemstop, entrystart, entrystop)
def finalize(self, destination, branch):
if self.cls._arraymethods is None:
out = self.awkward0.ObjectArray(self.content.finalize(destination, branch), self.cls._fromrow)
else:
cls = self.awkward0.Methods.mixin(self.cls._arraymethods, self.awkward0.ObjectArray)
out = cls.__new__(cls)
out._initObjectArray(self.content.finalize(destination, branch))
if self.debug_reading:
print("reading {0}".format(repr(out)))
return out
class _variable(uproot3.interp.interp.Interpretation):
def __init__(self, content, generator, *args, **kwargs):
self.content = content
self.generator = generator
self.args = args
self.kwargs = kwargs
def __repr__(self):
return "_variable({0}, {1}{2}{3})".format(repr(self.content), self.generator, "".join(", " + repr(x) for x in self.args), "".join(", {0}={1}".format(n, repr(x)) for n, x in self.kwargs.items()))
@property
def identifier(self):
return "_variable({0},{1}{2}{3})".format(self.content.identifier, self.generator, "".join("," + repr(x) for x in self.args), "".join(",{0}={1}".format(n, repr(self.kwargs[n])) for n in sorted(self.kwargs)))
@property
def type(self):
return self.generator
def empty(self):
return self.awkward0.ObjectArray(self.content.empty(), self.generator, *self.args, **self.kwargs)
def compatible(self, other):
return isinstance(other, _variable) and self.content.compatible(other) and self.generator == other.generator and self.args == other.args and self.kwargs == other.kwargs
def numitems(self, numbytes, numentries):
return self.content.numitems(numbytes, numentries)
def source_numitems(self, source):
return self.content.source_numitems(source)
def fromroot(self, data, byteoffsets, local_entrystart, local_entrystop, keylen):
return self.content.fromroot(data, byteoffsets, local_entrystart, local_entrystop, keylen)
def destination(self, numitems, numentries):
return self.content.destination(numitems, numentries)
def fill(self, source, destination, itemstart, itemstop, entrystart, entrystop):
self.content.fill(source, destination, itemstart, itemstop, entrystart, entrystop)
def clip(self, destination, itemstart, itemstop, entrystart, entrystop):
return self.content.clip(destination, itemstart, itemstop, entrystart, entrystop)
def finalize(self, destination, branch):
out = self.awkward0.ObjectArray(self.content.finalize(destination, branch), self.generator, *self.args, **self.kwargs)
if self.debug_reading:
print("reading {0}".format(repr(out)))
return out
class _variable_withoffsets(_variable):
def fromroot(self, data, byteoffsets, local_entrystart, local_entrystop, keylen):
out = self.content.fromroot(data, byteoffsets, local_entrystart, local_entrystop, keylen)
out.byteoffsets = byteoffsets[local_entrystart:local_entrystop] + keylen + self.content.skipbytes
return out
def destination(self, numitems, numentries):
out = self.content.destination(numitems, numentries)
out.byteoffsets = self.awkward0.numpy.empty(numentries, dtype=self.awkward0.numpy.int32)
return out
def fill(self, source, destination, itemstart, itemstop, entrystart, entrystop):
self.content.fill(source, destination, itemstart, itemstop, entrystart, entrystop)
destination.byteoffsets[entrystart:entrystop] = source.byteoffsets
def clip(self, destination, itemstart, itemstop, entrystart, entrystop):
out = self.content.clip(destination, itemstart, itemstop, entrystart, entrystop)
out.byteoffsets = destination.byteoffsets[entrystart:entrystop]
return out
def finalize(self, destination, branch):
out = self.awkward0.ObjectArray(JaggedWithByteOffsets(self.content.finalize(destination, branch), destination.byteoffsets), self.generator, *self.args, **self.kwargs)
if self.debug_reading:
print("reading {0}".format(repr(out)))
return out
class JaggedWithByteOffsets(object):
def __init__(self, jagged, byteoffsets):
self.jagged = jagged
self.byteoffsets = byteoffsets
def __len__(self):
return len(self.jagged)
def __getitem__(self, where):
return self.jagged[where], -self.byteoffsets[where]
class asgenobj(_variable_withoffsets):
# makes __doc__ attribute mutable before Python 3.3
__metaclass__ = type.__new__(type, "type", (_variable.__metaclass__,), {})
class _Wrapper(object):
def __init__(self, cls, context):
self.cls = cls
self.context = context
def __call__(self, arg):
bytes, origin = arg
source = uproot3.source.source.Source(bytes)
cursor = uproot3.source.cursor.Cursor(0, origin=origin)
return self.cls.read(source, cursor, self.context, None)
def __repr__(self):
if isinstance(self.cls, type):
return self.cls.__name__
else:
return repr(self.cls)
def __init__(self, cls, context, skipbytes):
super(asgenobj, self).__init__(uproot3.interp.jagged.asjagged(uproot3.interp.numerical.asdtype(self.awkward0.ObjectArray.CHARTYPE), skipbytes=skipbytes), asgenobj._Wrapper(cls, context))
def speedbump(self, value):
out = copy.copy(self)
out.generator = copy.copy(self.generator)
out.generator.context = copy.copy(out.generator.context)
out.generator.context.speedbump = value
return out
def compatible(self, other):
return isinstance(other, asgenobj) and self.generator.cls.__name__ == other.generator.cls.__name__
def __repr__(self):
return "asgenobj({0})".format(self.generator)
class asstring(_variable):
# makes __doc__ attribute mutable before Python 3.3
__metaclass__ = type.__new__(type, "type", (_variable.__metaclass__,), {})
def __init__(self, skipbytes=1):
super(asstring, self).__init__(uproot3.interp.jagged.asjagged(uproot3.interp.numerical.asdtype(self.awkward0.ObjectArray.CHARTYPE), skipbytes=skipbytes), lambda array: _tobytes(array))
def __repr__(self):
return "asstring({0})".format("" if self.content.skipbytes == 1 else repr(self.content.skipbytes))
@property
def identifier(self):
return "asstring({0})".format("" if self.content.skipbytes == 1 else repr(self.content.skipbytes))
def compatible(self, other):
return isinstance(other, asstring)
|
pydgin/sim.py | kevinyuan/pydgin | 159 | 11136383 | #=======================================================================
# sim.py
#=======================================================================
# This is the common top-level simulator. ISA implementations can use
# various hooks to configure the behavior.
import os
import sys
# ensure we know where the pypy source code is
# XXX: removed the dependency on PYDGIN_PYPY_SRC_DIR because rpython
# libraries are much slower than native python when running on an
# interpreter. So unless the user has added the rpython source to their
# PYTHONPATH, we should use native python.
#try:
# sys.path.append( os.environ['PYDGIN_PYPY_SRC_DIR'] )
#except KeyError as e:
# print "NOTE: PYDGIN_PYPY_SRC_DIR not defined, using pure python " \
# "implementation"
from pydgin.debug import Debug, pad, pad_hex
from pydgin.misc import FatalError, NotImplementedInstError
from pydgin.jit import JitDriver, hint, set_user_param, set_param
def jitpolicy(driver):
from rpython.jit.codewriter.policy import JitPolicy
return JitPolicy()
#-------------------------------------------------------------------------
# Sim
#-------------------------------------------------------------------------
# Abstract simulator class
class Sim( object ):
def __init__( self, arch_name_human, arch_name="", jit_enabled=False ):
# the human-friendly architecture name can contain large caps, special
# characters etc.
self.arch_name_human = arch_name_human
if arch_name == "":
self.arch_name = arch_name_human.lower()
else:
self.arch_name = arch_name
self.jit_enabled = jit_enabled
if jit_enabled:
self.jitdriver = JitDriver( greens =['pc',],
reds = ['max_insts', 'state', 'sim',],
virtualizables =['state',],
get_printable_location=self.get_location,
)
# Set the default trace limit here. Different ISAs can override this
# value if necessary
self.default_trace_limit = 400000
self.max_insts = 0
#-----------------------------------------------------------------------
# decode
#-----------------------------------------------------------------------
# This needs to be implemented in the child class
def decode( self, bits ):
raise NotImplementedError()
#-----------------------------------------------------------------------
# hooks provided for isa implementations
#-----------------------------------------------------------------------
def pre_execute( self ):
pass
def post_execute( self ):
pass
#-----------------------------------------------------------------------
# init_state
#-----------------------------------------------------------------------
# This needs to be implemented in the child class
def init_state( self, exe_file, exe_name, run_argv, envp, testbin ):
raise NotImplementedError()
#-----------------------------------------------------------------------
# help message
#-----------------------------------------------------------------------
# the help message to display on --help
help_message = """
Pydgin %s Instruction Set Simulator
usage: %s <args> <sim_exe> <sim_args>
<sim_exe> the executable to be simulated
<sim_args> arguments to be passed to the simulated executable
<args> the following optional arguments are supported:
--help,-h Show this message and exit
--test Run in testing mode (for running asm tests)
--env,-e <NAME>=<VALUE>
Set an environment variable to be passed to the
simulated program. Can use multiple --env flags to set
multiple environment variables.
--debug,-d <flags>[:<start_after>]
Enable debug flags in a comma-separated form (e.g.
"--debug syscalls,insts"). If provided, debugs starts
after <start_after> cycles. The following flags are
supported:
insts cycle-by-cycle instructions
rf register file accesses
mem memory accesses
regdump register dump
syscalls syscall information
bootstrap initial stack and register state
--max-insts <i> Run until the maximum number of instructions
--jit <flags> Set flags to tune the JIT (see
rpython.rlib.jit.PARAMETER_DOCS)
"""
#-----------------------------------------------------------------------
# get_location
#-----------------------------------------------------------------------
# for debug printing in PYPYLOG
@staticmethod
def get_location( pc ):
# TODO: add the disassembly of the instruction here as well
return "pc: %x" % pc
#-----------------------------------------------------------------------
# run
#-----------------------------------------------------------------------
def run( self ):
self = hint( self, promote=True )
s = self.state
max_insts = self.max_insts
jitdriver = self.jitdriver
while s.running:
jitdriver.jit_merge_point(
pc = s.fetch_pc(),
max_insts = max_insts,
state = s,
sim = self,
)
# constant-fold pc and mem
pc = hint( s.fetch_pc(), promote=True )
old = pc
mem = hint( s.mem, promote=True )
if s.debug.enabled( "insts" ):
print pad( "%x" % pc, 8, " ", False ),
# the print statement in memcheck conflicts with @elidable in iread.
# So we use normal read if memcheck is enabled which includes the
# memory checks
if s.debug.enabled( "memcheck" ):
inst_bits = mem.read( pc, 4 )
else:
# we use trace elidable iread instead of just read
inst_bits = mem.iread( pc, 4 )
try:
inst, exec_fun = self.decode( inst_bits )
if s.debug.enabled( "insts" ):
print "%s %s %s" % (
pad_hex( inst_bits ),
pad( inst.str, 12 ),
pad( "%d" % s.num_insts, 8 ), ),
self.pre_execute()
exec_fun( s, inst )
except NotImplementedInstError:
# re-decode instruction to get the instruction name
inst, _ = self.decode( inst_bits )
print "Instruction not implemented: %s (pc: 0x%s), aborting!" \
% ( inst.str, pad_hex( pc ) )
break
except FatalError as error:
print "Exception in execution (pc: 0x%s), aborting!" % pad_hex( pc )
print "Exception message: %s" % error.msg
break
s.num_insts += 1 # TODO: should this be done inside instruction definition?
if s.stats_en: s.stat_num_insts += 1
self.post_execute()
if s.debug.enabled( "insts" ):
print
if s.debug.enabled( "regdump" ):
s.rf.print_regs( per_row=4 )
# check if we have reached the end of the maximum instructions and
# exit if necessary
if max_insts != 0 and s.num_insts >= max_insts:
print "Reached the max_insts (%d), exiting." % max_insts
break
if s.fetch_pc() < old:
jitdriver.can_enter_jit(
pc = s.fetch_pc(),
max_insts = max_insts,
state = s,
sim = self,
)
print 'DONE! Status =', s.status
print 'Instructions Executed =', s.num_insts
#-----------------------------------------------------------------------
# get_entry_point
#-----------------------------------------------------------------------
# generates and returns the entry_point function used to start the
# simulator
def get_entry_point( self ):
def entry_point( argv ):
# set the trace_limit parameter of the jitdriver
if self.jit_enabled:
set_param( self.jitdriver, "trace_limit", self.default_trace_limit )
filename_idx = 0
debug_flags = []
debug_starts_after = 0
testbin = False
max_insts = 0
envp = []
# we're using a mini state machine to parse the args
prev_token = ""
# list of tokens that require an additional arg
tokens_with_args = [ "-h", "--help",
"-e", "--env",
"-d", "--debug",
"--max-insts",
"--jit",
]
# go through the args one by one and parse accordingly
for i in xrange( 1, len( argv ) ):
token = argv[i]
if prev_token == "":
if token == "--help" or token == "-h":
print self.help_message % ( self.arch_name_human, argv[0] )
return 0
elif token == "--test":
testbin = True
elif token == "--debug" or token == "-d":
prev_token = token
# warn the user if debugs are not enabled for this translation
if not Debug.global_enabled:
print "WARNING: debugs are not enabled for this translation. " + \
"To allow debugs, translate with --debug option."
elif token in tokens_with_args:
prev_token = token
elif token[:1] == "-":
# unknown option
print "Unknown argument %s" % token
return 1
else:
# this marks the start of the program name
filename_idx = i
break
else:
if prev_token == "--env" or prev_token == "-e":
envp.append( token )
elif prev_token == "--debug" or prev_token == "-d":
# if debug start after provided (using a colon), parse it
debug_tokens = token.split( ":" )
if len( debug_tokens ) > 1:
debug_starts_after = int( debug_tokens[1] )
debug_flags = debug_tokens[0].split( "," )
elif prev_token == "--max-insts":
self.max_insts = int( token )
elif prev_token == "--jit":
# pass the jit flags to rpython.rlib.jit
set_user_param( self.jitdriver, token )
prev_token = ""
if filename_idx == 0:
print "You must supply a filename"
return 1
# create a Debug object which contains the debug flags
self.debug = Debug( debug_flags, debug_starts_after )
filename = argv[ filename_idx ]
# args after program are args to the simulated program
run_argv = argv[ filename_idx : ]
# Open the executable for reading
try:
exe_file = open( filename, 'rb' )
except IOError:
print "Could not open file %s" % filename
return 1
# Call ISA-dependent init_state to load program, initialize memory
# etc.
self.init_state( exe_file, filename, run_argv, envp, testbin )
# pass the state to debug for cycle-triggered debugging
self.debug.set_state( self.state )
# Close after loading
exe_file.close()
# Execute the program
self.run()
return 0
return entry_point
#-----------------------------------------------------------------------
# target
#-----------------------------------------------------------------------
# Enables RPython translation of our interpreter.
def target( self, driver, args ):
# if --debug flag is provided in translation, we enable debug printing
if "--debug" in args:
print "Enabling debugging"
Debug.global_enabled = True
else:
print "Disabling debugging"
# form a name
exe_name = "pydgin-%s" % self.arch_name
if driver.config.translation.jit:
exe_name += "-jit"
else:
exe_name += "-nojit"
if Debug.global_enabled:
exe_name += "-debug"
print "Translated binary name:", exe_name
driver.exe_name = exe_name
# NOTE: RPython has an assertion to check the type of entry_point to
# be function (not a bound method). So we use get_entry_point which
# generates a function type
#return self.entry_point, None
return self.get_entry_point(), None
#-------------------------------------------------------------------------
# init_sim
#-------------------------------------------------------------------------
# Simulator implementations need to call this function at the top level.
# This takes care of adding target function to top level environment and
# running the simulation in interpreted mode if directly called
# ( __name__ == "__main__" )
def init_sim( sim ):
# this is a bit hacky: we get the global variables of the caller from
# the stack frame to determine if this is being run top level and add
# target function required by rpython toolchain
caller_globals = sys._getframe(1).f_globals
caller_name = caller_globals[ "__name__" ]
# add target function to top level
caller_globals[ "target" ] = sim.target
#-----------------------------------------------------------------------
# main
#-----------------------------------------------------------------------
# Enables CPython simulation of our interpreter.
if caller_name == "__main__":
# enable debug flags in interpreted mode
Debug.global_enabled = True
sys.exit(sim.get_entry_point()( sys.argv ))
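# Typical ISA-side wiring (sketch only; the class and helper names below are
# placeholders, not part of pydgin itself):
#
#   class MyIsaSim( Sim ):
#     def __init__( self ):
#       Sim.__init__( self, "MyISA", jit_enabled=True )
#     def decode( self, bits ):
#       return my_decode_table( bits )    # returns (inst, exec_fun)
#     def init_state( self, exe_file, exe_name, run_argv, envp, testbin ):
#       self.state = my_load_program( exe_file, run_argv, envp, self.debug )
#
#   init_sim( MyIsaSim() )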
|
lib/utils/meshrenderer/gl_utils/tiles.py | bertid/clean-pvnet | 284 | 11136391 | <reponame>bertid/clean-pvnet
import numpy as np
import cv2
def tiles(batch, rows, cols, spacing_x=0, spacing_y=0, scale=1.0):
if batch.ndim == 4:
N, H, W, C = batch.shape
elif batch.ndim == 3:
N, H, W = batch.shape
C = 1
else:
raise ValueError('Invalid batch shape: {}'.format(batch.shape))
H = int(H*scale)
W = int(W*scale)
img = np.ones((rows*H+(rows-1)*spacing_y, cols*W+(cols-1)*spacing_x, C))
i = 0
for row in range(rows):
for col in range(cols):
start_y = row*(H+spacing_y)
end_y = start_y + H
start_x = col*(W+spacing_x)
end_x = start_x + W
if i < N:
if C > 1:
img[start_y:end_y,start_x:end_x,:] = cv2.resize(batch[i], (W,H))
else:
img[start_y:end_y,start_x:end_x,0] = cv2.resize(batch[i], (W,H))
i += 1
return img
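# Usage sketch (illustrative): tiling twelve 32x32 grayscale images into a
# 3x4 grid with 2-pixel gaps yields a (3*32 + 2*2, 4*32 + 3*2, 1) canvas.
#
#   batch = np.random.rand(12, 32, 32)
#   grid = tiles(batch, 3, 4, spacing_x=2, spacing_y=2)
#   assert grid.shape == (100, 134, 1)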
def tiles4(batch, rows, cols, spacing_x=0, spacing_y=0, scale=1.0):
if batch.ndim == 4:
N, H, W, C = batch.shape
assert C == 4
H = int(H*scale)
W = int(W*scale)
img = np.ones((2*rows*H+(2*rows-1)*spacing_y, cols*W+(cols-1)*spacing_x, 3))
i = 0
for row in range(0, 2*rows, 2):
for col in range(cols):
start_y = row*(H+spacing_y)
end_y = start_y + H
start_x = col*(W+spacing_x)
end_x = start_x + W
if i < N:
rgb = batch[i,:,:,:3]
depth = batch[i,:,:,3:4]
depth = np.tile(depth, (1,1,3))
img[start_y:end_y,start_x:end_x,:] = cv2.resize(rgb, (W,H))
img[end_y:end_y+H,start_x:end_x,:] = cv2.resize(depth, (W,H))
i += 1
return img |
src/invoice2data/extract/plugins/tables.py | bosd/invoice2data | 944 | 11136397 | <reponame>bosd/invoice2data<filename>src/invoice2data/extract/plugins/tables.py
"""
Plugin to extract tables from an invoice.
"""
import re
import logging
logger = logging.getLogger(__name__)
DEFAULT_OPTIONS = {"field_separator": r"\s+", "line_separator": r"\n"}
def extract(self, content, output):
"""Try to extract tables from an invoice"""
for table in self["tables"]:
# First apply default options.
plugin_settings = DEFAULT_OPTIONS.copy()
plugin_settings.update(table)
table = plugin_settings
# Validate settings
assert "start" in table, "Table start regex missing"
assert "end" in table, "Table end regex missing"
assert "body" in table, "Table body regex missing"
start = re.search(table["start"], content)
end = re.search(table["end"], content)
if not start or not end:
logger.warning("no table body found - start %s, end %s", start, end)
continue
table_body = content[start.end() : end.start()]
logger.debug("START table body content ========================")
logger.debug(table_body)
logger.debug("END table body content ==========================")
logger.debug(f"Regex pattern = {table['body']}")
for line in re.split(table["line_separator"], table_body):
# skip blank lines
if not line.strip("").strip("\n") or not line:
continue
match = re.search(table["body"], line)
if match:
for field, value in match.groupdict().items():
# If a field name already exists, do not overwrite it
if field in output:
continue
if field.startswith("date") or field.endswith("date"):
output[field] = self.parse_date(value)
if not output[field]:
logger.error("Date parsing failed on date '%s'", value)
return None
elif field.startswith("amount"):
output[field] = self.parse_number(value)
else:
output[field] = value
logger.debug("ignoring *%s* because it doesn't match anything", line)
|
venv/Lib/site-packages/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py | richung99/digitizePlots | 585 | 11136446 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..utils import ComputeTDI
def test_ComputeTDI_inputs():
input_map = dict(
args=dict(
argstr="%s",
),
contrast=dict(
argstr="-constrast %s",
),
data_type=dict(
argstr="-datatype %s",
),
dixel=dict(
argstr="-dixel %s",
extensions=None,
),
ends_only=dict(
argstr="-ends_only",
),
environ=dict(
nohash=True,
usedefault=True,
),
fwhm_tck=dict(
argstr="-fwhm_tck %f",
),
in_file=dict(
argstr="%s",
extensions=None,
mandatory=True,
position=-2,
),
in_map=dict(
argstr="-image %s",
extensions=None,
),
map_zero=dict(
argstr="-map_zero",
),
max_tod=dict(
argstr="-tod %d",
),
nthreads=dict(
argstr="-nthreads %d",
nohash=True,
),
out_file=dict(
argstr="%s",
extensions=None,
position=-1,
usedefault=True,
),
precise=dict(
argstr="-precise",
),
reference=dict(
argstr="-template %s",
extensions=None,
),
stat_tck=dict(
argstr="-stat_tck %s",
),
stat_vox=dict(
argstr="-stat_vox %s",
),
tck_weights=dict(
argstr="-tck_weights_in %s",
extensions=None,
),
upsample=dict(
argstr="-upsample %d",
),
use_dec=dict(
argstr="-dec",
),
vox_size=dict(
argstr="-vox %s",
sep=",",
),
)
inputs = ComputeTDI.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ComputeTDI_outputs():
output_map = dict(
out_file=dict(
extensions=None,
),
)
outputs = ComputeTDI.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
data/dataloader.py | alexa/bort | 469 | 11136483 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dataloader utils for pretraining.
Mostly taken from https://github.com/dmlc/gluon-nlp/blob/v0.9.x/src/gluonnlp/data/datasetloader.py
"""
__all__ = ['DatasetLoader', 'SamplerFn', 'DatasetFn', 'DataLoaderFn']
import multiprocessing
from gluonnlp.data.stream import _PathDataset
class DatasetFn(object):
"""Callable object to generate a gluon.data.Dataset given a url.
Subclasses should override the __call__ method.
"""
def __call__(self, dataset_url):
raise NotImplementedError
class SamplerFn(object):
"""Callable object to generate a gluon.data.sampler.Sampler given a dataset.
Subclasses should override the __call__ method.
"""
def __call__(self, dataset):
raise NotImplementedError
class DataLoaderFn(object):
"""Callable object to generate a DataLoader object given a dataset and sampler.
Subclasses should override the __call__ method.
"""
def __call__(self, dataset, sampler):
raise NotImplementedError
class SimpleDataLoaderFn(DataLoaderFn):
"""A simple callable object that generates a data loader by applying
dataloader_cls(dataset, batch_sampler=sampler, **dataloader_params)
"""
def __init__(self, dataloader_cls, dataloader_params):
self._dataloader_cls = dataloader_cls
self._dataloader_params = dataloader_params
def __call__(self, dataset, sampler):
return self._dataloader_cls(dataset, batch_sampler=sampler,
**self._dataloader_params)
class SimpleDatasetFn(DatasetFn):
"""A simple callable object that geneartes a dataset by applying
dataset_cls(url, **dataset_params)
"""
def __init__(self, dataset_cls, dataset_params):
self._dataset_cls = dataset_cls
self._dataset_params = dataset_params
def __call__(self, dataset_url):
return self._dataset_cls(dataset_url, **self._dataset_params)
def _worker_fn(url, dataset_fn, sampler_fn):
"""Function to generate the dataset and sampler for each worker."""
dataset = dataset_fn(url)
sampler = sampler_fn(dataset)
return (dataset, sampler)
class _MultiWorkerIter(object):
"""Internal multi-worker iterator for DataLoader."""
def __init__(self, worker_pool, worker_fn, dataset, file_sampler,
dataset_fn, sampler_fn, dataloader_fn, prefetch):
self._worker_pool = worker_pool
self._worker_fn = worker_fn
self._dataset = dataset
self._dataset_fn = dataset_fn
self._sampler_fn = sampler_fn
self._dataloader_fn = dataloader_fn
self._prefetch = prefetch
# send and receive index for datasets
self._rcvd_idx = 0
self._sent_idx = 0
self._data_buffer = {}
self._dataset_iter = iter(file_sampler)
self._num_datasets = len(self._dataset)
# need to keep a reference of the dataloader
self._dataloader_ref = None
self._dataloader = None
# pre-fetch
for _ in range(self._prefetch):
self._push_next_dataset()
def _push_next_dataset(self):
"""Assign next dataset workload to workers."""
if self._sent_idx < len(self._dataset):
url = self._dataset[self._sent_idx]
else:
return
# push to worker asynchronously
async_ret = self._worker_pool.apply_async(
self._worker_fn, (url, self._dataset_fn, self._sampler_fn))
# data buffer stores the async result
self._data_buffer[self._sent_idx] = async_ret
self._sent_idx += 1
def _next_dataset(self):
"""Retrieve the next dataset. Returns None if no dataset is available."""
if self._rcvd_idx == self._sent_idx:
assert not self._data_buffer, 'Data buffer should be empty at this moment'
return None
assert self._rcvd_idx < self._sent_idx, \
'rcvd_idx must be smaller than sent_idx'
assert self._rcvd_idx in self._data_buffer, \
'fatal error with _next_dataset, rcvd_idx missing'
ret = self._data_buffer.pop(self._rcvd_idx)
dataset, sampler = ret.get()
self._rcvd_idx += 1
return dataset, sampler
def __next__(self):
"""Next mini-batch"""
while True:
if self._dataloader_ref is None:
# load next dataset and create a data loader
self._push_next_dataset()
result = self._next_dataset()
if result is None:
raise StopIteration
dataset, sampler = result
self._dataloader_ref = self._dataloader_fn(dataset, sampler)
self._dataloader = iter(self._dataloader_ref)
try:
# load next mini-batch from the dataloader
result = next(self._dataloader)
return result
except StopIteration:
self._dataloader = None
self._dataloader_ref = None
def next(self):
"""Next mini-batch"""
return self.__next__()
def __iter__(self):
"""Returns the iterator object"""
return self
class DatasetLoader(object):
"""Loads data from a list of datasets and returns mini-batches of data.
One dataset is loaded at a time.
Parameters
----------
file_patterns : str
Glob pattern for the input text files.
file_sampler : str or gluon.data.Sampler, defaults to 'random'
The sampler used to sample a file. The following string values are supported:
- 'sequential': SequentialSampler
- 'random': RandomSampler
dataset_fn : DatasetFn, callable
Callable object to generate a gluon.data.Dataset given a url.
sampler_fn : SamplerFn, callable
Callable object to generate a gluon.data.sampler.Sampler given a dataset.
dataloader_fn : DataloaderFn, callable
Callable object to generate a data loader object given a url.
num_dataset_workers : int
Number of worker process for dataset creation.
prefetch : int, default is `num_dataset_workers`
The number of datasets to prefetch; only effective if
`num_dataset_workers` > 0. If `prefetch` > 0, worker processes are allowed
to prefetch that many datasets before data is requested from their
iterators.
Note that a larger prefetch value gives smoother start-up performance but
consumes more memory. Using a smaller number may forfeit the purpose of
using multiple worker processes; try reducing `num_dataset_workers` in
this case.
By default it is set to `num_dataset_workers`.
"""
def __init__(self, file_patterns, file_sampler, dataset_fn,
sampler_fn, dataloader_fn, num_dataset_workers=1, prefetch=None):
self._dataset = _PathDataset(file_patterns)
self._file_sampler = file_sampler
self._dataset_fn = dataset_fn
self._sampler_fn = sampler_fn
self._dataloader_fn = dataloader_fn
self._num_dataset_workers = num_dataset_workers
self._prefetch = max(
0, int(prefetch) if prefetch is not None else num_dataset_workers)
self._worker_pool = None
if self._num_dataset_workers > 0:
self._worker_pool = multiprocessing.Pool(self._num_dataset_workers)
assert self._num_dataset_workers >= 0, \
'num_dataset_workers must be non-negative'
assert isinstance(sampler_fn, SamplerFn), \
'sampler_fn must be an instance of SamplerFn'
assert isinstance(dataloader_fn, DataLoaderFn), \
'dataloader_fn must be an instance of DataLoaderFn'
def __iter__(self):
if self._num_dataset_workers == 0:
def _same_process_iter():
for idx in self._file_sampler:
url = self._dataset[idx]
dataset, sampler = _worker_fn(
url, self._dataset_fn, self._sampler_fn)
dataloader = self._dataloader_fn(dataset, sampler)
for batch in dataloader:
yield batch
return _same_process_iter()
# multi-worker
return _MultiWorkerIter(self._worker_pool,
worker_fn=_worker_fn,
dataset=self._dataset,
file_sampler=self._file_sampler,
dataset_fn=self._dataset_fn,
sampler_fn=self._sampler_fn,
dataloader_fn=self._dataloader_fn,
prefetch=self._prefetch)
def __del__(self):
if self._worker_pool:
# manually terminate due to a bug that pool is not automatically terminated
# https://bugs.python.org/issue34172
assert isinstance(self._worker_pool, multiprocessing.pool.Pool)
self._worker_pool.terminate()
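# Wiring sketch (illustrative; the dataset, sampler and dataloader classes
# named below are placeholders supplied by the caller, not part of this file):
#
#   dataset_fn = SimpleDatasetFn(MyShardDataset, {'tokenizer': tok})
#   sampler_fn = MySamplerFn(batch_size=8)          # subclass of SamplerFn
#   dataloader_fn = MyDataLoaderFn(...)             # subclass of DataLoaderFn
#   loader = DatasetLoader('corpus/part-*.txt', file_sampler,
#                          dataset_fn, sampler_fn, dataloader_fn,
#                          num_dataset_workers=2)
#   for batch in loader:    # one shard is loaded and prefetched at a time
#       ...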
|
gdspy/polygon.py | gdmcbain/gdspy | 239 | 11136485 | ######################################################################
# #
# Copyright 2009 <NAME>. #
# This file is part of gdspy, distributed under the terms of the #
# Boost Software License - Version 1.0. See the accompanying #
# LICENSE file or <http://www.boost.org/LICENSE_1_0.txt> #
# #
######################################################################
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import sys
if sys.version_info.major < 3:
from builtins import zip
from builtins import open
from builtins import int
from builtins import round
from builtins import range
from builtins import super
from future import standard_library
standard_library.install_aliases()
else:
# Python 3 doesn't have basestring, as unicode is type string
# Python 2 doesn't equate unicode to string, but both are basestring
# Now isinstance(s, basestring) will be True for any python version
basestring = str
import struct
import itertools
import warnings
import numpy
from gdspy import clipper
from gdspy.hobby import _hobby
_directions_dict = {"+x": 0, "+y": 0.5, "-x": 1, "-y": -0.5}
_directions_list = ["+x", "+y", "-x", "-y"]
_halfpi = 0.5 * numpy.pi
_mpone = numpy.array((-1.0, 1.0))
class PolygonSet(object):
"""
Set of polygonal objects.
Parameters
----------
polygons : iterable of array-like[N][2]
List containing the coordinates of the vertices of each polygon.
layer : integer
The GDSII layer number for this element.
datatype : integer
The GDSII datatype for this element (between 0 and 255).
Attributes
----------
polygons : list of numpy array[N][2]
Coordinates of the vertices of each polygon.
layers : list of integer
The GDSII layer number for each element.
datatypes : list of integer
The GDSII datatype for each element (between 0 and 255).
properties : {integer: string} dictionary
Properties for these elements.
Notes
-----
The last point should not be equal to the first (polygons are
automatically closed).
The original GDSII specification supports only a maximum of 199
vertices per polygon.
"""
__slots__ = "layers", "datatypes", "polygons", "properties"
def __init__(self, polygons, layer=0, datatype=0):
self.polygons = [numpy.array(p) for p in polygons]
self.layers = [layer] * len(self.polygons)
self.datatypes = [datatype] * len(self.polygons)
self.properties = {}
def __str__(self):
return (
"PolygonSet ({} polygons, {} vertices, layers {}, datatypes {})"
).format(
len(self.polygons),
sum([len(p) for p in self.polygons]),
list(set(self.layers)),
list(set(self.datatypes)),
)
def get_bounding_box(self):
"""
Calculate the bounding box of the polygons.
Returns
-------
out : Numpy array[2, 2] or None
Bounding box of this polygon in the form [[x_min, y_min],
[x_max, y_max]], or None if the polygon is empty.
"""
if len(self.polygons) == 0:
return None
return numpy.array(
(
(
min(pts[:, 0].min() for pts in self.polygons),
min(pts[:, 1].min() for pts in self.polygons),
),
(
max(pts[:, 0].max() for pts in self.polygons),
max(pts[:, 1].max() for pts in self.polygons),
),
)
)
def rotate(self, angle, center=(0, 0)):
"""
Rotate this object.
Parameters
----------
angle : number
The angle of rotation (in *radians*).
center : array-like[2]
Center point for the rotation.
Returns
-------
out : `PolygonSet`
This object.
"""
ca = numpy.cos(angle)
sa = numpy.sin(angle) * _mpone
c0 = numpy.array(center)
new_polys = []
for points in self.polygons:
pts = points - c0
new_polys.append(pts * ca + pts[:, ::-1] * sa + c0)
self.polygons = new_polys
return self
def scale(self, scalex, scaley=None, center=(0, 0)):
"""
Scale this object.
Parameters
----------
scalex : number
Scaling factor along the first axis.
scaley : number or None
Scaling factor along the second axis. If None, same as
`scalex`.
center : array-like[2]
Center point for the scaling operation.
Returns
-------
out : `PolygonSet`
This object.
"""
c0 = numpy.array(center)
s = scalex if scaley is None else numpy.array((scalex, scaley))
self.polygons = [(points - c0) * s + c0 for points in self.polygons]
return self
def to_gds(self, outfile, multiplier):
"""
Convert this object to a series of GDSII elements.
Parameters
----------
outfile : open file
Output to write the GDSII.
multiplier : number
A number that multiplies all dimensions written in the GDSII
elements.
"""
for ii in range(len(self.polygons)):
if len(self.polygons[ii]) > 8190:
warnings.warn(
"[GDSPY] Polygons with more than 8190 are not supported by the "
"official GDSII specification. This GDSII file might not be "
"compatible with all readers.",
stacklevel=4,
)
outfile.write(
struct.pack(
">4Hh2Hh",
4,
0x0800,
6,
0x0D02,
self.layers[ii],
6,
0x0E02,
self.datatypes[ii],
)
)
xy = numpy.empty((self.polygons[ii].shape[0] + 1, 2), dtype=">i4")
xy[:-1] = numpy.round(self.polygons[ii] * multiplier)
xy[-1] = xy[0]
i0 = 0
while i0 < xy.shape[0]:
i1 = min(i0 + 8190, xy.shape[0])
outfile.write(struct.pack(">2H", 4 + 8 * (i1 - i0), 0x1003))
outfile.write(xy[i0:i1].tobytes())
i0 = i1
else:
outfile.write(
struct.pack(
">4Hh2Hh2H",
4,
0x0800,
6,
0x0D02,
self.layers[ii],
6,
0x0E02,
self.datatypes[ii],
12 + 8 * len(self.polygons[ii]),
0x1003,
)
)
xy = numpy.round(self.polygons[ii] * multiplier).astype(">i4")
outfile.write(xy.tobytes())
outfile.write(xy[0].tobytes())
if self.properties is not None and len(self.properties) > 0:
size = 0
for attr, value in self.properties.items():
if len(value) % 2 != 0:
value = value + "\0"
outfile.write(
struct.pack(">5H", 6, 0x2B02, attr, 4 + len(value), 0x2C06)
)
outfile.write(value.encode("ascii"))
size += len(value) + 2
if size > 128:
warnings.warn(
"[GDSPY] Properties with size larger than 128 bytes are not "
"officially supported by the GDSII specification. This file "
"might not be compatible with all readers.",
stacklevel=4,
)
outfile.write(struct.pack(">2H", 4, 0x1100))
def to_svg(self, outfile, scaling, precision):
"""
Write an SVG fragment representation of this object.
Parameters
----------
outfile : open file
Output to write the SVG representation.
scaling : number
Scaling factor for the geometry.
precision : positive integer or `None`
Maximal number of digits for coordinates after scaling.
"""
for p, l, d in zip(self.polygons, self.layers, self.datatypes):
outfile.write('<polygon class="l{}d{}" points="'.format(l, d))
outfile.write(
" ".join(
",".join(
(
numpy.format_float_positional(
pt[0], trim="0", precision=precision
),
numpy.format_float_positional(
pt[1], trim="0", precision=precision
),
)
)
for pt in scaling * p
)
)
outfile.write('"/>\n')
def area(self, by_spec=False):
"""
Calculate the total area of this polygon set.
Parameters
----------
by_spec : bool
If True, the return value is a dictionary with
``{(layer, datatype): area}``.
Returns
-------
out : number, dictionary
Area of this object.
"""
if by_spec:
path_area = {}
for poly, key in zip(self.polygons, zip(self.layers, self.datatypes)):
poly_area = 0
for ii in range(1, len(poly) - 1):
poly_area += (poly[0][0] - poly[ii + 1][0]) * (
poly[ii][1] - poly[0][1]
) - (poly[0][1] - poly[ii + 1][1]) * (poly[ii][0] - poly[0][0])
if key in path_area:
path_area[key] += 0.5 * abs(poly_area)
else:
path_area[key] = 0.5 * abs(poly_area)
else:
path_area = 0
for points in self.polygons:
poly_area = 0
for ii in range(1, len(points) - 1):
poly_area += (points[0][0] - points[ii + 1][0]) * (
points[ii][1] - points[0][1]
) - (points[0][1] - points[ii + 1][1]) * (
points[ii][0] - points[0][0]
)
path_area += 0.5 * abs(poly_area)
return path_area
def fracture(self, max_points=199, precision=1e-3):
"""
Slice these polygons in the horizontal and vertical directions
so that each resulting piece has at most `max_points`. This
operation occurs in place.
Parameters
----------
max_points : integer
Maximal number of points in each resulting polygon (at least
5 for the fracture to occur).
precision : float
Desired precision for rounding vertex coordinates.
Returns
-------
out : `PolygonSet`
This object.
"""
if max_points > 4:
ii = 0
while ii < len(self.polygons):
if len(self.polygons[ii]) > max_points:
pts0 = sorted(self.polygons[ii][:, 0])
pts1 = sorted(self.polygons[ii][:, 1])
ncuts = len(pts0) // max_points
if pts0[-1] - pts0[0] > pts1[-1] - pts1[0]:
# Vertical cuts
cuts = [
pts0[int(i * len(pts0) / (ncuts + 1.0) + 0.5)]
for i in range(1, ncuts + 1)
]
chopped = clipper._chop(
self.polygons[ii], cuts, 0, 1 / precision
)
else:
# Horizontal cuts
cuts = [
pts1[int(i * len(pts1) / (ncuts + 1.0) + 0.5)]
for i in range(1, ncuts + 1)
]
chopped = clipper._chop(
self.polygons[ii], cuts, 1, 1 / precision
)
self.polygons.pop(ii)
layer = self.layers.pop(ii)
datatype = self.datatypes.pop(ii)
self.polygons.extend(
numpy.array(x) for x in itertools.chain.from_iterable(chopped)
)
npols = sum(len(c) for c in chopped)
self.layers.extend(layer for _ in range(npols))
self.datatypes.extend(datatype for _ in range(npols))
else:
ii += 1
return self
def fillet(self, radius, points_per_2pi=128, max_points=199, precision=1e-3):
"""
Round the corners of these polygons and fractures them into
polygons with less vertices if necessary.
Parameters
----------
radius : number, array-like
Radius of the corners. If number: all corners filleted by
that amount. If array: specify fillet radii on a
per-polygon basis (length must be equal to the number of
polygons in this `PolygonSet`). Each element in the array
can be a number (all corners filleted by the same amount) or
another array of numbers, one per polygon vertex.
Alternatively, the array can be flattened to have one radius
per `PolygonSet` vertex.
points_per_2pi : integer
Number of vertices used to approximate a full circle. The
number of vertices in each corner of the polygon will be the
fraction of this number corresponding to the angle
encompassed by that corner with respect to 2 pi.
max_points : integer
Maximal number of points in each resulting polygon (at least
5, otherwise the resulting polygon is not fractured).
precision : float
Desired precision for rounding vertex coordinates in case
of fracturing.
Returns
-------
out : `PolygonSet`
This object.
"""
two_pi = 2 * numpy.pi
fracture = False
if numpy.isscalar(radius):
radii = [[radius] * p.shape[0] for p in self.polygons]
else:
if len(radius) == len(self.polygons):
radii = []
for r, p in zip(radius, self.polygons):
if numpy.isscalar(r):
radii.append([r] * p.shape[0])
else:
if len(r) != p.shape[0]:
raise ValueError(
"[GDSPY] Wrong length in fillet radius list. "
"Found {} radii for polygon with {} vertices.".format(
len(r), p.shape[0]
)
)
radii.append(r)
else:
total = sum(p.shape[0] for p in self.polygons)
if len(radius) != total:
raise ValueError(
"[GDSPY] Wrong length in fillet radius list. "
"Expected lengths are {} or {}; got {}.".format(
len(self.polygons), total, len(radius)
)
)
radii = []
n = 0
for p in self.polygons:
radii.append(radius[n : n + p.shape[0]])
n += p.shape[0]
for jj in range(len(self.polygons)):
vec = self.polygons[jj].astype(float) - numpy.roll(self.polygons[jj], 1, 0)
length = (vec[:, 0] ** 2 + vec[:, 1] ** 2) ** 0.5
ii = numpy.flatnonzero(length)
if len(ii) < len(length):
self.polygons[jj] = numpy.array(self.polygons[jj][ii])
radii[jj] = [radii[jj][i] for i in ii]
vec = self.polygons[jj].astype(float) - numpy.roll(
self.polygons[jj], 1, 0
)
length = (vec[:, 0] ** 2 + vec[:, 1] ** 2) ** 0.5
vec[:, 0] = vec[:, 0] / length
vec[:, 1] = vec[:, 1] / length
dvec = numpy.roll(vec, -1, 0) - vec
norm = (dvec[:, 0] ** 2 + dvec[:, 1] ** 2) ** 0.5
ii = numpy.flatnonzero(norm)
dvec[ii, 0] = dvec[ii, 0] / norm[ii]
dvec[ii, 1] = dvec[ii, 1] / norm[ii]
dot = numpy.roll(vec, -1, 0) * vec
theta = numpy.arccos(dot[:, 0] + dot[:, 1])
ct = numpy.cos(theta * 0.5)
tt = numpy.tan(theta * 0.5)
new_points = []
for ii in range(-1, len(self.polygons[jj]) - 1):
if theta[ii] > 1e-6:
a0 = -vec[ii] * tt[ii] - dvec[ii] / ct[ii]
a0 = numpy.arctan2(a0[1], a0[0])
a1 = vec[ii + 1] * tt[ii] - dvec[ii] / ct[ii]
a1 = numpy.arctan2(a1[1], a1[0])
if a1 - a0 > numpy.pi:
a1 -= two_pi
elif a1 - a0 < -numpy.pi:
a1 += two_pi
n = max(
int(numpy.ceil(abs(a1 - a0) / two_pi * points_per_2pi) + 0.5), 2
)
a = numpy.linspace(a0, a1, n)
ll = radii[jj][ii] * tt[ii]
if ll > 0.49 * length[ii]:
r = 0.49 * length[ii] / tt[ii]
ll = 0.49 * length[ii]
else:
r = radii[jj][ii]
if ll > 0.49 * length[ii + 1]:
r = 0.49 * length[ii + 1] / tt[ii]
new_points.extend(
r * dvec[ii] / ct[ii]
+ self.polygons[jj][ii]
+ numpy.vstack((r * numpy.cos(a), r * numpy.sin(a))).transpose()
)
else:
new_points.append(self.polygons[jj][ii])
self.polygons[jj] = numpy.array(new_points)
if len(new_points) > max_points:
fracture = True
if fracture:
self.fracture(max_points, precision)
return self
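# Illustrative sketch of `fillet` usage (not part of the library code): a single
# radius rounds every corner; per-polygon or per-vertex radii may be passed as
# arrays. `myCell` is assumed to exist, as in the docstring examples below.
#
#     >>> square = gdspy.Rectangle((0, 0), (10, 10))
#     >>> square.fillet(2, points_per_2pi=64)
#     >>> myCell.add(square)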
def translate(self, dx, dy):
"""
Translate this polygon.
Parameters
----------
dx : number
Distance to move in the x-direction.
dy : number
Distance to move in the y-direction.
Returns
-------
out : `PolygonSet`
This object.
"""
vec = numpy.array((dx, dy))
self.polygons = [points + vec for points in self.polygons]
return self
def mirror(self, p1, p2=(0, 0)):
"""
Mirror the polygons over a line through points 1 and 2.
Parameters
----------
p1 : array-like[2]
first point defining the reflection line
p2 : array-like[2]
second point defining the reflection line
Returns
-------
out : `PolygonSet`
This object.
"""
origin = numpy.array(p1)
vec = numpy.array(p2) - origin
vec_r = vec * (2 / numpy.inner(vec, vec))
self.polygons = [
numpy.outer(numpy.inner(points - origin, vec_r), vec) - points + 2 * origin
for points in self.polygons
]
return self
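# Illustrative sketch of the in-place transforms above (hedged; `myCell` is an
# assumed target cell). Both methods return `self`, so calls can be chained:
#
#     >>> poly = gdspy.Rectangle((0, 0), (2, 1))
#     >>> poly.translate(5, 0).mirror((0, 0), (0, 1))  # shift, then flip about the y axis
#     >>> myCell.add(poly)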
class Polygon(PolygonSet):
"""
Polygonal geometric object.
Parameters
----------
points : array-like[N][2]
Coordinates of the vertices of the polygon.
layer : integer
The GDSII layer number for this element.
datatype : integer
The GDSII datatype for this element (between 0 and 255).
Notes
-----
The last point should not be equal to the first (polygons are
automatically closed).
The original GDSII specification supports only a maximum of 199
vertices per polygon.
Examples
--------
>>> triangle_pts = [(0, 40), (15, 40), (10, 50)]
>>> triangle = gdspy.Polygon(triangle_pts)
>>> myCell.add(triangle)
"""
__slots__ = "layers", "datatypes", "polygons", "properties"
def __init__(self, points, layer=0, datatype=0):
self.layers = [layer]
self.datatypes = [datatype]
self.polygons = [numpy.array(points)]
self.properties = {}
def __str__(self):
return "Polygon ({} vertices, layer {}, datatype {})".format(
len(self.polygons[0]), self.layers[0], self.datatypes[0]
)
class Rectangle(PolygonSet):
"""
Rectangular geometric object.
Parameters
----------
point1 : array-like[2]
Coordinates of a corner of the rectangle.
point2 : array-like[2]
Coordinates of the corner of the rectangle opposite to `point1`.
layer : integer
The GDSII layer number for this element.
datatype : integer
The GDSII datatype for this element (between 0 and 255).
Examples
--------
>>> rectangle = gdspy.Rectangle((0, 0), (10, 20))
>>> myCell.add(rectangle)
"""
__slots__ = "layers", "datatypes", "polygons", "properties"
def __init__(self, point1, point2, layer=0, datatype=0):
self.layers = [layer]
self.datatypes = [datatype]
self.polygons = [
numpy.array(
[
[point1[0], point1[1]],
[point1[0], point2[1]],
[point2[0], point2[1]],
[point2[0], point1[1]],
]
)
]
self.properties = {}
def __str__(self):
return (
"Rectangle (({0[0]}, {0[1]}) to ({1[0]}, {1[1]}), layer {2}, datatype {3})"
).format(
self.polygons[0][0], self.polygons[0][2], self.layers[0], self.datatypes[0]
)
def __repr__(self):
return "Rectangle(({0[0]}, {0[1]}), ({1[0]}, {1[1]}), {2}, {3})".format(
self.polygons[0][0], self.polygons[0][2], self.layers[0], self.datatypes[0]
)
class Round(PolygonSet):
"""
Circular geometric object.
Represent a circle, ellipse, ring or their sections.
Parameters
----------
center : array-like[2]
Coordinates of the center of the circle/ring.
radius : number, array-like[2]
Radius of the circle/outer radius of the ring. To build an
ellipse an array of 2 numbers can be used, representing the
radii in the horizontal and vertical directions.
inner_radius : number, array-like[2]
Inner radius of the ring. To build an elliptical hole, an array
of 2 numbers can be used, representing the radii in the
horizontal and vertical directions.
initial_angle : number
Initial angle of the circular/ring section (in *radians*).
final_angle : number
Final angle of the circular/ring section (in *radians*).
tolerance : float
Approximate curvature resolution. The number of points is
automatically calculated.
number_of_points : integer or None
Manually define the number of vertices that form the object
(polygonal approximation). Overrides `tolerance`.
max_points : integer
If the number of points in the element is greater than
`max_points`, it will be fractured in smaller polygons with
at most `max_points` each. If `max_points` is zero no fracture
will occur.
layer : integer
The GDSII layer number for this element.
datatype : integer
The GDSII datatype for this element (between 0 and 255).
Notes
-----
The original GDSII specification supports only a maximum of 199
vertices per polygon.
Examples
--------
>>> circle = gdspy.Round((30, 5), 8)
>>> ell_ring = gdspy.Round((50, 5), (8, 7), inner_radius=(5, 4))
>>> pie_slice = gdspy.Round((30, 25), 8, initial_angle=0,
... final_angle=-5.0*numpy.pi/6.0)
>>> arc = gdspy.Round((50, 25), 8, inner_radius=5,
... initial_angle=-5.0*numpy.pi/6.0,
... final_angle=0)
"""
__slots__ = "layers", "datatypes", "polygons", "properties"
def __init__(
self,
center,
radius,
inner_radius=0,
initial_angle=0,
final_angle=0,
tolerance=0.01,
number_of_points=None,
max_points=199,
layer=0,
datatype=0,
):
if hasattr(radius, "__iter__"):
orx, ory = radius
radius = max(radius)
def outer_transform(a):
r = a - ((a + numpy.pi) % (2 * numpy.pi) - numpy.pi)
t = numpy.arctan2(orx * numpy.sin(a), ory * numpy.cos(a)) + r
t[a == numpy.pi] = numpy.pi
return t
else:
orx = ory = radius
def outer_transform(a):
return a
if hasattr(inner_radius, "__iter__"):
irx, iry = inner_radius
inner_radius = max(inner_radius)
def inner_transform(a):
r = a - ((a + numpy.pi) % (2 * numpy.pi) - numpy.pi)
t = numpy.arctan2(irx * numpy.sin(a), iry * numpy.cos(a)) + r
t[a == numpy.pi] = numpy.pi
return t
else:
irx = iry = inner_radius
def inner_transform(a):
return a
if isinstance(number_of_points, float):
warnings.warn(
"[GDSPY] Use of a floating number as number_of_points "
"is deprecated in favor of tolerance.",
category=DeprecationWarning,
stacklevel=2,
)
tolerance = number_of_points
number_of_points = None
if number_of_points is None:
full_angle = (
2 * numpy.pi
if final_angle == initial_angle
else abs(final_angle - initial_angle)
)
number_of_points = max(
3,
1 + int(0.5 * full_angle / numpy.arccos(1 - tolerance / radius) + 0.5),
)
if inner_radius > 0:
number_of_points *= 2
pieces = (
1
if max_points == 0
else int(numpy.ceil(number_of_points / float(max_points)))
)
number_of_points = number_of_points // pieces
self.layers = [layer] * pieces
self.datatypes = [datatype] * pieces
self.polygons = [numpy.zeros((number_of_points, 2)) for _ in range(pieces)]
self.properties = {}
if final_angle == initial_angle and pieces > 1:
final_angle += 2 * numpy.pi
angles = numpy.linspace(initial_angle, final_angle, pieces + 1)
oang = outer_transform(angles)
iang = inner_transform(angles)
for ii in range(pieces):
if oang[ii + 1] == oang[ii]:
if inner_radius <= 0:
t = (
numpy.arange(number_of_points)
* 2.0
* numpy.pi
/ number_of_points
)
self.polygons[ii][:, 0] = numpy.cos(t) * orx + center[0]
self.polygons[ii][:, 1] = numpy.sin(t) * ory + center[1]
else:
n2 = number_of_points // 2
n1 = number_of_points - n2
t = numpy.arange(n1) * 2.0 * numpy.pi / (n1 - 1)
self.polygons[ii][:n1, 0] = numpy.cos(t) * orx + center[0]
self.polygons[ii][:n1, 1] = numpy.sin(t) * ory + center[1]
t = numpy.arange(n2) * -2.0 * numpy.pi / (n2 - 1)
self.polygons[ii][n1:, 0] = numpy.cos(t) * irx + center[0]
self.polygons[ii][n1:, 1] = numpy.sin(t) * iry + center[1]
else:
if inner_radius <= 0:
t = numpy.linspace(oang[ii], oang[ii + 1], number_of_points - 1)
self.polygons[ii][1:, 0] = numpy.cos(t) * orx + center[0]
self.polygons[ii][1:, 1] = numpy.sin(t) * ory + center[1]
self.polygons[ii][0] += center
else:
n2 = number_of_points // 2
n1 = number_of_points - n2
t = numpy.linspace(oang[ii], oang[ii + 1], n1)
self.polygons[ii][:n1, 0] = numpy.cos(t) * orx + center[0]
self.polygons[ii][:n1, 1] = numpy.sin(t) * ory + center[1]
t = numpy.linspace(iang[ii + 1], iang[ii], n2)
self.polygons[ii][n1:, 0] = numpy.cos(t) * irx + center[0]
self.polygons[ii][n1:, 1] = numpy.sin(t) * iry + center[1]
def __str__(self):
return ("Round ({} polygons, {} vertices, layers {}, datatypes {})").format(
len(self.polygons),
sum([len(p) for p in self.polygons]),
list(set(self.layers)),
list(set(self.datatypes)),
)
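# Hedged usage sketch for `Round` (illustrative values only): `tolerance`
# controls the polygonal approximation; here a 270-degree ring section.
#
#     >>> ring_section = gdspy.Round(
#     ...     (0, 0), 10, inner_radius=6,
#     ...     initial_angle=0, final_angle=1.5 * numpy.pi,
#     ...     tolerance=0.001,
#     ... )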
class Text(PolygonSet):
"""
Polygonal text object.
Each letter is formed by a series of polygons.
Parameters
----------
text : string
The text to be converted in geometric objects.
size : number
Height of the character. The width of a character and the
distance between characters are this value multiplied by 5 / 9
and 8 / 9, respectively. For vertical text, the distance is
multiplied by 11 / 9.
position : array-like[2]
Text position (lower left corner).
horizontal : bool
If True, the text is written from left to right; if
False, from top to bottom.
angle : number
The angle of rotation of the text.
layer : integer
The GDSII layer number for these elements.
datatype : integer
The GDSII datatype for this element (between 0 and 255).
Examples
--------
>>> text = gdspy.Text('Sample text', 20, (-10, -100))
>>> myCell.add(text)
"""
# fmt: off
_font = {
'!': [[(2, 2), (3, 2), (3, 3), (2, 3)], [(2, 4), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (2, 9), (2, 8), (2, 7), (2, 6), (2, 5)]],
'"': [[(1, 7), (2, 7), (2, 8), (2, 9), (1, 9), (1, 8)], [(3, 7), (4, 7), (4, 8), (4, 9), (3, 9), (3, 8)]],
'#': [[(0, 3), (1, 3), (1, 2), (2, 2), (2, 3), (2, 4), (2, 5), (3, 5), (3, 4), (2, 4), (2, 3), (3, 3), (3, 2), (4, 2), (4, 3), (5, 3), (5, 4), (4, 4), (4, 5), (5, 5), (5, 6), (4, 6), (4, 7), (3, 7), (3, 6), (2, 6), (2, 7), (1, 7), (1, 6), (0, 6), (0, 5), (1, 5), (1, 4), (0, 4)]],
'$': [[(0, 2), (1, 2), (2, 2), (2, 1), (3, 1), (3, 2), (4, 2), (4, 3), (3, 3), (3, 4), (4, 4), (4, 5), (3, 5), (3, 6), (3, 7), (4, 7), (5, 7), (5, 8), (4, 8), (3, 8), (3, 9), (2, 9), (2, 8), (1, 8), (1, 7), (2, 7), (2, 6), (1, 6), (1, 5), (2, 5), (2, 4), (2, 3), (1, 3), (0, 3)], [(0, 6), (1, 6), (1, 7), (0, 7)], [(4, 3), (5, 3), (5, 4), (4, 4)]],
'%': [[(0, 2), (1, 2), (1, 3), (1, 4), (0, 4), (0, 3)], [(0, 7), (1, 7), (2, 7), (2, 8), (2, 9), (1, 9), (0, 9), (0, 8)], [(1, 4), (2, 4), (2, 5), (1, 5)], [(2, 5), (3, 5), (3, 6), (2, 6)], [(3, 2), (4, 2), (5, 2), (5, 3), (5, 4), (4, 4), (3, 4), (3, 3)], [(3, 6), (4, 6), (4, 7), (3, 7)], [(4, 7), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8)]],
'&': [[(0, 3), (1, 3), (1, 4), (1, 5), (0, 5), (0, 4)], [(0, 6), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7)], [(1, 2), (2, 2), (3, 2), (3, 3), (2, 3), (1, 3)], [(1, 5), (2, 5), (3, 5), (3, 6), (3, 7), (3, 8), (2, 8), (2, 7), (2, 6), (1, 6)], [(1, 8), (2, 8), (2, 9), (1, 9)], [(3, 3), (4, 3), (4, 4), (4, 5), (3, 5), (3, 4)], [(4, 2), (5, 2), (5, 3), (4, 3)], [(4, 5), (5, 5), (5, 6), (4, 6)]], "'": [[(2, 7), (3, 7), (3, 8), (3, 9), (2, 9), (2, 8)]],
'(': [[(1, 4), (2, 4), (2, 5), (2, 6), (2, 7), (1, 7), (1, 6), (1, 5)], [(2, 3), (3, 3), (3, 4), (2, 4)], [(2, 7), (3, 7), (3, 8), (2, 8)], [(3, 2), (4, 2), (4, 3), (3, 3)], [(3, 8), (4, 8), (4, 9), (3, 9)]],
')': [[(3, 4), (4, 4), (4, 5), (4, 6), (4, 7), (3, 7), (3, 6), (3, 5)], [(1, 2), (2, 2), (2, 3), (1, 3)], [(1, 8), (2, 8), (2, 9), (1, 9)], [(2, 3), (3, 3), (3, 4), (2, 4)], [(2, 7), (3, 7), (3, 8), (2, 8)]],
'*': [[(0, 2), (1, 2), (1, 3), (0, 3)], [(0, 4), (1, 4), (1, 3), (2, 3), (2, 2), (3, 2), (3, 3), (4, 3), (4, 4), (5, 4), (5, 5), (4, 5), (4, 6), (3, 6), (3, 7), (2, 7), (2, 6), (1, 6), (1, 5), (0, 5)], [(0, 6), (1, 6), (1, 7), (0, 7)], [(4, 2), (5, 2), (5, 3), (4, 3)], [(4, 6), (5, 6), (5, 7), (4, 7)]],
'+': [[(0, 4), (1, 4), (2, 4), (2, 3), (2, 2), (3, 2), (3, 3), (3, 4), (4, 4), (5, 4), (5, 5), (4, 5), (3, 5), (3, 6), (3, 7), (2, 7), (2, 6), (2, 5), (1, 5), (0, 5)]],
',': [[(1, 0), (2, 0), (2, 1), (1, 1)], [(2, 1), (3, 1), (3, 2), (3, 3), (2, 3), (2, 2)]],
'-': [[(0, 4), (1, 4), (2, 4), (3, 4), (4, 4), (5, 4), (5, 5), (4, 5), (3, 5), (2, 5), (1, 5), (0, 5)]],
'.': [[(2, 2), (3, 2), (3, 3), (2, 3)]],
'/': [[(0, 2), (1, 2), (1, 3), (1, 4), (0, 4), (0, 3)], [(1, 4), (2, 4), (2, 5), (1, 5)], [(2, 5), (3, 5), (3, 6), (2, 6)], [(3, 6), (4, 6), (4, 7), (3, 7)], [(4, 7), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8)]],
'0': [[(0, 3), (1, 3), (1, 4), (2, 4), (2, 5), (1, 5), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(2, 5), (3, 5), (3, 6), (2, 6)], [(3, 6), (4, 6), (4, 5), (4, 4), (4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7), (3, 7)]],
'1': [[(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (2, 9), (2, 8), (1, 8), (1, 7), (2, 7), (2, 6), (2, 5), (2, 4), (2, 3), (1, 3)]],
'2': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3), (1, 4), (0, 4), (0, 3)], [(0, 7), (1, 7), (1, 8), (0, 8)], [(1, 4), (2, 4), (3, 4), (3, 5), (2, 5), (1, 5)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(3, 5), (4, 5), (4, 6), (3, 6)], [(4, 6), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7)]],
'3': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3), (0, 3)], [(0, 8), (1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9)], [(1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6)], [(4, 3), (5, 3), (5, 4), (5, 5), (4, 5), (4, 4)], [(4, 6), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7)]],
'4': [[(0, 4), (1, 4), (2, 4), (3, 4), (3, 3), (3, 2), (4, 2), (4, 3), (4, 4), (5, 4), (5, 5), (4, 5), (4, 6), (4, 7), (4, 8), (4, 9), (3, 9), (2, 9), (2, 8), (3, 8), (3, 7), (3, 6), (3, 5), (2, 5), (1, 5), (1, 6), (0, 6), (0, 5)], [(1, 6), (2, 6), (2, 7), (2, 8), (1, 8), (1, 7)]],
'5': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3), (0, 3)], [(0, 5), (1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6)], [(4, 3), (5, 3), (5, 4), (5, 5), (4, 5), (4, 4)]],
'6': [[(0, 3), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(4, 3), (5, 3), (5, 4), (5, 5), (4, 5), (4, 4)]],
'7': [[(0, 8), (1, 8), (2, 8), (3, 8), (4, 8), (4, 7), (4, 6), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9)], [(2, 2), (3, 2), (3, 3), (3, 4), (3, 5), (2, 5), (2, 4), (2, 3)], [(3, 5), (4, 5), (4, 6), (3, 6)]],
'8': [[(0, 3), (1, 3), (1, 4), (1, 5), (0, 5), (0, 4)], [(0, 6), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(4, 3), (5, 3), (5, 4), (5, 5), (4, 5), (4, 4)], [(4, 6), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7)]],
'9': [[(0, 6), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 5), (2, 5), (3, 5), (4, 5), (4, 4), (4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7), (4, 6), (3, 6), (2, 6), (1, 6)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)]],
':': [[(2, 2), (3, 2), (3, 3), (2, 3)], [(2, 5), (3, 5), (3, 6), (2, 6)]],
';': [[(1, 0), (2, 0), (2, 1), (1, 1)], [(2, 1), (3, 1), (3, 2), (3, 3), (2, 3), (2, 2)], [(2, 4), (3, 4), (3, 5), (2, 5)]],
'<': [[(0, 5), (1, 5), (1, 6), (0, 6)], [(1, 4), (2, 4), (2, 5), (1, 5)], [(1, 6), (2, 6), (2, 7), (1, 7)], [(2, 3), (3, 3), (4, 3), (4, 4), (3, 4), (2, 4)], [(2, 7), (3, 7), (4, 7), (4, 8), (3, 8), (2, 8)], [(4, 2), (5, 2), (5, 3), (4, 3)], [(4, 8), (5, 8), (5, 9), (4, 9)]],
'=': [[(0, 3), (1, 3), (2, 3), (3, 3), (4, 3), (5, 3), (5, 4), (4, 4), (3, 4), (2, 4), (1, 4), (0, 4)], [(0, 5), (1, 5), (2, 5), (3, 5), (4, 5), (5, 5), (5, 6), (4, 6), (3, 6), (2, 6), (1, 6), (0, 6)]],
'>': [[(0, 2), (1, 2), (1, 3), (0, 3)], [(0, 8), (1, 8), (1, 9), (0, 9)], [(1, 3), (2, 3), (3, 3), (3, 4), (2, 4), (1, 4)], [(1, 7), (2, 7), (3, 7), (3, 8), (2, 8), (1, 8)], [(3, 4), (4, 4), (4, 5), (3, 5)], [(3, 6), (4, 6), (4, 7), (3, 7)], [(4, 5), (5, 5), (5, 6), (4, 6)]],
'?': [[(0, 7), (1, 7), (1, 8), (0, 8)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(2, 2), (3, 2), (3, 3), (2, 3)], [(2, 4), (3, 4), (3, 5), (2, 5)], [(3, 5), (4, 5), (4, 6), (3, 6)], [(4, 6), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7)]],
'@': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(2, 4), (3, 4), (4, 4), (4, 5), (3, 5), (3, 6), (3, 7), (2, 7), (2, 6), (2, 5)], [(4, 5), (5, 5), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7), (4, 6)]],
'A': [[(0, 2), (1, 2), (1, 3), (1, 4), (2, 4), (3, 4), (4, 4), (4, 3), (4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7), (4, 6), (4, 5), (3, 5), (2, 5), (1, 5), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)]],
'B': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(4, 3), (5, 3), (5, 4), (5, 5), (4, 5), (4, 4)], [(4, 6), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7)]],
'C': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9)]],
'D': [[(0, 2), (1, 2), (2, 2), (3, 2), (3, 3), (2, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (2, 8), (3, 8), (3, 9), (2, 9), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(3, 3), (4, 3), (4, 4), (3, 4)], [(3, 7), (4, 7), (4, 8), (3, 8)], [(4, 4), (5, 4), (5, 5), (5, 6), (5, 7), (4, 7), (4, 6), (4, 5)]],
'E': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)]],
'F': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)]],
'G': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (3, 6), (2, 6), (2, 5), (3, 5), (4, 5), (4, 4), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9)]],
'H': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (4, 5), (4, 4), (4, 3), (4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8), (4, 7), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)]],
'I': [[(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9), (1, 8), (2, 8), (2, 7), (2, 6), (2, 5), (2, 4), (2, 3), (1, 3)]],
'J': [[(0, 3), (1, 3), (1, 4), (0, 4)], [(0, 8), (1, 8), (2, 8), (3, 8), (3, 7), (3, 6), (3, 5), (3, 4), (3, 3), (4, 3), (4, 4), (4, 5), (4, 6), (4, 7), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9)], [(1, 2), (2, 2), (3, 2), (3, 3), (2, 3), (1, 3)]],
'K': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (2, 5), (2, 6), (1, 6), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(2, 4), (3, 4), (3, 5), (2, 5)], [(2, 6), (3, 6), (3, 7), (2, 7)], [(3, 3), (4, 3), (4, 4), (3, 4)], [(3, 7), (4, 7), (4, 8), (3, 8)], [(4, 2), (5, 2), (5, 3), (4, 3)], [(4, 8), (5, 8), (5, 9), (4, 9)]],
'L': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)]],
'M': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (2, 7), (2, 8), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(2, 5), (3, 5), (3, 6), (3, 7), (2, 7), (2, 6)], [(3, 7), (4, 7), (4, 6), (4, 5), (4, 4), (4, 3), (4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8), (3, 8)]],
'N': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (2, 7), (2, 8), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(2, 5), (3, 5), (3, 6), (3, 7), (2, 7), (2, 6)], [(3, 4), (4, 4), (4, 3), (4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8), (4, 7), (4, 6), (4, 5), (3, 5)]],
'O': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7), (4, 6), (4, 5), (4, 4)]],
'P': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(4, 6), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7)]],
'Q': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7), (4, 6), (4, 5), (4, 4), (3, 4), (3, 3), (2, 3), (1, 3)], [(1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9)], [(2, 4), (3, 4), (3, 5), (2, 5)]],
'R': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (3, 4), (4, 4), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6), (1, 7), (1, 8), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(4, 2), (5, 2), (5, 3), (5, 4), (4, 4), (4, 3)], [(4, 6), (5, 6), (5, 7), (5, 8), (4, 8), (4, 7)]],
'S': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3), (0, 3)], [(0, 6), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7)], [(1, 5), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (1, 6)], [(1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9)], [(4, 3), (5, 3), (5, 4), (5, 5), (4, 5), (4, 4)]],
'T': [[(0, 8), (1, 8), (2, 8), (2, 7), (2, 6), (2, 5), (2, 4), (2, 3), (2, 2), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9)]],
'U': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8), (4, 7), (4, 6), (4, 5), (4, 4)]],
'V': [[(0, 5), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6)], [(1, 3), (2, 3), (2, 4), (2, 5), (1, 5), (1, 4)], [(2, 2), (3, 2), (3, 3), (2, 3)], [(3, 3), (4, 3), (4, 4), (4, 5), (3, 5), (3, 4)], [(4, 5), (5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8), (4, 7), (4, 6)]],
'W': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (2, 3), (1, 3)], [(2, 3), (3, 3), (3, 4), (3, 5), (3, 6), (2, 6), (2, 5), (2, 4)], [(3, 2), (4, 2), (4, 3), (3, 3)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8), (4, 7), (4, 6), (4, 5), (4, 4)]],
'X': [[(0, 2), (1, 2), (1, 3), (1, 4), (0, 4), (0, 3)], [(0, 7), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8)], [(1, 4), (2, 4), (2, 5), (1, 5)], [(1, 6), (2, 6), (2, 7), (1, 7)], [(2, 5), (3, 5), (3, 6), (2, 6)], [(3, 4), (4, 4), (4, 5), (3, 5)], [(3, 6), (4, 6), (4, 7), (3, 7)], [(4, 2), (5, 2), (5, 3), (5, 4), (4, 4), (4, 3)], [(4, 7), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8)]],
'Y': [[(0, 7), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8)], [(1, 5), (2, 5), (2, 6), (2, 7), (1, 7), (1, 6)], [(2, 2), (3, 2), (3, 3), (3, 4), (3, 5), (2, 5), (2, 4), (2, 3)], [(3, 5), (4, 5), (4, 6), (4, 7), (3, 7), (3, 6)], [(4, 7), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8)]],
'Z': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3), (1, 4), (0, 4), (0, 3)], [(0, 8), (1, 8), (2, 8), (3, 8), (4, 8), (4, 7), (5, 7), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9), (1, 9), (0, 9)], [(1, 4), (2, 4), (2, 5), (1, 5)], [(2, 5), (3, 5), (3, 6), (2, 6)], [(3, 6), (4, 6), (4, 7), (3, 7)]],
'[': [[(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7), (2, 8), (3, 8), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9), (1, 8), (1, 7), (1, 6), (1, 5), (1, 4), (1, 3)]],
'\\': [[(0, 7), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8)], [(1, 6), (2, 6), (2, 7), (1, 7)], [(2, 5), (3, 5), (3, 6), (2, 6)], [(3, 4), (4, 4), (4, 5), (3, 5)], [(4, 2), (5, 2), (5, 3), (5, 4), (4, 4), (4, 3)]],
']': [[(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (4, 4), (4, 5), (4, 6), (4, 7), (4, 8), (4, 9), (3, 9), (2, 9), (1, 9), (1, 8), (2, 8), (3, 8), (3, 7), (3, 6), (3, 5), (3, 4), (3, 3), (2, 3), (1, 3)]],
'^': [[(0, 6), (1, 6), (1, 7), (0, 7)], [(1, 7), (2, 7), (2, 8), (1, 8)], [(2, 8), (3, 8), (3, 9), (2, 9)], [(3, 7), (4, 7), (4, 8), (3, 8)], [(4, 6), (5, 6), (5, 7), (4, 7)]],
'_': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3), (0, 3)]],
'`': [[(1, 8), (2, 8), (2, 9), (1, 9)], [(2, 7), (3, 7), (3, 8), (2, 8)]],
'a': [[(0, 3), (1, 3), (1, 4), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (3, 5), (2, 5), (1, 5), (1, 4), (2, 4), (3, 4), (4, 4), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7)]],
'b': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3), (1, 4), (1, 5), (1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4)]],
'c': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 6), (2, 6), (3, 6), (4, 6), (5, 6), (5, 7), (4, 7), (3, 7), (2, 7), (1, 7)]],
'd': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8), (4, 7), (3, 7), (2, 7), (1, 7), (1, 6), (2, 6), (3, 6), (4, 6), (4, 5), (4, 4), (4, 3), (3, 3), (2, 3), (1, 3)]],
'e': [[(0, 3), (1, 3), (1, 4), (2, 4), (3, 4), (4, 4), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (3, 5), (2, 5), (1, 5), (1, 6), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7)]],
'f': [[(0, 5), (1, 5), (1, 4), (1, 3), (1, 2), (2, 2), (2, 3), (2, 4), (2, 5), (3, 5), (4, 5), (4, 6), (3, 6), (2, 6), (2, 7), (2, 8), (1, 8), (1, 7), (1, 6), (0, 6)], [(2, 8), (3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9), (2, 9)]],
'g': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (0, 6), (0, 5), (0, 4)], [(1, 0), (2, 0), (3, 0), (4, 0), (4, 1), (3, 1), (2, 1), (1, 1)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 1), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7)]],
'h': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4), (4, 3)]],
'i': [[(1, 6), (2, 6), (2, 5), (2, 4), (2, 3), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (2, 7), (1, 7)], [(2, 8), (3, 8), (3, 9), (2, 9)]],
'j': [[(0, 0), (1, 0), (2, 0), (2, 1), (1, 1), (0, 1)], [(1, 6), (2, 6), (2, 5), (2, 4), (2, 3), (2, 2), (2, 1), (3, 1), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (2, 7), (1, 7)], [(2, 8), (3, 8), (3, 9), (2, 9)]],
'k': [[(0, 2), (1, 2), (1, 3), (1, 4), (2, 4), (3, 4), (3, 5), (2, 5), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), (0, 9), (0, 8), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(3, 3), (4, 3), (4, 4), (3, 4)], [(3, 5), (4, 5), (4, 6), (3, 6)], [(4, 2), (5, 2), (5, 3), (4, 3)], [(4, 6), (5, 6), (5, 7), (4, 7)]],
'l': [[(1, 8), (2, 8), (2, 7), (2, 6), (2, 5), (2, 4), (2, 3), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (2, 9), (1, 9)]],
'm': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (2, 6), (2, 5), (2, 4), (2, 3), (2, 2), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4), (4, 3)]],
'n': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(4, 2), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4), (4, 3)]],
'o': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4)]],
'p': [[(0, 0), (1, 0), (1, 1), (1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3), (1, 4), (1, 5), (1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3), (0, 2), (0, 1)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4)]],
'q': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 1), (4, 0), (5, 0), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (4, 6), (4, 5), (4, 4), (4, 3), (3, 3), (2, 3), (1, 3)], [(1, 6), (2, 6), (3, 6), (4, 6), (4, 7), (3, 7), (2, 7), (1, 7)]],
'r': [[(0, 2), (1, 2), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (3, 6), (2, 6), (1, 6), (1, 7), (0, 7), (0, 6), (0, 5), (0, 4), (0, 3)], [(3, 6), (4, 6), (5, 6), (5, 7), (4, 7), (3, 7)]],
's': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3), (0, 3)], [(0, 5), (1, 5), (1, 6), (0, 6)], [(1, 4), (2, 4), (3, 4), (4, 4), (4, 5), (3, 5), (2, 5), (1, 5)], [(1, 6), (2, 6), (3, 6), (4, 6), (5, 6), (5, 7), (4, 7), (3, 7), (2, 7), (1, 7)], [(4, 3), (5, 3), (5, 4), (4, 4)]],
't': [[(1, 6), (2, 6), (2, 5), (2, 4), (2, 3), (3, 3), (3, 4), (3, 5), (3, 6), (4, 6), (5, 6), (5, 7), (4, 7), (3, 7), (3, 8), (3, 9), (2, 9), (2, 8), (2, 7), (1, 7)], [(3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3)]],
'u': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 3), (3, 3), (2, 3), (1, 3)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (4, 7), (4, 6), (4, 5), (4, 4)]],
'v': [[(0, 5), (1, 5), (1, 6), (1, 7), (0, 7), (0, 6)], [(1, 3), (2, 3), (2, 4), (2, 5), (1, 5), (1, 4)], [(2, 2), (3, 2), (3, 3), (2, 3)], [(3, 3), (4, 3), (4, 4), (4, 5), (3, 5), (3, 4)], [(4, 5), (5, 5), (5, 6), (5, 7), (4, 7), (4, 6)]],
'w': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 2), (2, 2), (2, 3), (1, 3)], [(2, 3), (3, 3), (3, 4), (3, 5), (3, 6), (2, 6), (2, 5), (2, 4)], [(3, 2), (4, 2), (4, 3), (3, 3)], [(4, 3), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (4, 7), (4, 6), (4, 5), (4, 4)]],
'x': [[(0, 2), (1, 2), (1, 3), (0, 3)], [(0, 6), (1, 6), (1, 7), (0, 7)], [(1, 3), (2, 3), (2, 4), (1, 4)], [(1, 5), (2, 5), (2, 6), (1, 6)], [(2, 4), (3, 4), (3, 5), (2, 5)], [(3, 3), (4, 3), (4, 4), (3, 4)], [(3, 5), (4, 5), (4, 6), (3, 6)], [(4, 2), (5, 2), (5, 3), (4, 3)], [(4, 6), (5, 6), (5, 7), (4, 7)]],
'y': [[(0, 3), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (0, 7), (0, 6), (0, 5), (0, 4)], [(1, 0), (2, 0), (3, 0), (4, 0), (4, 1), (3, 1), (2, 1), (1, 1)], [(1, 2), (2, 2), (3, 2), (4, 2), (4, 1), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7), (4, 7), (4, 6), (4, 5), (4, 4), (4, 3), (3, 3), (2, 3), (1, 3)]],
'z': [[(0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3), (2, 3), (2, 4), (1, 4), (1, 3), (0, 3)], [(0, 6), (1, 6), (2, 6), (3, 6), (3, 5), (4, 5), (4, 6), (5, 6), (5, 7), (4, 7), (3, 7), (2, 7), (1, 7), (0, 7)], [(2, 4), (3, 4), (3, 5), (2, 5)]],
'{': [[(1, 5), (2, 5), (2, 4), (2, 3), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (2, 8), (2, 7), (2, 6), (1, 6)], [(3, 2), (4, 2), (5, 2), (5, 3), (4, 3), (3, 3)], [(3, 8), (4, 8), (5, 8), (5, 9), (4, 9), (3, 9)]],
'|': [[(2, 2), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (2, 9), (2, 8), (2, 7), (2, 6), (2, 5), (2, 4), (2, 3)]],
'}': [[(0, 2), (1, 2), (2, 2), (2, 3), (1, 3), (0, 3)], [(0, 8), (1, 8), (2, 8), (2, 9), (1, 9), (0, 9)], [(2, 3), (3, 3), (3, 4), (3, 5), (4, 5), (4, 6), (3, 6), (3, 7), (3, 8), (2, 8), (2, 7), (2, 6), (2, 5), (2, 4)]],
'~': [[(0, 6), (1, 6), (1, 7), (1, 8), (0, 8), (0, 7)], [(1, 8), (2, 8), (2, 9), (1, 9)], [(2, 7), (3, 7), (3, 8), (2, 8)], [(3, 6), (4, 6), (4, 7), (3, 7)], [(4, 7), (5, 7), (5, 8), (5, 9), (4, 9), (4, 8)]],
}
# fmt: on
__slots__ = "layers", "datatypes", "polygons", "properties"
def __init__(
self, text, size, position=(0, 0), horizontal=True, angle=0, layer=0, datatype=0
):
self.polygons = []
posX = 0
posY = 0
text_multiplier = size / 9.0
if angle == 0:
ca = 1
sa = 0
else:
ca = numpy.cos(angle)
sa = numpy.sin(angle)
for jj in range(len(text)):
if text[jj] == "\n":
if horizontal:
posY -= 11
posX = 0
else:
posX += 8
posY = 0
elif text[jj] == "\t":
if horizontal:
posX = posX + 32 - (posX + 8) % 32
else:
posY = posY - 11 - (posY - 22) % 44
else:
if text[jj] in Text._font:
for p in Text._font[text[jj]]:
polygon = p[:]
for ii in range(len(polygon)):
xp = text_multiplier * (posX + polygon[ii][0])
yp = text_multiplier * (posY + polygon[ii][1])
polygon[ii] = (
position[0] + xp * ca - yp * sa,
position[1] + xp * sa + yp * ca,
)
self.polygons.append(numpy.array(polygon))
if horizontal:
posX += 8
else:
posY -= 11
self.layers = [layer] * len(self.polygons)
self.datatypes = [datatype] * len(self.polygons)
self.properties = {}
def __str__(self):
return ("Text ({} polygons, {} vertices, layers {}, datatypes {})").format(
len(self.polygons),
sum([len(p) for p in self.polygons]),
list(set(self.layers)),
list(set(self.datatypes)),
)
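# Hedged usage sketch for `Text`: vertical layout and rotation combine with the
# usual layer/datatype arguments (`myCell` is an assumed target cell).
#
#     >>> label = gdspy.Text('V', 10, (0, 0), horizontal=False,
#     ...                    angle=numpy.pi / 4, layer=2)
#     >>> myCell.add(label)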
class Path(PolygonSet):
"""
Series of geometric objects that form a path or a collection of
parallel paths.
Parameters
----------
width : number
The width of each path.
initial_point : array-like[2]
Starting position of the path.
number_of_paths : positive integer
Number of parallel paths to create simultaneously.
distance : number
Distance between the centers of adjacent paths.
Attributes
----------
x : number
Current position of the path in the x direction.
y : number
Current position of the path in the y direction.
w : number
*Half*-width of each path.
n : integer
Number of parallel paths.
direction : '+x', '-x', '+y', '-y' or number
Direction or angle (in *radians*) the path points to.
distance : number
Distance between the centers of adjacent paths.
length : number
Length of the central path axis. If only one path is created,
this is the real length of the path.
properties : {integer: string} dictionary
Properties for this path.
"""
__slots__ = (
"layers",
"datatypes",
"polygons",
"x",
"y",
"w",
"n",
"direction",
"distance",
"length",
"properties",
)
def __init__(self, width, initial_point=(0, 0), number_of_paths=1, distance=0):
self.x = initial_point[0]
self.y = initial_point[1]
self.w = width * 0.5
self.n = number_of_paths
self.direction = "+x"
self.distance = distance
self.length = 0.0
self.polygons = []
self.layers = []
self.datatypes = []
self.properties = {}
def __str__(self):
if self.n > 1:
return "Path (x{}, end at ({}, {}) towards {}, length {}, width {}, {} apart, {} polygons, {} vertices, layers {}, datatypes {})".format(
self.n,
self.x,
self.y,
self.direction,
self.length,
self.w * 2,
self.distance,
len(self.polygons),
sum([len(p) for p in self.polygons]),
list(set(self.layers)),
list(set(self.datatypes)),
)
else:
return "Path (end at ({}, {}) towards {}, length {}, width {}, {} polygons, {} vertices, layers {}, datatypes {})".format(
self.x,
self.y,
self.direction,
self.length,
self.w * 2,
len(self.polygons),
sum([len(p) for p in self.polygons]),
list(set(self.layers)),
list(set(self.datatypes)),
)
def translate(self, dx, dy):
"""
Translate this object.
Parameters
----------
dx : number
Distance to move in the x-direction.
dy : number
Distance to move in the y-direction.
Returns
-------
out : `Path`
This object.
"""
vec = numpy.array((dx, dy))
self.polygons = [points + vec for points in self.polygons]
self.x += dx
self.y += dy
return self
def rotate(self, angle, center=(0, 0)):
"""
Rotate this object.
Parameters
----------
angle : number
The angle of rotation (in *radians*).
center : array-like[2]
Center point for the rotation.
Returns
-------
out : `Path`
This object.
"""
ca = numpy.cos(angle)
sa = numpy.sin(angle) * _mpone
c0 = numpy.array(center)
if isinstance(self.direction, basestring):
self.direction = _directions_dict[self.direction] * numpy.pi
self.direction += angle
cur = numpy.array((self.x, self.y)) - c0
self.x, self.y = cur * ca + cur[::-1] * sa + c0
self.polygons = [
(points - c0) * ca + (points - c0)[:, ::-1] * sa + c0
for points in self.polygons
]
return self
def scale(self, scalex, scaley=None, center=(0, 0)):
"""
Scale this object.
Parameters
----------
scalex : number
Scaling factor along the first axis.
scaley : number or None
Scaling factor along the second axis. If None, same as
`scalex`.
center : array-like[2]
Center point for the scaling operation.
Returns
-------
out : `Path`
This object.
Notes
-----
The direction of the path is not modified by this method and
its width is scaled only by `scalex`.
"""
c0 = numpy.array(center)
s = scalex if scaley is None else numpy.array((scalex, scaley))
self.polygons = [(points - c0) * s + c0 for points in self.polygons]
self.x = (self.x - c0[0]) * scalex + c0[0]
self.y = (self.y - c0[1]) * (scalex if scaley is None else scaley) + c0[1]
self.w *= scalex
return self
def mirror(self, p1, p2=(0, 0)):
"""
Mirror the polygons over a line through points 1 and 2.
Parameters
----------
p1 : array-like[2]
first point defining the reflection line
p2 : array-like[2]
second point defining the reflection line
Returns
-------
out : `Path`
This object.
"""
origin = numpy.array(p1)
vec = numpy.array(p2) - origin
vec_r = vec * (2 / numpy.inner(vec, vec))
self.polygons = [
numpy.outer(numpy.inner(points - origin, vec_r), vec) - points + 2 * origin
for points in self.polygons
]
dot = (self.x - origin[0]) * vec_r[0] + (self.y - origin[1]) * vec_r[1]
self.x = dot * vec[0] - self.x + 2 * origin[0]
self.y = dot * vec[1] - self.y + 2 * origin[1]
if isinstance(self.direction, basestring):
self.direction = _directions_dict[self.direction] * numpy.pi
self.direction = 2 * numpy.arctan2(vec[1], vec[0]) - self.direction
return self
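# Hedged sketch of the Path transforms above: they update the current position
# (`x`, `y`) and `direction` together with the stored polygons, so later
# `segment`/`arc` calls continue from the transformed end point.
#
#     >>> path = gdspy.Path(1, (0, 0))
#     >>> path.segment(5, '+x')
#     >>> path.rotate(numpy.pi / 2).scale(2)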
def segment(
self,
length,
direction=None,
final_width=None,
final_distance=None,
axis_offset=0,
layer=0,
datatype=0,
):
"""
Add a straight section to the path.
Parameters
----------
length : number
Length of the section to add.
direction : '+x', '-x', '+y', '-y' or number
Direction or angle (in *radians*) of rotation of the
segment.
final_width : number
If set, the paths of this segment will have their widths
linearly changed from their current value to this one.
final_distance : number
If set, the distance between paths is linearly changed from
its current value to this one along this segment.
axis_offset : number
If set, the paths will be offset from their direction by
this amount.
layer : integer, list
The GDSII layer numbers for the elements of each path. If
the number of layers in the list is less than the number
of paths, the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between
0 and 255). If the number of datatypes in the list is less
than the number of paths, the list is repeated.
Returns
-------
out : `Path`
This object.
"""
if direction is None:
direction = self.direction
else:
self.direction = direction
if direction == "+x":
ca = 1
sa = 0
elif direction == "-x":
ca = -1
sa = 0
elif direction == "+y":
ca = 0
sa = 1
elif direction == "-y":
ca = 0
sa = -1
else:
ca = numpy.cos(direction)
sa = numpy.sin(direction)
old_x = self.x
old_y = self.y
self.x += length * ca + axis_offset * sa
self.y += length * sa - axis_offset * ca
old_w = self.w
old_distance = self.distance
if final_width is not None:
self.w = final_width * 0.5
if final_distance is not None:
self.distance = final_distance
if (self.w != 0) or (old_w != 0):
for ii in range(self.n):
d0 = ii * self.distance - (self.n - 1) * self.distance * 0.5
old_d0 = ii * old_distance - (self.n - 1) * old_distance * 0.5
self.polygons.append(
numpy.array(
[
(
old_x + (old_d0 - old_w) * sa,
old_y - (old_d0 - old_w) * ca,
),
(
old_x + (old_d0 + old_w) * sa,
old_y - (old_d0 + old_w) * ca,
),
(self.x + (d0 + self.w) * sa, self.y - (d0 + self.w) * ca),
(self.x + (d0 - self.w) * sa, self.y - (d0 - self.w) * ca),
]
)
)
if self.w == 0:
self.polygons[-1] = self.polygons[-1][:-1]
if old_w == 0:
self.polygons[-1] = self.polygons[-1][1:]
self.length += (length ** 2 + axis_offset ** 2) ** 0.5
if isinstance(layer, list):
self.layers.extend((layer * (self.n // len(layer) + 1))[: self.n])
else:
self.layers.extend(layer for _ in range(self.n))
if isinstance(datatype, list):
self.datatypes.extend(
(datatype * (self.n // len(datatype) + 1))[: self.n]
)
else:
self.datatypes.extend(datatype for _ in range(self.n))
return self
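# Hedged sketch of `segment`: two parallel paths tapering from width 0.5 to 1.0
# over a 20-unit straight section (illustrative values only).
#
#     >>> pair = gdspy.Path(0.5, (0, 0), number_of_paths=2, distance=3)
#     >>> pair.segment(20, '+y', final_width=1.0, layer=[1, 3])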
def arc(
self,
radius,
initial_angle,
final_angle,
tolerance=0.01,
number_of_points=None,
max_points=199,
final_width=None,
final_distance=None,
layer=0,
datatype=0,
):
"""
Add a curved section to the path.
Parameters
----------
radius : number
Central radius of the section.
initial_angle : number
Initial angle of the curve (in *radians*).
final_angle : number
Final angle of the curve (in *radians*).
tolerance : float
Approximate curvature resolution. The number of points is
automatically calculated.
number_of_points : integer or None
Manually define the number of vertices that form the object
(polygonal approximation). Overrides `tolerance`.
max_points : integer
If the number of points in the element is greater than
`max_points`, it will be fractured in smaller polygons with
at most `max_points` each. If `max_points` is zero no
fracture will occur.
final_width : number
If set, the paths of this segment will have their widths
linearly changed from their current value to this one.
final_distance : number
If set, the distance between paths is linearly changed from
its current value to this one along this segment.
layer : integer, list
The GDSII layer numbers for the elements of each path. If
the number of layers in the list is less than the number of
paths, the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between 0
and 255). If the number of datatypes in the list is less
than the number of paths, the list is repeated.
Returns
-------
out : `Path`
This object.
Notes
-----
The original GDSII specification supports only a maximum of 199
vertices per polygon.
"""
cx = self.x - radius * numpy.cos(initial_angle)
cy = self.y - radius * numpy.sin(initial_angle)
self.x = cx + radius * numpy.cos(final_angle)
self.y = cy + radius * numpy.sin(final_angle)
if final_angle > initial_angle:
self.direction = final_angle + numpy.pi * 0.5
else:
self.direction = final_angle - numpy.pi * 0.5
old_w = self.w
old_distance = self.distance
if final_width is not None:
self.w = final_width * 0.5
if final_distance is not None:
self.distance = final_distance
if isinstance(number_of_points, float):
warnings.warn(
"[GDSPY] Use of a floating number as number_of_points "
"is deprecated in favor of tolerance.",
category=DeprecationWarning,
stacklevel=2,
)
tolerance = number_of_points
number_of_points = None
if number_of_points is None:
r = (
radius
+ max(old_distance, self.distance) * (self.n - 1) * 0.5
+ max(old_w, self.w)
)
number_of_points = max(
6,
2
+ 2
* int(
0.5
* abs(final_angle - initial_angle)
/ numpy.arccos(1 - tolerance / r)
+ 0.5
),
)
pieces = (
1
if max_points == 0
else int(numpy.ceil(number_of_points / float(max_points)))
)
number_of_points = number_of_points // pieces
widths = numpy.linspace(old_w, self.w, pieces + 1)
distances = numpy.linspace(old_distance, self.distance, pieces + 1)
angles = numpy.linspace(initial_angle, final_angle, pieces + 1)
if (self.w != 0) or (old_w != 0):
for jj in range(pieces):
for ii in range(self.n):
self.polygons.append(numpy.zeros((number_of_points, 2)))
r0 = (
radius
+ ii * distances[jj + 1]
- (self.n - 1) * distances[jj + 1] * 0.5
)
old_r0 = (
radius + ii * distances[jj] - (self.n - 1) * distances[jj] * 0.5
)
pts2 = number_of_points // 2
pts1 = number_of_points - pts2
ang = numpy.linspace(angles[jj], angles[jj + 1], pts1)
rad = numpy.linspace(old_r0 + widths[jj], r0 + widths[jj + 1], pts1)
self.polygons[-1][:pts1, 0] = numpy.cos(ang) * rad + cx
self.polygons[-1][:pts1, 1] = numpy.sin(ang) * rad + cy
if widths[jj + 1] == 0:
pts1 -= 1
pts2 += 1
if widths[jj] == 0:
self.polygons[-1][: pts1 - 1] = numpy.array(
self.polygons[-1][1:pts1]
)
pts1 -= 1
pts2 += 1
ang = numpy.linspace(angles[jj + 1], angles[jj], pts2)
rad = numpy.linspace(r0 - widths[jj + 1], old_r0 - widths[jj], pts2)
if rad[0] <= 0 or rad[-1] <= 0:
warnings.warn(
"[GDSPY] Path arc with width larger than radius "
"created: possible self-intersecting polygon.",
stacklevel=2,
)
self.polygons[-1][pts1:, 0] = numpy.cos(ang) * rad + cx
self.polygons[-1][pts1:, 1] = numpy.sin(ang) * rad + cy
self.length += abs((angles[jj + 1] - angles[jj]) * radius)
if isinstance(layer, list):
self.layers.extend((layer * (self.n // len(layer) + 1))[: self.n])
else:
self.layers.extend(layer for _ in range(self.n))
if isinstance(datatype, list):
self.datatypes.extend(
(datatype * (self.n // len(datatype) + 1))[: self.n]
)
else:
self.datatypes.extend(datatype for _ in range(self.n))
return self
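# Hedged sketch of `arc` (illustrative values): a quarter-circle bend of central
# radius 8; afterwards the path direction is tangent to the arc end.
#
#     >>> bend = gdspy.Path(1, (0, 0))
#     >>> bend.segment(4, '+x')
#     >>> bend.arc(8, -numpy.pi / 2, 0, tolerance=0.005)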
def turn(
self,
radius,
angle,
tolerance=0.01,
number_of_points=None,
max_points=199,
final_width=None,
final_distance=None,
layer=0,
datatype=0,
):
"""
Add a curved section to the path.
Parameters
----------
radius : number
Central radius of the section.
angle : 'r', 'l', 'rr', 'll' or number
Angle (in *radians*) of rotation of the path. The values
'r' and 'l' represent 90-degree turns cw and ccw,
respectively; the values 'rr' and 'll' represent analogous
180-degree turns.
tolerance : float
Approximate curvature resolution. The number of points is
automatically calculated.
number_of_points : integer or None
Manually define the number of vertices that form the object
(polygonal approximation). Overrides `tolerance`.
max_points : integer
If the number of points in the element is greater than
`max_points`, it will be fractured in smaller polygons with
at most `max_points` each. If `max_points` is zero no
fracture will occur.
final_width : number
If set, the paths of this segment will have their widths
linearly changed from their current value to this one.
final_distance : number
If set, the distance between paths is linearly changed from
its current value to this one along this segment.
layer : integer, list
The GDSII layer numbers for the elements of each path. If
the number of layers in the list is less than the number of
paths, the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between 0
and 255). If the number of datatypes in the list is less
than the number of paths, the list is repeated.
Returns
-------
out : `Path`
This object.
Notes
-----
The original GDSII specification supports only a maximum of 199
vertices per polygon.
"""
exact = True
if angle == "r":
delta_i = _halfpi
delta_f = 0
elif angle == "rr":
delta_i = _halfpi
delta_f = -delta_i
elif angle == "l":
delta_i = -_halfpi
delta_f = 0
elif angle == "ll":
delta_i = -_halfpi
delta_f = -delta_i
elif angle < 0:
exact = False
delta_i = _halfpi
delta_f = delta_i + angle
else:
exact = False
delta_i = -_halfpi
delta_f = delta_i + angle
if self.direction == "+x":
self.direction = 0
elif self.direction == "-x":
self.direction = numpy.pi
elif self.direction == "+y":
self.direction = _halfpi
elif self.direction == "-y":
self.direction = -_halfpi
elif exact:
exact = False
self.arc(
radius,
self.direction + delta_i,
self.direction + delta_f,
tolerance,
number_of_points,
max_points,
final_width,
final_distance,
layer,
datatype,
)
if exact:
self.direction = _directions_list[int(round(self.direction / _halfpi)) % 4]
return self
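# Hedged sketch of `turn`: 'l'/'r' are 90-degree turns relative to the current
# direction, so no absolute angles are needed (compare with `arc` above).
#
#     >>> route = gdspy.Path(1, (0, 0))
#     >>> route.segment(10, '+x').turn(5, 'l').segment(10).turn(5, 'rr')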
def parametric(
self,
curve_function,
curve_derivative=None,
tolerance=0.01,
number_of_evaluations=5,
max_points=199,
final_width=None,
final_distance=None,
relative=True,
layer=0,
datatype=0,
):
"""
Add a parametric curve to the path.
`curve_function` will be evaluated uniformly in the interval
[0, 1] at least `number_of_evaluations` times. More points will be
added to the curve at the midpoint between evaluations if that
point presents an error larger than `tolerance`.
Parameters
----------
curve_function : callable
Function that defines the curve. Must be a function of one
argument (that varies from 0 to 1) that returns a 2-element
array with the coordinates of the curve.
curve_derivative : callable
If set, it should be the derivative of the curve function.
Must be a function of one argument (that varies from 0 to 1)
that returns a 2-element array. If None, the derivative
will be calculated numerically.
tolerance : number
Acceptable tolerance for the approximation of the curve
function by a finite number of evaluations.
number_of_evaluations : integer
Initial number of points where the curve function will be
evaluated. According to `tolerance`, more evaluations will
be performed.
max_points : integer
Elements will be fractured until each polygon has at most
`max_points`. If `max_points` is less than 4, no fracture
will occur.
final_width : number or function
If set to a number, the paths of this segment will have
their widths linearly changed from their current value to
this one. If set to a function, it must be a function of
one argument (that varies from 0 to 1) and returns the width
of the path.
final_distance : number or function
If set to a number, the distance between paths is linearly
changed from its current value to this one. If set to a
function, it must be a function of one argument (that varies
from 0 to 1) and returns the distance between the paths.
relative : bool
If True, the return values of `curve_function` are used as
offsets from the current path position, i.e., to ensure a
continuous path, ``curve_function(0)`` must be (0, 0).
Otherwise, they are used as absolute coordinates.
layer : integer, list
The GDSII layer numbers for the elements of each path. If
the number of layers in the list is less than the number of
paths, the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between 0
and 255). If the number of datatypes in the list is less
than the number of paths, the list is repeated.
Returns
-------
out : `Path`
This object.
Notes
-----
The norm of the vector returned by `curve_derivative` is not
important. Only the direction is used.
The original GDSII specification supports only a maximum of 199
vertices per polygon.
Examples
--------
>>> def my_parametric_curve(t):
... return (2**t, t**2)
>>> def my_parametric_curve_derivative(t):
... return (0.69315 * 2**t, 2 * t)
>>> my_path.parametric(my_parametric_curve,
... my_parametric_curve_derivative)
"""
err = tolerance ** 2
points = list(numpy.linspace(0, 1, number_of_evaluations))
values = [numpy.array(curve_function(u)) for u in points]
delta = points[1]
i = 1
while i < len(points):
midpoint = 0.5 * (points[i] + points[i - 1])
midvalue = numpy.array(curve_function(midpoint))
test_err = (values[i] + values[i - 1]) / 2 - midvalue
if test_err[0] ** 2 + test_err[1] ** 2 > err:
delta = min(delta, points[i] - midpoint)
points.insert(i, midpoint)
values.insert(i, midvalue)
else:
i += 1
points = numpy.array(points)
values = numpy.array(values)
dvs = values[1:] - values[:-1]
self.length += ((dvs[:, 0] ** 2 + dvs[:, 1] ** 2) ** 0.5).sum()
delta *= 0.5
if curve_derivative is None:
derivs = numpy.vstack(
(
numpy.array(curve_function(delta)) - values[0],
[
numpy.array(curve_function(u + delta))
- numpy.array(curve_function(u - delta))
for u in points[1:-1]
],
values[-1] - numpy.array(curve_function(1 - delta)),
)
)
else:
derivs = numpy.array([curve_derivative(u) for u in points])
if not callable(final_width):
if final_width is None:
width = numpy.full_like(points, self.w)
else:
width = self.w + (final_width * 0.5 - self.w) * points
self.w = final_width * 0.5
else:
width = numpy.array([0.5 * final_width(u) for u in points])
self.w = width[-1]
if not callable(final_distance):
if final_distance is None:
dist = numpy.full_like(points, self.distance)
else:
dist = self.distance + (final_distance - self.distance) * points
self.distance = final_distance
else:
dist = numpy.array([final_distance(u) for u in points])
self.distance = dist[-1]
np = points.shape[0]
sh = (np, 1)
if relative:
x0 = values + numpy.array((self.x, self.y))
else:
x0 = values
dx = (
derivs[:, ::-1]
* _mpone
/ ((derivs[:, 0] ** 2 + derivs[:, 1] ** 2) ** 0.5).reshape(sh)
)
width = width.reshape(sh)
dist = dist.reshape(sh)
self.x = x0[-1, 0]
self.y = x0[-1, 1]
self.direction = numpy.arctan2(-dx[-1, 0], dx[-1, 1])
if max_points < 4:
max_points = np
else:
max_points = max_points // 2
i0 = 0
while i0 < np - 1:
i1 = min(i0 + max_points, np)
for ii in range(self.n):
p1 = x0[i0:i1] + dx[i0:i1] * (
dist[i0:i1] * (ii - (self.n - 1) * 0.5) + width[i0:i1]
)
p2 = (
x0[i0:i1]
+ dx[i0:i1]
* (dist[i0:i1] * (ii - (self.n - 1) * 0.5) - width[i0:i1])
)[::-1]
if width[i1 - 1, 0] == 0:
p2 = p2[1:]
if width[i0, 0] == 0:
p1 = p1[1:]
self.polygons.append(numpy.concatenate((p1, p2)))
if isinstance(layer, list):
self.layers.extend((layer * (self.n // len(layer) + 1))[: self.n])
else:
self.layers.extend(layer for _ in range(self.n))
if isinstance(datatype, list):
self.datatypes.extend(
(datatype * (self.n // len(datatype) + 1))[: self.n]
)
else:
self.datatypes.extend(datatype for _ in range(self.n))
i0 = i1 - 1
return self
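# Hedged sketch of `parametric` with a callable width (illustrative values):
# the path follows a relative sine offset while its width oscillates. With the
# default `relative=True`, the curve function returns (0, 0) at u = 0.
#
#     >>> wavy = gdspy.Path(0.5, (0, 0))
#     >>> wavy.parametric(
#     ...     lambda u: (10 * u, 2 * numpy.sin(2 * numpy.pi * u)),
#     ...     final_width=lambda u: 0.5 + 0.2 * numpy.cos(2 * numpy.pi * u),
#     ... )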
def bezier(
self,
points,
tolerance=0.01,
number_of_evaluations=5,
max_points=199,
final_width=None,
final_distance=None,
relative=True,
layer=0,
datatype=0,
):
"""
Add a Bezier curve to the path.
A Bezier curve is added to the path starting from its current
position and finishing at the last point in the `points` array.
Parameters
----------
points : array-like[N][2]
Control points defining the Bezier curve.
tolerance : number
Acceptable tolerance for the approximation of the curve
function by a finite number of evaluations.
number_of_evaluations : integer
Initial number of points where the curve function will be
evaluated. According to `tolerance`, more evaluations will
be performed.
max_points : integer
Elements will be fractured until each polygon has at most
`max_points`. If `max_points` is zero no fracture will
occur.
final_width : number or function
If set to a number, the paths of this segment will have
their widths linearly changed from their current value to
this one. If set to a function, it must be a function of
one argument (that varies from 0 to 1) and returns the width
of the path.
final_distance : number or function
If set to a number, the distance between paths is linearly
changed from its current value to this one. If set to a
function, it must be a function of one argument (that varies
from 0 to 1) and returns the distance between the paths.
relative : bool
If True, all coordinates in the `points` array are used as
offsets from the current path position, i.e., if the path is
at (1, -2) and the last point in the array is (10, 25), the
constructed Bezier will end at (1 + 10, -2 + 25) = (11, 23).
Otherwise, the points are used as absolute coordinates.
layer : integer, list
The GDSII layer numbers for the elements of each path. If
the number of layers in the list is less than the number of
paths, the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between 0
and 255). If the number of datatypes in the list is less
than the number of paths, the list is repeated.
Returns
-------
out : `Path`
This object.
Notes
-----
The original GDSII specification supports only a maximum of 199
vertices per polygon.
"""
if relative:
pts = numpy.vstack(([(0, 0)], points))
else:
pts = numpy.vstack(([(self.x, self.y)], points))
dpts = (pts.shape[0] - 1) * (pts[1:] - pts[:-1])
self.parametric(
_func_bezier(pts),
_func_bezier(dpts),
tolerance,
number_of_evaluations,
max_points,
final_width,
final_distance,
relative,
layer,
datatype,
)
return self
def smooth(
self,
points,
angles=None,
curl_start=1,
curl_end=1,
t_in=1,
t_out=1,
cycle=False,
tolerance=0.01,
number_of_evaluations=5,
max_points=199,
final_widths=None,
final_distances=None,
relative=True,
layer=0,
datatype=0,
):
"""
Add a smooth interpolating curve through the given points.
Uses the Hobby algorithm [1]_ to calculate a smooth
interpolating curve made of cubic Bezier segments between each
pair of points.
Parameters
----------
points : array-like[N][2]
Vertices in the interpolating curve.
angles : array-like[N + 1] or None
Tangent angles at each point (in *radians*). Any angles
defined as None are automatically calculated.
curl_start : number
Ratio between the mock curvatures at the first point and at
its neighbor. A value of 1 renders the first segment a good
approximation for a circular arc. A value of 0 will better
approximate a straight segment. It has no effect for closed
curves or when an angle is defined for the first point.
curl_end : number
Ratio between the mock curvatures at the last point and at
            its neighbor. It has no effect for closed curves or when an
            angle is defined for the last point.
t_in : number or array-like[N + 1]
Tension parameter when arriving at each point. One value
per point or a single value used for all points.
t_out : number or array-like[N + 1]
Tension parameter when leaving each point. One value per
point or a single value used for all points.
cycle : bool
If True, calculates control points for a closed curve,
with an additional segment connecting the first and last
points.
tolerance : number
Acceptable tolerance for the approximation of the curve
function by a finite number of evaluations.
number_of_evaluations : integer
Initial number of points where the curve function will be
evaluated. According to `tolerance`, more evaluations will
be performed.
max_points : integer
Elements will be fractured until each polygon has at most
`max_points`. If `max_points` is zero no fracture will
occur.
final_widths : array-like[M]
Each element corresponds to the final width of a segment in
the whole curve. If an element is a number, the paths of
this segment will have their widths linearly changed to this
value. If a function, it must be a function of one argument
(that varies from 0 to 1) and returns the width of the path.
The length of the array must be equal to the number of
segments in the curve, i.e., M = N - 1 for an open curve and
M = N for a closed one.
final_distances : array-like[M]
Each element corresponds to the final distance between paths
of a segment in the whole curve. If an element is a number,
            the distance between paths is linearly changed to this value.
            If a function, it must be a function of one argument (that
            varies from 0 to 1) and returns the distance between the paths. The
length of the array must be equal to the number of segments
in the curve, i.e., M = N - 1 for an open curve and M = N
for a closed one.
relative : bool
If True, all coordinates in the `points` array are used as
offsets from the current path position, i.e., if the path is
at (1, -2) and the last point in the array is (10, 25), the
constructed curve will end at (1 + 10, -2 + 25) = (11, 23).
Otherwise, the points are used as absolute coordinates.
layer : integer, list
The GDSII layer numbers for the elements of each path. If
the number of layers in the list is less than the number of
paths, the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between 0
and 255). If the number of datatypes in the list is less
than the number of paths, the list is repeated.
Returns
-------
out : `Path`
This object.
Notes
-----
The original GDSII specification supports only a maximum of 199
vertices per polygon.
References
----------
        .. [1] Hobby, J. D. *Discrete Comput. Geom.* (1986) 1: 123.
`DOI: 10.1007/BF02187690
<https://doi.org/10.1007/BF02187690>`_
"""
if relative:
points = numpy.vstack(([(0.0, 0.0)], points)) + numpy.array(
(self.x, self.y)
)
else:
points = numpy.vstack(([(self.x, self.y)], points))
cta, ctb = _hobby(points, angles, curl_start, curl_end, t_in, t_out, cycle)
if final_widths is None:
final_widths = [None] * cta.shape[0]
if final_distances is None:
final_distances = [None] * cta.shape[0]
for i in range(points.shape[0] - 1):
self.bezier(
[cta[i], ctb[i], points[i + 1]],
tolerance,
number_of_evaluations,
max_points,
final_widths[i],
final_distances[i],
False,
layer,
datatype,
)
if cycle:
self.bezier(
[cta[-1], ctb[-1], points[0]],
tolerance,
number_of_evaluations,
max_points,
final_widths[-1],
final_distances[-1],
False,
layer,
datatype,
)
return self
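# Illustrative usage sketch (not part of the original module): one plausible way to
# chain ``Path.bezier`` and ``Path.smooth``; the coordinates and the ``cell`` object
# (a gdspy.Cell) are assumptions made up for this example.
#
#     path = Path(0.5, initial_point=(0, 0))
#     path.bezier([(2, 0), (2, 2), (4, 2)])
#     path.smooth([(6, 4), (8, 0)], relative=False)
#     cell.add(path)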
_pmone = numpy.array((1.0, -1.0))
class L1Path(PolygonSet):
"""
Series of geometric objects that form a path or a collection of
parallel paths with Manhattan geometry.
.. deprecated:: 1.4
`L1Path` is deprecated in favor of FlexPath and will be removed
in a future version of Gdspy.
Parameters
----------
initial_point : array-like[2]
Starting position of the path.
direction : '+x', '+y', '-x', '-y'
Starting direction of the path.
width : number
The initial width of each path.
length : array-like
Lengths of each section to add.
turn : array-like
        Direction to turn before each section. The sign indicates the
        turn direction (ccw is positive), and the modulus is a
        multiplicative factor for the path width after each turn. Must
        have 1 element less than `length`.
number_of_paths : positive integer
Number of parallel paths to create simultaneously.
distance : number
Distance between the centers of adjacent paths.
max_points : integer
        The paths will be fractured into polygons with at most
        `max_points` vertices (must be at least 6). If `max_points` is zero,
        no fracture will occur.
layer : integer, list
The GDSII layer numbers for the elements of each path. If the
number of layers in the list is less than the number of paths,
the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between 0 and
255). If the number of datatypes in the list is less than the
number of paths, the list is repeated.
Attributes
----------
x : number
Final position of the path in the x direction.
y : number
Final position of the path in the y direction.
direction : '+x', '-x', '+y', '-y' or number
Direction or angle (in *radians*) the path points to. The
numerical angle is returned only after a rotation of the object.
properties : {integer: string} dictionary
Properties for this path.
Examples
--------
>>> length = [10, 30, 15, 15, 15, 15, 10]
>>> turn = [1, -1, -1, 3, -1, 1]
>>> l1path = gdspy.L1Path((0, 0), '+x', 2, length, turn)
>>> myCell.add(l1path)
"""
__slots__ = "layers", "datatypes", "polygons", "direction", "x", "y", "properties"
def __init__(
self,
initial_point,
direction,
width,
length,
turn,
number_of_paths=1,
distance=0,
max_points=199,
layer=0,
datatype=0,
):
warnings.warn(
"[GDSPY] L1Path is deprecated favor of FlexPath and will be "
"removed in a future version of Gdspy.",
category=DeprecationWarning,
stacklevel=2,
)
if not isinstance(layer, list):
layer = [layer]
if not isinstance(datatype, list):
datatype = [datatype]
layer = (layer * (number_of_paths // len(layer) + 1))[:number_of_paths]
datatype = (datatype * (number_of_paths // len(datatype) + 1))[:number_of_paths]
w = width * 0.5
points = len(turn) + 1 if max_points == 0 else max_points // 2 - 1
paths = [[[], []] for ii in range(number_of_paths)]
self.polygons = []
self.layers = []
self.datatypes = []
self.properties = {}
self.x = initial_point[0]
self.y = initial_point[1]
if direction == "+x":
direction = 0
for ii in range(number_of_paths):
d0 = ii * distance - (number_of_paths - 1) * distance * 0.5
paths[ii][0].append((initial_point[0], d0 + initial_point[1] - w))
paths[ii][1].append((initial_point[0], d0 + initial_point[1] + w))
elif direction == "+y":
direction = 1
for ii in range(number_of_paths):
d0 = (number_of_paths - 1) * distance * 0.5 - ii * distance
paths[ii][0].append((d0 + initial_point[0] + w, initial_point[1]))
paths[ii][1].append((d0 + initial_point[0] - w, initial_point[1]))
elif direction == "-x":
direction = 2
for ii in range(number_of_paths):
d0 = (number_of_paths - 1) * distance * 0.5 - ii * distance
paths[ii][0].append((initial_point[0], d0 + initial_point[1] + w))
paths[ii][1].append((initial_point[0], d0 + initial_point[1] - w))
elif direction == "-y":
direction = 3
for ii in range(number_of_paths):
d0 = ii * distance - (number_of_paths - 1) * distance * 0.5
paths[ii][0].append((d0 + initial_point[0] - w, initial_point[1]))
paths[ii][1].append((d0 + initial_point[0] + w, initial_point[1]))
for jj in range(len(turn)):
points -= 1
if direction == 0:
for ii in range(number_of_paths):
d0 = ii * distance - (number_of_paths - 1) * distance * 0.5
paths[ii][0].append(
(self.x + length[jj] - (d0 - w) * turn[jj], paths[ii][0][-1][1])
)
paths[ii][1].append(
(self.x + length[jj] - (d0 + w) * turn[jj], paths[ii][1][-1][1])
)
self.x += length[jj]
elif direction == 1:
for ii in range(number_of_paths):
d0 = ii * distance - (number_of_paths - 1) * distance * 0.5
paths[ii][0].append(
(paths[ii][0][-1][0], self.y + length[jj] - (d0 - w) * turn[jj])
)
paths[ii][1].append(
(paths[ii][1][-1][0], self.y + length[jj] - (d0 + w) * turn[jj])
)
self.y += length[jj]
elif direction == 2:
for ii in range(number_of_paths):
d0 = (number_of_paths - 1) * distance * 0.5 - ii * distance
paths[ii][0].append(
(self.x - length[jj] - (d0 + w) * turn[jj], paths[ii][0][-1][1])
)
paths[ii][1].append(
(self.x - length[jj] - (d0 - w) * turn[jj], paths[ii][1][-1][1])
)
self.x -= length[jj]
elif direction == 3:
for ii in range(number_of_paths):
d0 = (number_of_paths - 1) * distance * 0.5 - ii * distance
paths[ii][0].append(
(paths[ii][0][-1][0], self.y - length[jj] - (d0 + w) * turn[jj])
)
paths[ii][1].append(
(paths[ii][1][-1][0], self.y - length[jj] - (d0 - w) * turn[jj])
)
self.y -= length[jj]
if points == 0:
for p in paths:
if direction % 2 == 0:
min_dist = 1e300
for x1 in [p[0][-2][0], p[1][-2][0]]:
for x2 in [p[0][-1][0], p[1][-1][0]]:
if abs(x1 - x2) < min_dist:
x0 = 0.5 * (x1 + x2)
min_dist = abs(x1 - x2)
p0 = (x0, p[0][-1][1])
p1 = (x0, p[1][-1][1])
else:
min_dist = 1e300
for y1 in [p[0][-2][1], p[1][-2][1]]:
for y2 in [p[0][-1][1], p[1][-1][1]]:
if abs(y1 - y2) < min_dist:
y0 = 0.5 * (y1 + y2)
min_dist = abs(y1 - y2)
p0 = (p[0][-1][0], y0)
p1 = (p[1][-1][0], y0)
self.polygons.append(
numpy.array(p[0][:-1] + [p0, p1] + p[1][-2::-1])
)
p[0] = [p0, p[0][-1]]
p[1] = [p1, p[1][-1]]
self.layers.extend(layer)
self.datatypes.extend(datatype)
points = max_points // 2 - 2
if turn[jj] > 0:
direction = (direction + 1) % 4
else:
direction = (direction - 1) % 4
if direction == 0:
for ii in range(number_of_paths):
d0 = ii * distance - (number_of_paths - 1) * distance * 0.5
paths[ii][0].append((self.x + length[-1], paths[ii][0][-1][1]))
paths[ii][1].append((self.x + length[-1], paths[ii][1][-1][1]))
self.x += length[-1]
elif direction == 1:
for ii in range(number_of_paths):
d0 = ii * distance - (number_of_paths - 1) * distance * 0.5
paths[ii][0].append((paths[ii][0][-1][0], self.y + length[-1]))
paths[ii][1].append((paths[ii][1][-1][0], self.y + length[-1]))
self.y += length[-1]
elif direction == 2:
for ii in range(number_of_paths):
d0 = (number_of_paths - 1) * distance * 0.5 - ii * distance
paths[ii][0].append((self.x - length[-1], paths[ii][0][-1][1]))
paths[ii][1].append((self.x - length[-1], paths[ii][1][-1][1]))
self.x -= length[-1]
elif direction == 3:
for ii in range(number_of_paths):
d0 = (number_of_paths - 1) * distance * 0.5 - ii * distance
paths[ii][0].append((paths[ii][0][-1][0], self.y - length[-1]))
paths[ii][1].append((paths[ii][1][-1][0], self.y - length[-1]))
            self.y -= length[-1]
self.direction = ["+x", "+y", "-x", "-y"][direction]
self.polygons.extend(numpy.array(p[0] + p[1][::-1]) for p in paths)
self.layers.extend(layer)
self.datatypes.extend(datatype)
def __str__(self):
return "L1Path (end at ({}, {}) towards {}, {} polygons, {} vertices, layers {}, datatypes {})".format(
self.x,
self.y,
self.direction,
len(self.polygons),
sum([len(p) for p in self.polygons]),
list(set(self.layers)),
list(set(self.datatypes)),
)
def rotate(self, angle, center=(0, 0)):
"""
Rotate this object.
Parameters
----------
angle : number
The angle of rotation (in *radians*).
center : array-like[2]
Center point for the rotation.
Returns
-------
out : `L1Path`
This object.
"""
ca = numpy.cos(angle)
sa = numpy.sin(angle) * _mpone
c0 = numpy.array(center)
if isinstance(self.direction, basestring):
self.direction = _directions_dict[self.direction] * numpy.pi
self.direction += angle
cur = numpy.array((self.x, self.y)) - c0
self.x, self.y = cur * ca + cur[::-1] * sa + c0
self.polygons = [
(points - c0) * ca + (points - c0)[:, ::-1] * sa + c0
for points in self.polygons
]
return self
class PolyPath(PolygonSet):
"""
Series of geometric objects that form a polygonal path or a
collection of parallel polygonal paths.
.. deprecated:: 1.4
`PolyPath` is deprecated in favor of FlexPath and will be removed
in a future version of Gdspy.
Parameters
----------
points : array-like[N][2]
Points along the center of the path.
width : number or array-like[N]
Width of the path. If an array is given, width at each
endpoint.
number_of_paths : positive integer
Number of parallel paths to create simultaneously.
distance : number or array-like[N]
Distance between the centers of adjacent paths. If an array is
given, distance at each endpoint.
corners : 'miter' or 'bevel'
Type of joins.
ends : 'flush', 'round', 'extended'
Type of end caps for the paths.
max_points : integer
        The paths will be fractured into polygons with at most
        `max_points` vertices (must be at least 4). If `max_points` is zero,
        no fracture will occur.
layer : integer, list
The GDSII layer numbers for the elements of each path. If the
number of layers in the list is less than the number of paths,
the list is repeated.
datatype : integer, list
The GDSII datatype for the elements of each path (between 0 and
255). If the number of datatypes in the list is less than the
number of paths, the list is repeated.
Notes
-----
The bevel join will give strange results if the number of paths is
greater than 1.
"""
__slots__ = "layers", "datatypes", "polygons", "properties"
def __init__(
self,
points,
width,
number_of_paths=1,
distance=0,
corners="miter",
ends="flush",
max_points=199,
layer=0,
datatype=0,
):
warnings.warn(
"[GDSPY] PolyPath is deprecated favor of FlexPath and will "
"be removed in a future version of Gdspy.",
category=DeprecationWarning,
stacklevel=2,
)
if not isinstance(layer, list):
layer = [layer]
if not isinstance(datatype, list):
datatype = [datatype]
if hasattr(width, "__iter__"):
width = numpy.array(width) * 0.5
else:
width = numpy.array([width * 0.5])
len_w = len(width)
if hasattr(distance, "__iter__"):
distance = numpy.array(distance)
else:
distance = numpy.array([distance])
len_d = len(distance)
points = numpy.array(points, dtype=float)
self.polygons = []
self.layers = []
self.datatypes = []
self.properties = {}
if points.shape[0] == 2 and number_of_paths == 1:
v = points[1] - points[0]
v = v / (v[0] ** 2 + v[1] ** 2) ** 0.5
w0 = width[0]
w1 = width[1 % len_w]
if ends == "round":
a = numpy.arctan2(v[1], v[0]) + _halfpi
self.polygons.append(
Round(
points[0],
w0,
initial_angle=a,
final_angle=a + numpy.pi,
number_of_points=33,
).polygons[0]
)
self.polygons.append(
Round(
points[1],
w1,
initial_angle=a - numpy.pi,
final_angle=a,
number_of_points=33,
).polygons[0]
)
self.layers.extend(layer[:1] * 2)
self.datatypes.extend(datatype[:1] * 2)
if ends == "extended":
points[0] = points[0] - v * w0
points[1] = points[1] + v * w1
u = numpy.array((-v[1], v[0]))
if w0 == 0:
self.polygons.append(
numpy.array((points[0], points[1] - u * w1, points[1] + u * w1))
)
elif w1 == 0:
self.polygons.append(
numpy.array((points[0] + u * w0, points[0] - u * w0, points[1]))
)
else:
self.polygons.append(
numpy.array(
(
points[0] + u * w0,
points[0] - u * w0,
points[1] - u * w1,
points[1] + u * w1,
)
)
)
self.layers.append(layer[0])
self.datatypes.append(datatype[0])
return
if corners not in ["miter", "bevel"]:
if corners in [0, 1]:
corners = ["miter", "bevel"][corners]
warnings.warn(
"[GDSPY] Argument corners must be one of 'miter' or 'bevel'.",
category=DeprecationWarning,
stacklevel=2,
)
else:
raise ValueError(
"[GDSPY] Argument corners must be one of 'miter' or 'bevel'."
)
bevel = corners == "bevel"
if ends not in ["flush", "round", "extended"]:
if ends in [0, 1, 2]:
ends = ["flush", "round", "extended"][ends]
warnings.warn(
"[GDSPY] Argument ends must be one of 'flush', "
"'round', or 'extended'.",
category=DeprecationWarning,
stacklevel=2,
)
else:
raise ValueError(
"[GDSPY] Argument ends must be one of 'flush', "
"'round', or 'extended'."
)
if ends == "extended":
v = points[0] - points[1]
v = v / (v[0] ** 2 + v[1] ** 2) ** 0.5
points[0] = points[0] + v * width[0]
v = points[-1] - points[-2]
v = v / (v[0] ** 2 + v[1] ** 2) ** 0.5
points[-1] = points[-1] + v * width[(points.shape[0] - 1) % len_w]
elif ends == "round":
v0 = points[1] - points[0]
angle0 = numpy.arctan2(v0[1], v0[0]) + _halfpi
v0 = numpy.array((-v0[1], v0[0])) / (v0[0] ** 2 + v0[1] ** 2) ** 0.5
d0 = 0.5 * (number_of_paths - 1) * distance[0]
v1 = points[-1] - points[-2]
angle1 = numpy.arctan2(v1[1], v1[0]) - _halfpi
v1 = numpy.array((-v1[1], v1[0])) / (v1[0] ** 2 + v1[1] ** 2) ** 0.5
j1w = (points.shape[0] - 1) % len_w
j1d = (points.shape[0] - 1) % len_d
d1 = 0.5 * (number_of_paths - 1) * distance[j1d]
self.polygons.extend(
(
Round(
points[0] + v0 * (ii * distance[0] - d0),
width[0],
initial_angle=angle0,
final_angle=angle0 + numpy.pi,
number_of_points=33,
).polygons[0]
for ii in range(number_of_paths)
)
)
self.polygons.extend(
(
Round(
points[-1] + v1 * (ii * distance[j1d] - d1),
width[j1w],
initial_angle=angle1,
final_angle=angle1 + numpy.pi,
number_of_points=33,
).polygons[0]
)
for ii in range(number_of_paths)
)
self.layers.extend(
((layer * (number_of_paths // len(layer) + 1))[:number_of_paths]) * 2
)
self.datatypes.extend(
((datatype * (number_of_paths // len(datatype) + 1))[:number_of_paths])
* 2
)
v = points[1] - points[0]
v = numpy.array((-v[1], v[0])) / (v[0] ** 2 + v[1] ** 2) ** 0.5
d0 = 0.5 * (number_of_paths - 1) * distance[0]
d1 = 0.5 * (number_of_paths - 1) * distance[1 % len_d]
paths = [
[
[points[0] + (ii * distance[0] - d0 - width[0]) * v],
[points[0] + (ii * distance[0] - d0 + width[0]) * v],
]
for ii in range(number_of_paths)
]
p1 = [
(
points[1] + (ii * distance[1 % len_d] - d1 - width[1 % len_w]) * v,
points[1] + (ii * distance[1 % len_d] - d1 + width[1 % len_w]) * v,
)
for ii in range(number_of_paths)
]
for jj in range(1, points.shape[0] - 1):
j0d = jj % len_d
j0w = jj % len_w
j1d = (jj + 1) % len_d
j1w = (jj + 1) % len_w
v = points[jj + 1] - points[jj]
v = numpy.array((-v[1], v[0])) / (v[0] ** 2 + v[1] ** 2) ** 0.5
d0 = d1
d1 = 0.5 * (number_of_paths - 1) * distance[j1d]
p0 = p1
p1 = []
pp = []
for ii in range(number_of_paths):
pp.append(
(
points[jj] + (ii * distance[j0d] - d0 - width[j0w]) * v,
points[jj] + (ii * distance[j0d] - d0 + width[j0w]) * v,
)
)
p1.append(
(
points[jj + 1] + (ii * distance[j1d] - d1 - width[j1w]) * v,
points[jj + 1] + (ii * distance[j1d] - d1 + width[j1w]) * v,
)
)
for kk in (0, 1):
p0m = paths[ii][kk][-1] - p0[ii][kk]
p1p = pp[ii][kk] - p1[ii][kk]
vec = p0m[0] * p1p[1] - p1p[0] * p0m[1]
if abs(vec) > 1e-30:
p = (
_pmone
* (
p0m * p1p[::-1] * p1[ii][kk]
- p1p * p0m[::-1] * p0[ii][kk]
+ p0m * p1p * (p0[ii][kk][::-1] - p1[ii][kk][::-1])
)
/ vec
)
l0 = (p - pp[ii][kk]) * p1p
l1 = (p - p0[ii][kk]) * p0m
if bevel and l0[0] + l0[1] > 0 and l1[0] + l1[1] < 0:
paths[ii][kk].append(p0[ii][kk])
paths[ii][kk].append(pp[ii][kk])
else:
paths[ii][kk].append(p)
if (
max_points > 0
and len(paths[ii][0]) + len(paths[ii][1]) + 3 > max_points
):
diff = paths[ii][0][0] - paths[ii][1][0]
if diff[0] ** 2 + diff[1] ** 2 == 0:
paths[ii][1] = paths[ii][1][1:]
diff = paths[ii][0][-1] - paths[ii][1][-1]
if diff[0] ** 2 + diff[1] ** 2 == 0:
self.polygons.append(
numpy.array(paths[ii][0] + paths[ii][1][-2::-1])
)
else:
self.polygons.append(
numpy.array(paths[ii][0] + paths[ii][1][::-1])
)
paths[ii][0] = paths[ii][0][-1:]
paths[ii][1] = paths[ii][1][-1:]
self.layers.append(layer[ii % len(layer)])
self.datatypes.append(datatype[ii % len(datatype)])
for ii in range(number_of_paths):
diff = paths[ii][0][0] - paths[ii][1][0]
if diff[0] ** 2 + diff[1] ** 2 == 0:
paths[ii][1] = paths[ii][1][1:]
diff = p1[ii][0] - p1[ii][1]
if diff[0] ** 2 + diff[1] ** 2 != 0:
paths[ii][0].append(p1[ii][0])
paths[ii][1].append(p1[ii][1])
self.polygons.extend(numpy.array(pol[0] + pol[1][::-1]) for pol in paths)
self.layers.extend(
(layer * (number_of_paths // len(layer) + 1))[:number_of_paths]
)
self.datatypes.extend(
(datatype * (number_of_paths // len(datatype) + 1))[:number_of_paths]
)
def __str__(self):
return "PolyPath ({} polygons, {} vertices, layers {}, datatypes {})".format(
len(self.polygons),
sum([len(p) for p in self.polygons]),
list(set(self.layers)),
list(set(self.datatypes)),
)
from gdspy.path import _func_bezier
|
bcs-ui/backend/tests/dashboard/workloads/test_container.py | masanqi/bk-bcs | 599 | 11136513 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import mock
import pytest
pytestmark = pytest.mark.django_db
namespace, pod_name, container_name = 'default', 'test_pod_name', 'echoserver'
class TestContainer:
""" 测试 Container 相关接口 """
def test_list(self, api_client, project_id, cluster_id, dashboard_container_api_patch):
""" 测试获取资源列表接口 """
response = api_client.get(
f'/api/dashboard/projects/{project_id}/clusters/{cluster_id}/'
+ f'namespaces/{namespace}/workloads/pods/{pod_name}/containers/'
)
assert response.json()['code'] == 0
ret = response.json()['data'][0]
assert set(ret.keys()) == {'container_id', 'image', 'name', 'status', 'message', 'reason'}
def test_retrieve(self, api_client, project_id, cluster_id, dashboard_container_api_patch):
""" 测试获取单个容器信息 """
response = api_client.get(
f'/api/dashboard/projects/{project_id}/clusters/{cluster_id}/'
+ f'namespaces/{namespace}/workloads/pods/{pod_name}/containers/{container_name}/'
)
assert response.json()['code'] == 0
assert set(response.json()['data'].keys()) == {
'host_name',
'host_ip',
'container_ip',
'container_id',
'container_name',
'image',
'network_mode',
'ports',
'command',
'volumes',
'labels',
'resources',
}
def test_fetch_env_info(self, api_client, project_id, cluster_id, dashboard_container_api_patch):
""" 测试获取单个容器环境变量配置信息 """
response = api_client.get(
f'/api/dashboard/projects/{project_id}/clusters/{cluster_id}/namespaces/{namespace}'
+ f'/workloads/pods/{pod_name}/containers/{container_name}/env_info/'
)
assert response.json()['code'] == 0
assert response.json()['data'] == [
{'name': 'env1', 'value': 'xxx'},
{'name': 'env2', 'value': 'xxx'},
{'name': 'env3', 'value': 'xxx'},
]
|
mmseg/models/decode_heads/dnl_head.py | weiyx16/mmsegmentation | 103 | 11136522 | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import NonLocal2d
from torch import nn
from ..builder import HEADS
from .fcn_head import FCNHead
class DisentangledNonLocal2d(NonLocal2d):
"""Disentangled Non-Local Blocks.
Args:
temperature (float): Temperature to adjust attention. Default: 0.05
"""
def __init__(self, *arg, temperature, **kwargs):
super().__init__(*arg, **kwargs)
self.temperature = temperature
self.conv_mask = nn.Conv2d(self.in_channels, 1, kernel_size=1)
def embedded_gaussian(self, theta_x, phi_x):
"""Embedded gaussian with temperature."""
# NonLocal2d pairwise_weight: [N, HxW, HxW]
pairwise_weight = torch.matmul(theta_x, phi_x)
if self.use_scale:
# theta_x.shape[-1] is `self.inter_channels`
pairwise_weight /= theta_x.shape[-1]**0.5
pairwise_weight /= self.temperature
pairwise_weight = pairwise_weight.softmax(dim=-1)
return pairwise_weight
def forward(self, x):
# x: [N, C, H, W]
n = x.size(0)
# g_x: [N, HxW, C]
g_x = self.g(x).view(n, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
# theta_x: [N, HxW, C], phi_x: [N, C, HxW]
if self.mode == 'gaussian':
theta_x = x.view(n, self.in_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
if self.sub_sample:
phi_x = self.phi(x).view(n, self.in_channels, -1)
else:
phi_x = x.view(n, self.in_channels, -1)
elif self.mode == 'concatenation':
theta_x = self.theta(x).view(n, self.inter_channels, -1, 1)
phi_x = self.phi(x).view(n, self.inter_channels, 1, -1)
else:
theta_x = self.theta(x).view(n, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(n, self.inter_channels, -1)
# subtract mean
theta_x -= theta_x.mean(dim=-2, keepdim=True)
phi_x -= phi_x.mean(dim=-1, keepdim=True)
pairwise_func = getattr(self, self.mode)
# pairwise_weight: [N, HxW, HxW]
pairwise_weight = pairwise_func(theta_x, phi_x)
# y: [N, HxW, C]
y = torch.matmul(pairwise_weight, g_x)
# y: [N, C, H, W]
y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels,
*x.size()[2:])
# unary_mask: [N, 1, HxW]
unary_mask = self.conv_mask(x)
unary_mask = unary_mask.view(n, 1, -1)
unary_mask = unary_mask.softmax(dim=-1)
# unary_x: [N, 1, C]
unary_x = torch.matmul(unary_mask, g_x)
# unary_x: [N, C, 1, 1]
unary_x = unary_x.permute(0, 2, 1).contiguous().reshape(
n, self.inter_channels, 1, 1)
output = x + self.conv_out(y + unary_x)
return output
@HEADS.register_module()
class DNLHead(FCNHead):
"""Disentangled Non-Local Neural Networks.
This head is the implementation of `DNLNet
<https://arxiv.org/abs/2006.06668>`_.
Args:
reduction (int): Reduction factor of projection transform. Default: 2.
use_scale (bool): Whether to scale pairwise_weight by
sqrt(1/inter_channels). Default: False.
mode (str): The nonlocal mode. Options are 'embedded_gaussian',
            'dot_product'. Default: 'embedded_gaussian'.
temperature (float): Temperature to adjust attention. Default: 0.05
"""
def __init__(self,
reduction=2,
use_scale=True,
mode='embedded_gaussian',
temperature=0.05,
**kwargs):
super(DNLHead, self).__init__(num_convs=2, **kwargs)
self.reduction = reduction
self.use_scale = use_scale
self.mode = mode
self.temperature = temperature
self.dnl_block = DisentangledNonLocal2d(
in_channels=self.channels,
reduction=self.reduction,
use_scale=self.use_scale,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
mode=self.mode,
temperature=self.temperature)
def forward(self, inputs):
"""Forward function."""
x = self._transform_inputs(inputs)
output = self.convs[0](x)
output = self.dnl_block(output)
output = self.convs[1](output)
if self.concat_input:
output = self.conv_cat(torch.cat([x, output], dim=1))
output = self.cls_seg(output)
return output
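# Illustrative configuration sketch (not part of the original module): the values
# below are assumptions following the usual MMSegmentation decode-head conventions
# (e.g. ResNet-101 stage-4 features and 19 Cityscapes classes).
#
#     head = DNLHead(
#         in_channels=2048,
#         in_index=3,
#         channels=512,
#         dropout_ratio=0.1,
#         num_classes=19,
#         norm_cfg=dict(type='SyncBN', requires_grad=True),
#         align_corners=False)
#     # With the selected backbone feature map of shape [N, 2048, H/8, W/8],
#     # head(features) returns logits of shape [N, num_classes, H/8, W/8].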
|
Configuration/Geometry/python/GeometryExtendedGFlashReco_cff.py | ckamtsikis/cmssw | 852 | 11136530 | import FWCore.ParameterSet.Config as cms
# Ideal geometry, needed for transient ECAL alignement
from Configuration.Geometry.GeometryExtendedGFlash_cff import *
from Configuration.Geometry.GeometryReco_cff import *
|
cogs/admin.py | igotadell/pollmaster | 101 | 11136538 | <reponame>igotadell/pollmaster
import logging
from discord.ext import commands
class Admin(commands.Cog):
def __init__(self, bot):
self.bot = bot
    # every command needs owner permissions
async def cog_check(self, ctx):
return self.bot.owner == ctx.author
async def cog_command_error(self, ctx, error):
if isinstance(error, commands.CheckFailure):
await ctx.send("Only the owner can use this module. Join the support discord server if you are having "
"any problems. This usage has been logged.")
logger.warning(f'User {ctx.author} ({ctx.author.id}) has tried to access a restricted '
f'command via {ctx.message.content}.')
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Missing a required argument for this command.")
else:
logger.warning(error)
@commands.command(aliases=['r'])
async def reload(self, ctx, *, cog):
if cog == 'c':
cog = 'poll_controls'
logger.info(f'Trying to reload cog: cogs.{cog}.')
reply = ''
try:
self.bot.reload_extension('cogs.'+cog)
reply = f'Extension "cogs.{cog}" successfully reloaded.'
except commands.ExtensionNotFound:
reply = f'Extension "cogs.{cog}" not found.'
except commands.NoEntryPointError:
reply = f'Extension "cogs.{cog}" is missing a setup function.'
except commands.ExtensionFailed:
reply = f'Extension "cogs.{cog}" failed to start.'
except commands.ExtensionNotLoaded:
reply = f'Extension "cogs.{cog}" is not loaded... trying to load it. '
try:
self.bot.load_extension('cogs.'+cog)
except commands.ExtensionAlreadyLoaded:
reply += f'Could not load or reload extension since it is already loaded...'
except commands.ExtensionNotFound:
reply += f'Extension "cogs.{cog}" not found.'
except commands.ExtensionFailed:
reply = f'Extension "cogs.{cog}" failed to start.'
finally:
logger.info(reply)
await ctx.send(reply)
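# Illustrative loading sketch (not part of the original cog; the prefix and owner
# assignment are placeholders): discord.py loads this module through its extension
# system, which calls ``setup(bot)`` below.
#
#     bot = commands.Bot(command_prefix='!')
#     bot.owner = some_user          # compared against ctx.author in cog_check
#     bot.load_extension('cogs.admin')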
def setup(bot):
global logger
logger = logging.getLogger('discord')
bot.add_cog(Admin(bot)) |
pgoapi/protos/pogoprotos/data/logs/raid_rewards_log_entry_pb2.py | aroo135/pgoapi | 842 | 11136548 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/data/logs/raid_rewards_log_entry.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.inventory.item import item_data_pb2 as pogoprotos_dot_inventory_dot_item_dot_item__data__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/data/logs/raid_rewards_log_entry.proto',
package='pogoprotos.data.logs',
syntax='proto3',
serialized_pb=_b('\n1pogoprotos/data/logs/raid_rewards_log_entry.proto\x12\x14pogoprotos.data.logs\x1a)pogoprotos/inventory/item/item_data.proto\"\x93\x02\n\x13RaidRewardsLogEntry\x12@\n\x06result\x18\x01 \x01(\x0e\x32\x30.pogoprotos.data.logs.RaidRewardsLogEntry.Result\x12\x14\n\x0cis_exclusive\x18\x02 \x01(\x08\x12\x32\n\x05items\x18\x03 \x03(\x0b\x32#.pogoprotos.inventory.item.ItemData\x12<\n\x0f\x64\x65\x66\x61ult_rewards\x18\x04 \x03(\x0b\x32#.pogoprotos.inventory.item.ItemData\x12\x10\n\x08stardust\x18\x05 \x01(\x05\" \n\x06Result\x12\t\n\x05UNSET\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x62\x06proto3')
,
dependencies=[pogoprotos_dot_inventory_dot_item_dot_item__data__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_RAIDREWARDSLOGENTRY_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='pogoprotos.data.logs.RaidRewardsLogEntry.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSET', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=362,
serialized_end=394,
)
_sym_db.RegisterEnumDescriptor(_RAIDREWARDSLOGENTRY_RESULT)
_RAIDREWARDSLOGENTRY = _descriptor.Descriptor(
name='RaidRewardsLogEntry',
full_name='pogoprotos.data.logs.RaidRewardsLogEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='pogoprotos.data.logs.RaidRewardsLogEntry.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_exclusive', full_name='pogoprotos.data.logs.RaidRewardsLogEntry.is_exclusive', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='items', full_name='pogoprotos.data.logs.RaidRewardsLogEntry.items', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='default_rewards', full_name='pogoprotos.data.logs.RaidRewardsLogEntry.default_rewards', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stardust', full_name='pogoprotos.data.logs.RaidRewardsLogEntry.stardust', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_RAIDREWARDSLOGENTRY_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=119,
serialized_end=394,
)
_RAIDREWARDSLOGENTRY.fields_by_name['result'].enum_type = _RAIDREWARDSLOGENTRY_RESULT
_RAIDREWARDSLOGENTRY.fields_by_name['items'].message_type = pogoprotos_dot_inventory_dot_item_dot_item__data__pb2._ITEMDATA
_RAIDREWARDSLOGENTRY.fields_by_name['default_rewards'].message_type = pogoprotos_dot_inventory_dot_item_dot_item__data__pb2._ITEMDATA
_RAIDREWARDSLOGENTRY_RESULT.containing_type = _RAIDREWARDSLOGENTRY
DESCRIPTOR.message_types_by_name['RaidRewardsLogEntry'] = _RAIDREWARDSLOGENTRY
RaidRewardsLogEntry = _reflection.GeneratedProtocolMessageType('RaidRewardsLogEntry', (_message.Message,), dict(
DESCRIPTOR = _RAIDREWARDSLOGENTRY,
__module__ = 'pogoprotos.data.logs.raid_rewards_log_entry_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.data.logs.RaidRewardsLogEntry)
))
_sym_db.RegisterMessage(RaidRewardsLogEntry)
# @@protoc_insertion_point(module_scope)
|
plugin.video.fanfilm/resources/lib/libraries/unwise.py | mrknow/filmkodi | 105 | 11136564
# -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
def execute(str_eval):
page_value=""
try:
ss="w,i,s,e=("+str_eval+')'
exec (ss)
page_value=__unwise(w,i,s,e)
except: return
return page_value
def __unwise(w, i, s, e):
lIll = 0;
ll1I = 0;
Il1l = 0;
ll1l = [];
l1lI = [];
while True:
if (lIll < 5):
l1lI.append(w[lIll])
elif (lIll < len(w)):
ll1l.append(w[lIll]);
lIll+=1;
if (ll1I < 5):
l1lI.append(i[ll1I])
elif (ll1I < len(i)):
ll1l.append(i[ll1I])
ll1I+=1;
if (Il1l < 5):
l1lI.append(s[Il1l])
elif (Il1l < len(s)):
ll1l.append(s[Il1l]);
Il1l+=1;
if (len(w) + len(i) + len(s) + len(e) == len(ll1l) + len(l1lI) + len(e)):
break;
lI1l = ''.join(ll1l)
I1lI = ''.join(l1lI)
ll1I = 0;
l1ll = [];
for lIll in range(0,len(ll1l),2):
ll11 = -1;
if ( ord(I1lI[ll1I]) % 2):
ll11 = 1;
l1ll.append(chr( int(lI1l[lIll: lIll+2], 36) - ll11));
ll1I+=1;
if (ll1I >= len(l1lI)):
ll1I = 0;
ret=''.join(l1ll)
if 'eval(function(w,i,s,e)' in ret:
ret=re.compile('eval\(function\(w,i,s,e\).*}\((.*?)\)').findall(ret)[0]
return execute(ret)
else:
return ret
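# Illustrative call pattern (not part of the original module; ``html`` is a
# placeholder for the scraped page source): callers typically capture the quoted
# arguments of the packed ``eval(function(w,i,s,e)...)`` block and feed them to
# ``execute`` as a single string.
#
#     packed = re.compile(r'eval\(function\(w,i,s,e\).*}\((.*?)\)').findall(html)[0]
#     decoded = execute(packed)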
|
OpenMatch/modules/matchers/__init__.py | vishalbelsare/OpenMatch | 403 | 11136568
from OpenMatch.modules.matchers.kernel_matcher import KernelMatcher
|
utils/metrics.py | niqbal996/ViewAL | 126 | 11136572 | import numpy as np
# https://stats.stackexchange.com/questions/179835/how-to-build-a-confusion-matrix-for-a-multiclass-classifier
def calculate_miou(confusion_matrix):
MIoU = np.divide(np.diag(confusion_matrix), (
np.sum(confusion_matrix, axis=1) + np.sum(confusion_matrix, axis=0) -
np.diag(confusion_matrix)))
MIoU = np.nanmean(MIoU)
return MIoU
class Evaluator(object):
def __init__(self, num_class):
np.seterr(divide='ignore', invalid='ignore')
self.num_class = num_class
self.confusion_matrix = np.zeros((self.num_class,) * 2)
def Pixel_Accuracy(self):
return np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()
def Pixel_Accuracy_Class(self):
Acc = np.divide(np.diag(self.confusion_matrix), self.confusion_matrix.sum(axis=1))
Acc = np.nanmean(Acc)
return Acc
def Mean_Intersection_over_Union(self):
MIoU = np.divide(np.diag(self.confusion_matrix), (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix)))
MIoU = np.nanmean(MIoU)
return MIoU
def Mean_Intersection_over_Union_20(self):
MIoU = 0
if self.num_class > 20:
subset_20 = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 23, 27, 32, 33, 35, 38])
confusion_matrix = self.confusion_matrix[subset_20[:, None], subset_20]
MIoU = np.divide(np.diag(confusion_matrix), (
np.sum(confusion_matrix, axis=1) + np.sum(confusion_matrix, axis=0) -
np.diag(confusion_matrix)))
MIoU = np.nanmean(MIoU)
return MIoU
def Frequency_Weighted_Intersection_over_Union(self):
freq = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)
iu = np.divide(np.diag(self.confusion_matrix), (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix)))
FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()
return FWIoU
def _generate_matrix(self, gt_image, pre_image):
mask = (gt_image >= 0) & (gt_image < self.num_class)
label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]
count = np.bincount(label, minlength=self.num_class**2)
confusion_matrix = count.reshape(self.num_class, self.num_class)
return confusion_matrix
def add_batch(self, gt_image, pre_image, return_miou=False):
assert gt_image.shape == pre_image.shape
confusion_matrix = self._generate_matrix(gt_image, pre_image)
self.confusion_matrix += confusion_matrix
if return_miou:
return calculate_miou(confusion_matrix)
def reset(self):
self.confusion_matrix = np.zeros((self.num_class,) * 2)
def dump_matrix(self, path):
np.save(path, self.confusion_matrix)
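# Minimal usage sketch (illustrative only; the labels below are made up):
#
#     evaluator = Evaluator(num_class=3)
#     gt = np.array([[0, 1], [2, 1]])
#     pred = np.array([[0, 1], [1, 1]])
#     evaluator.add_batch(gt, pred)
#     print(evaluator.Mean_Intersection_over_Union())
#     print(evaluator.Pixel_Accuracy())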
|
PythonAPI/carissma_project/lib/python3.5/site-packages/mpl_toolkits/axes_grid/axes_grid.py | AbdulHoffmann/carla_carissma | 445 | 11136619
from mpl_toolkits.axisartist.axes_divider import LocatableAxes
from mpl_toolkits.axisartist.axes_grid import (
AxesGrid, CbarAxes, Grid, ImageGrid)
|
apps/modules/theme_setting/process/page.py | Bension/osroom | 579 | 11136625 | #!/usr/bin/env python
# -*-coding:utf-8-*-
# @Time : 2017/11/1 ~ 2019/9/1
# @Author : <NAME>
import os
import shutil
from flask import request
import regex as re
from flask_babel import gettext
from apps.app import mdbs
from apps.configs.sys_config import THEME_TEMPLATE_FOLDER
from apps.core.flask.reqparse import arg_verify
from apps.core.utils.get_config import get_config
def add_page():
routing = request.argget.all('routing')
content = request.argget.all('content', "")
ctype = request.argget.all('type', 'html')
theme_name = request.argget.all("theme_name")
s, r = arg_verify([(gettext("theme name"), theme_name)], required=True)
if not s:
return r
s, r = arg_verify(reqargs=[(gettext("file type"), ctype)], only=["html"],
required=True)
if not s:
return r
if ctype == "html":
dirname = "pages"
else:
dirname = "static"
regex_filter = r"(osr/|osr-admin/)"
s, r = arg_verify(reqargs=[(gettext("routing"), routing)], required=True)
if not s:
data = r
elif re.search(regex_filter, routing):
data = {"msg": gettext("This route can not be used"), "msg_type": "w",
"custom_status": 403}
else:
filename = os.path.split(routing)[-1]
path = "{}/{}/{}/{}".format(
THEME_TEMPLATE_FOLDER,
theme_name,
dirname,
os.path.split(routing)[0]).replace(
"//",
"/")
        # Check whether a directory with the same name already exists
relative_path = "{}/{}".format(path, filename)
if os.path.exists(relative_path):
data = {
"msg": gettext("This route can not be used"),
"msg_type": "w",
"custom_status": 403}
return data
        # Check whether an html file with the same name already exists
file = "{}/{}.{}".format(path, filename, ctype)
if os.path.exists(file):
data = {"msg": gettext("Routing existing"), "msg_type": "w",
"custom_status": 403}
return data
if not os.path.exists(path):
os.makedirs(path)
with open(file, "w") as wf:
wf.write(content)
        # Record the custom page in the theme document
mdbs["sys"].db.theme.update_one({"theme_name": theme_name},
{"$addToSet": {"custom_pages": "{}.{}".format(filename, ctype)}},
upsert=True)
data = {"msg": gettext("Added successfully"), "msg_type": "s",
"custom_status": 201, "url": "/{}".format(routing.strip("/"))}
return data
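# Illustrative mapping (not part of the original module; the values are made up):
# with theme_name='mytheme', routing='about/team' and type='html', add_page() writes
# the page to <THEME_TEMPLATE_FOLDER>/mytheme/pages/about/team.html and registers
# 'team.html' in the theme's ``custom_pages`` list.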
def delete_page():
"""
    Delete a custom page on the admin side.
:return:
"""
filename = request.argget.all('filename', "index").strip("/")
file_path = request.argget.all('file_path', "").strip("/")
theme_name = request.argget.all("theme_name")
s, r = arg_verify([(gettext("theme name"), theme_name)], required=True)
if not s:
return r
path = os.path.join(
THEME_TEMPLATE_FOLDER,
theme_name
)
file_path = "{}/{}".format(path, file_path)
file = os.path.join(file_path, filename)
if not os.path.exists(file):
mdbs["sys"].db.theme.update_one(
{"theme_name": theme_name},
{"$pull": {"custom_pages": filename}}
)
data = {"msg": gettext("File not found,'{}'").format(file),
"msg_type": "w", "custom_status": 404}
else:
custom = mdbs["sys"].db.theme.find_one(
{
"theme_name": theme_name,
"custom_pages": filename
})
if custom:
os.remove(file)
mdbs["sys"].db.theme.update_one(
{
"theme_name": theme_name
},
{"$pull": {"custom_pages": filename}})
if not os.listdir(file_path):
shutil.rmtree(file_path)
data = {"msg": gettext("Successfully deleted"), "msg_type": "s",
"custom_status": 204}
else:
data = {
"msg": gettext("This file can not be deleted"),
"msg_type": "w",
"custom_status": 403}
return data
|
pyriemann/__init__.py | qbarthelemy/pyRiemann | 301 | 11136632 | from ._version import __version__
from . import classification
from . import tangentspace
from . import channelselection
from . import estimation
from . import spatialfilters
from . import clustering
from . import stats
from . import embedding
from . import preprocessing
__all__ = [
'__version__',
'classification',
'tangentspace',
'channelselection',
'estimation',
'spatialfilters',
'clustering',
'stats',
'embedding',
'preprocessing',
]
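# Typical entry points (illustrative sketch, assuming the public pyRiemann API;
# ``epochs_data`` of shape [n_trials, n_channels, n_times] and ``labels`` are placeholders):
#
#     from pyriemann.estimation import Covariances
#     from pyriemann.classification import MDM
#
#     covs = Covariances().fit_transform(epochs_data)
#     clf = MDM().fit(covs, labels)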
|
classifier.py | lloyd31/GANDCTAnalysis | 108 | 11136639 | import argparse
import datetime as dt
import os
import numpy as np
import tensorflow as tf
from src.dataset import deserialize_data
from src.models import (build_multinomial_regression,
build_multinomial_regression_l1,
build_multinomial_regression_l1_l2,
build_multinomial_regression_l2, build_resnet,
build_simple_cnn, build_simple_nn)
AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32
# Upsampling / FFHQ
# TRAIN_SIZE = 20_000
# VAL_SIZE = 2_000
# TEST_SIZE = 10_000
# complete size
TRAIN_SIZE = 500_000
VAL_SIZE = 50_000
TEST_SIZE = 150_000
CLASSES = 5
CHANNEL_DIM = 3
INPUT_SHAPE = [128, 128, CHANNEL_DIM]
# Fix for consistent results
tf.random.set_seed(1)
def load_tfrecord(path, train=True, unbounded=True):
"""Load tfrecords."""
raw_image_dataset = tf.data.TFRecordDataset(path)
dataset = raw_image_dataset.map(lambda x: deserialize_data(
x, shape=INPUT_SHAPE), num_parallel_calls=AUTOTUNE)
if train:
dataset = dataset.take(TRAIN_SIZE)
dataset = dataset.batch(BATCH_SIZE)
if unbounded:
dataset = dataset.repeat()
return dataset.prefetch(AUTOTUNE)
def build_model(args):
input_shape = INPUT_SHAPE
mirrored_strategy = tf.distribute.MirroredStrategy()
learning_rate = 0.001
# select model
with mirrored_strategy.scope():
if args.MODEL == "resnet":
model = build_resnet(input_shape, CLASSES)
elif args.MODEL == "nn":
model = build_simple_nn(input_shape, CLASSES, l2=args.l2)
elif args.MODEL == "cnn":
model = build_simple_cnn(input_shape, CLASSES)
elif args.MODEL == "log":
model = build_multinomial_regression(
input_shape, CLASSES)
elif args.MODEL == "log1":
model = build_multinomial_regression_l1(
input_shape, CLASSES, l_1=args.l1)
elif args.MODEL == "log2":
model = build_multinomial_regression_l2(
input_shape, CLASSES, l_2=args.l2)
elif args.MODEL == "log3":
model = build_multinomial_regression_l1_l2(
input_shape, CLASSES, l_1=args.l1, l_2=args.l2)
else:
raise NotImplementedError(
"Error model you selected not available!")
if CLASSES == 1:
loss = tf.keras.losses.binary_crossentropy
else:
loss = tf.keras.losses.sparse_categorical_crossentropy
metrics = ["acc"]
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate, ),
loss=loss,
metrics=metrics)
model_name = f"{args.MODEL}_{dt.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}_batch_{args.batch_size}_learning_rate_{learning_rate}"
return model, model_name
def train(args):
train_dataset = load_tfrecord(args.TRAIN_DATASET)
val_dataset = load_tfrecord(args.VAL_DATASET)
model, model_name = build_model(args)
log_path = f"./log/{model_name}"
ckpt_dir = f"./ckpt/{model_name}/"
model_dir = f"./final_models/{model_name}/"
os.makedirs(ckpt_dir)
os.makedirs(model_dir)
update_freq = 50
if args.debug:
callbacks = None
else:
callbacks = [
tf.keras.callbacks.TensorBoard(
log_dir=log_path,
update_freq=update_freq,
),
tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
patience=args.early_stopping,
restore_best_weights=True,
),
]
model.summary()
model.fit(train_dataset, epochs=args.epochs, steps_per_epoch=TRAIN_SIZE // BATCH_SIZE,
validation_data=val_dataset,
validation_steps=VAL_SIZE // BATCH_SIZE,
callbacks=callbacks)
_, eval_accuracy = model.evaluate(
val_dataset, steps=VAL_SIZE // BATCH_SIZE, verbose=0)
return model, eval_accuracy, model_dir
def train_and_save_model(args):
model, eval_accuracy, model_dir = train(args)
print(
f"Saving model with accuracy - {eval_accuracy:.2%} - to {model_dir}")
model.save(model_dir, save_format="tf")
def test(args):
test_dataset = load_tfrecord(args.TEST_DATASET, train=False)
# load model
model = tf.keras.models.load_model(args.MODEL)
model.summary()
model.evaluate(test_dataset, steps=TEST_SIZE // BATCH_SIZE)
def main(args):
args.grayscale = True
if args.mode == "train":
train_and_save_model(args)
elif args.mode == "test":
test(args)
else:
raise NotImplementedError("Specified non valid mode!")
def parse_args():
global BATCH_SIZE, INPUT_SHAPE, CLASSES, CHANNEL_DIM
parser = argparse.ArgumentParser()
parser.add_argument(
"--size", "-s", help="Images to load.", type=int, default=None)
commands = parser.add_subparsers(help="Mode {train|test}.", dest="mode")
train = commands.add_parser("train")
epochs = 50
train.add_argument(
"MODEL", help="Select model to train {resnet, cnn, nn, log, log1, log2, log3}.", type=str)
train.add_argument("TRAIN_DATASET", help="Dataset to load.", type=str)
train.add_argument("VAL_DATASET", help="Dataset to load.", type=str)
train.add_argument("--debug", "-d", help="Debug mode.",
action="store_true")
train.add_argument(
"--epochs", "-e", help=f"Epochs to train for; Default: {epochs}.", type=int, default=epochs)
train.add_argument("--image_size",
help=f"Image size. Default: {INPUT_SHAPE}", type=int, default=128)
train.add_argument("--early_stopping",
help=f"Early stopping criteria. Default: 5", type=int, default=5)
train.add_argument("--classes",
help=f"Classes. Default: {CLASSES}", type=int, default=CLASSES)
train.add_argument("--grayscale", "-g",
help=f"Train on grayscaled images.", action="store_true")
train.add_argument("--batch_size", "-b",
help=f"Batch size. Default: {BATCH_SIZE}", type=int, default=BATCH_SIZE)
train.add_argument("--l1",
help=f"L1 reguralizer intensity. Default: 0.01", type=float, default=0.01)
train.add_argument("--l2",
help=f"L2 reguralizer intensity. Default: 0.01", type=float, default=0.01)
test = commands.add_parser("test")
test.add_argument("MODEL", help="Path to model.", type=str)
test.add_argument("TEST_DATASET", help="Dataset to load.", type=str)
test.add_argument("--image_size",
help=f"Image size. Default: {INPUT_SHAPE}", type=int, default=128)
test.add_argument("--grayscale", "-g",
help=f"Test on grayscaled images.", action="store_true")
test.add_argument("--batch_size", "-b",
help=f"Batch size. Default: {BATCH_SIZE}", type=int, default=BATCH_SIZE)
args = parser.parse_args()
BATCH_SIZE = args.batch_size
if args.grayscale:
CHANNEL_DIM = 1
INPUT_SHAPE = [args.image_size, args.image_size, CHANNEL_DIM]
if "classes" in args:
CLASSES = args.classes
return args
if __name__ == "__main__":
main(parse_args())
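# Example invocations (illustrative; the dataset paths and model directory are placeholders):
#
#     python classifier.py train log2 train.tfrecords val.tfrecords --l2 0.01 -b 32
#     python classifier.py test final_models/<saved_model_dir> test.tfrecords -b 32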
|
pypower/t/t_modcost.py | Bengt/PYPOWER | 221 | 11136662 | # Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Tests for code in C{modcost}.
"""
from numpy import array
from pypower.totcost import totcost
from pypower.modcost import modcost
from pypower.t.t_begin import t_begin
from pypower.t.t_is import t_is
from pypower.t.t_end import t_end
def t_modcost(quiet=False):
"""Tests for code in C{modcost}.
@author: <NAME> (PSERC Cornell)
"""
n_tests = 80
t_begin(n_tests, quiet)
## generator cost data
# 1 startup shutdown n x1 y1 ... xn yn
# 2 startup shutdown n c(n-1) ... c0
gencost0 = array([
[2, 0, 0, 3, 0.01, 0.1, 1, 0, 0, 0, 0, 0],
[2, 0, 0, 5, 0.0006, 0.005, 0.04, 0.3, 2, 0, 0, 0],
[1, 0, 0, 4, 0, 0, 10, 200, 20, 600, 30, 1200],
[1, 0, 0, 4, -30, -2400, -20, -1800, -10, -1000, 0, 0]
])
gencost = modcost(gencost0, 5, 'SCALE_F')
##----- POLYSHIFT -----
t = 'modcost SCALE_F - quadratic'
t_is(totcost(gencost, array([0, 0, 0, 0])) / 5, [1, 2, 0, 0], 8, t)
t_is(totcost(gencost, array([1, 0, 0, 0])) / 5, [1.11, 2, 0, 0], 8, t)
t_is(totcost(gencost, array([2, 0, 0, 0])) / 5, [1.24, 2, 0, 0], 8, t)
t = 'modcost SCALE_F - 4th order polynomial'
t_is(totcost(gencost, array([0, 0, 0, 0])) / 5, [1, 2, 0, 0], 8, t)
t_is(totcost(gencost, array([0, 1, 0, 0])) / 5, [1, 2.3456, 0, 0], 8, t)
t_is(totcost(gencost, array([0, 2, 0, 0])) / 5, [1, 2.8096, 0, 0], 8, t)
t = 'modcost SCALE_F - pwl (gen)'
t_is(totcost(gencost, array([0, 0, 5, 0 ])) / 5, [1, 2, 100, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 10, 0])) / 5, [1, 2, 200, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 15, 0])) / 5, [1, 2, 400, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 20, 0])) / 5, [1, 2, 600, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 25, 0])) / 5, [1, 2, 900, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 30, 0])) / 5, [1, 2, 1200, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 35, 0])) / 5, [1, 2, 1500, 0], 8, t)
t = 'modcost SCALE_F - pwl (load)'
t_is(totcost(gencost, array([0, 0, 0, -5 ])) / 5, [1, 2, 0, -500], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -10])) / 5, [1, 2, 0, -1000], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -15])) / 5, [1, 2, 0, -1400], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -20])) / 5, [1, 2, 0, -1800], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -25])) / 5, [1, 2, 0, -2100], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -30])) / 5, [1, 2, 0, -2400], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -35])) / 5, [1, 2, 0, -2700], 8, t)
gencost = modcost(gencost0, 2, 'SCALE_X')
t = 'modcost SCALE_X - quadratic'
t_is(totcost(gencost, array([0, 0, 0, 0]) * 2), [1, 2, 0, 0], 8, t)
t_is(totcost(gencost, array([1, 0, 0, 0]) * 2), [1.11, 2, 0, 0], 8, t)
t_is(totcost(gencost, array([2, 0, 0, 0]) * 2), [1.24, 2, 0, 0], 8, t)
t = 'modcost SCALE_X - 4th order polynomial'
t_is(totcost(gencost, array([0, 0, 0, 0]) * 2), [1, 2, 0, 0], 8, t)
t_is(totcost(gencost, array([0, 1, 0, 0]) * 2), [1, 2.3456, 0, 0], 8, t)
t_is(totcost(gencost, array([0, 2, 0, 0]) * 2), [1, 2.8096, 0, 0], 8, t)
t = 'modcost SCALE_X - pwl (gen)'
t_is(totcost(gencost, array([0, 0, 5, 0 ]) * 2), [1, 2, 100, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 10, 0]) * 2), [1, 2, 200, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 15, 0]) * 2), [1, 2, 400, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 20, 0]) * 2), [1, 2, 600, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 25, 0]) * 2), [1, 2, 900, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 30, 0]) * 2), [1, 2, 1200, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 35, 0]) * 2), [1, 2, 1500, 0], 8, t)
t = 'modcost SCALE_X - pwl (load)'
t_is(totcost(gencost, array([0, 0, 0, -5 ]) * 2), [1, 2, 0, -500], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -10]) * 2), [1, 2, 0, -1000], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -15]) * 2), [1, 2, 0, -1400], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -20]) * 2), [1, 2, 0, -1800], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -25]) * 2), [1, 2, 0, -2100], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -30]) * 2), [1, 2, 0, -2400], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -35]) * 2), [1, 2, 0, -2700], 8, t)
gencost = modcost(gencost0, 3, 'SHIFT_F')
t = 'modcost SHIFT_F - quadratic'
t_is(totcost(gencost, array([0, 0, 0, 0])) - 3, [1, 2, 0, 0], 8, t)
t_is(totcost(gencost, array([1, 0, 0, 0])) - 3, [1.11, 2, 0, 0], 8, t)
t_is(totcost(gencost, array([2, 0, 0, 0])) - 3, [1.24, 2, 0, 0], 8, t)
t = 'modcost SHIFT_F - 4th order polynomial'
t_is(totcost(gencost, array([0, 0, 0, 0])) - 3, [1, 2, 0, 0], 8, t)
t_is(totcost(gencost, array([0, 1, 0, 0])) - 3, [1, 2.3456, 0, 0], 8, t)
t_is(totcost(gencost, array([0, 2, 0, 0])) - 3, [1, 2.8096, 0, 0], 8, t)
t = 'modcost SHIFT_F - pwl (gen)'
t_is(totcost(gencost, array([0, 0, 5, 0 ])) - 3, [1, 2, 100, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 10, 0])) - 3, [1, 2, 200, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 15, 0])) - 3, [1, 2, 400, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 20, 0])) - 3, [1, 2, 600, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 25, 0])) - 3, [1, 2, 900, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 30, 0])) - 3, [1, 2, 1200, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 35, 0])) - 3, [1, 2, 1500, 0], 8, t)
t = 'modcost SHIFT_F - pwl (load)'
t_is(totcost(gencost, array([0, 0, 0, -5 ])) - 3, [1, 2, 0, -500], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -10])) - 3, [1, 2, 0, -1000], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -15])) - 3, [1, 2, 0, -1400], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -20])) - 3, [1, 2, 0, -1800], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -25])) - 3, [1, 2, 0, -2100], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -30])) - 3, [1, 2, 0, -2400], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -35])) - 3, [1, 2, 0, -2700], 8, t)
gencost = modcost(gencost0, -4, 'SHIFT_X')
t = 'modcost SHIFT_X - quadratic'
t_is(totcost(gencost, array([0, 0, 0, 0]) - 4), [1, 2, 0, 0], 8, t)
t_is(totcost(gencost, array([1, 0, 0, 0]) - 4), [1.11, 2, 0, 0], 8, t)
t_is(totcost(gencost, array([2, 0, 0, 0]) - 4), [1.24, 2, 0, 0], 8, t)
t = 'modcost SHIFT_X - 4th order polynomial'
t_is(totcost(gencost, array([0, 0, 0, 0]) - 4), [1, 2, 0, 0], 8, t)
t_is(totcost(gencost, array([0, 1, 0, 0]) - 4), [1, 2.3456, 0, 0], 8, t)
t_is(totcost(gencost, array([0, 2, 0, 0]) - 4), [1, 2.8096, 0, 0], 8, t)
t = 'modcost SHIFT_X - pwl (gen)'
t_is(totcost(gencost, array([0, 0, 5, 0 ]) - 4), [1, 2, 100, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 10, 0]) - 4), [1, 2, 200, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 15, 0]) - 4), [1, 2, 400, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 20, 0]) - 4), [1, 2, 600, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 25, 0]) - 4), [1, 2, 900, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 30, 0]) - 4), [1, 2, 1200, 0], 8, t)
t_is(totcost(gencost, array([0, 0, 35, 0]) - 4), [1, 2, 1500, 0], 8, t)
t = 'modcost SHIFT_X - pwl (load)'
t_is(totcost(gencost, array([0, 0, 0, -5 ]) - 4), [1, 2, 0, -500], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -10]) - 4), [1, 2, 0, -1000], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -15]) - 4), [1, 2, 0, -1400], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -20]) - 4), [1, 2, 0, -1800], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -25]) - 4), [1, 2, 0, -2100], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -30]) - 4), [1, 2, 0, -2400], 8, t)
t_is(totcost(gencost, array([0, 0, 0, -35]) - 4), [1, 2, 0, -2700], 8, t)
t_end()
if __name__ == '__main__':
t_modcost(quiet=False)
|
neo/Prompt/Commands/Bootstrap.py | volekerb/neo-python | 387 | 11136673 | import sys
from neo.Settings import settings
from prompt_toolkit import prompt
import requests
from tqdm import tqdm
import tarfile
import shutil
import os
def BootstrapBlockchainFile(target_dir, download_location, bootstrap_name, require_confirm=True, delete_bootstrap_file=True):
if download_location is None:
print("no bootstrap location file specified. Please update your configuration file.")
sys.exit(0)
if require_confirm:
print("This will overwrite any data currently in %s.\nType 'confirm' to continue" % target_dir)
try:
confirm = prompt("[confirm]> ", is_password=False)
except KeyboardInterrupt:
confirm = False
if confirm == 'confirm':
return do_bootstrap(download_location, bootstrap_name, target_dir, delete_bootstrap_file=delete_bootstrap_file)
else:
return do_bootstrap(download_location,
bootstrap_name,
target_dir,
tmp_file_name=os.path.join(settings.DATA_DIR_PATH, 'btest.tar.gz'),
tmp_chain_name='btestchain',
delete_bootstrap_file=delete_bootstrap_file)
print("bootstrap cancelled")
sys.exit(0)
def do_bootstrap(download_location, bootstrap_name, destination_dir, tmp_file_name=None, tmp_chain_name='tmpchain', delete_bootstrap_file=True):
if tmp_file_name is None:
tmp_file_name = os.path.join(settings.DATA_DIR_PATH, 'bootstrap.tar.gz')
success = False
try:
source = requests.get(download_location)
source.raise_for_status()
source_json = source.json()
response = requests.get(source_json[bootstrap_name], stream=True)
response.raise_for_status()
print('will download file %s ' % source_json[bootstrap_name])
print('')
# Total size in bytes.
total_size = int(response.headers.get('content-length', 0))
chunkSize = 1024
with open(tmp_file_name, 'wb') as f:
pbar = tqdm(unit="B", total=total_size)
for chunk in response.iter_content(chunk_size=chunkSize):
if chunk: # filter out keep-alive new chunks
pbar.update(len(chunk))
f.write(chunk)
print("download complete")
if os.path.exists(destination_dir):
try:
shutil.rmtree(destination_dir)
except Exception as e:
print("couldn't remove existing dir: %s %s" % (e, destination_dir))
sys.exit(0)
print("Opening archive %s " % tmp_file_name)
# open file
tar = tarfile.open(tmp_file_name)
# get the name of the chain directory in the archive
datadir = tar.getnames()[0]
print("Extracting to %s " % tmp_chain_name)
tar.extractall(tmp_chain_name)
# construct current path in archive of Chain dir
chaindata_dir = "%s/%s" % (tmp_chain_name, datadir)
print("Moving to %s " % destination_dir)
# move chain dir in archive into LEVELDB_PATH
shutil.move(chaindata_dir, destination_dir)
print("closing archive")
tar.close()
success = True
except Exception as e:
print("Could not download: %s " % e)
finally:
print("cleaning up %s " % tmp_chain_name)
if os.path.exists(tmp_chain_name):
shutil.rmtree(tmp_chain_name)
if delete_bootstrap_file and os.path.exists(tmp_file_name):
print("removing temp bootstrap file %s " % tmp_file_name)
os.remove(tmp_file_name)
if success:
print("Successfully downloaded bootstrap chain!")
sys.exit(0)
|
knox/crypto.py | nnamdei/django-rest-knox | 788 | 11136675 | import binascii
from os import urandom as generate_bytes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from knox.settings import knox_settings
sha = knox_settings.SECURE_HASH_ALGORITHM
def create_token_string():
return binascii.hexlify(
generate_bytes(int(knox_settings.AUTH_TOKEN_CHARACTER_LENGTH / 2))
).decode()
def hash_token(token):
'''
Calculates the hash of a token.
    The token is unhexlified before hashing, so it must contain an even number
    of hex digits or a binascii.Error exception will be raised.
'''
digest = hashes.Hash(sha(), backend=default_backend())
digest.update(binascii.unhexlify(token))
return binascii.hexlify(digest.finalize()).decode()
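# Usage sketch (not part of the library; lengths and algorithms depend on knox settings,
# so the comments below are assumptions rather than guarantees):
#   token = create_token_string()   # hex string, AUTH_TOKEN_CHARACTER_LENGTH characters long
#   digest = hash_token(token)      # hex digest from the configured SECURE_HASH_ALGORITHM
#   hash_token("abc")               # raises binascii.Error (odd number of hex digits)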
|
dump_match/geom.py | hoverinc/OANet | 209 | 11136712 | <reponame>hoverinc/OANet<filename>dump_match/geom.py
# geom.py ---
#
# Filename: geom.py
# Description:
# Author: <NAME>
# Maintainer:
# Created: Thu Oct 5 14:53:24 2017 (+0200)
# Version:
# Package-Requires: ()
# URL:
# Doc URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change Log:
#
#
#
# Copyright (C)
# Visual Computing Group @ University of Victoria
# Computer Vision Lab @ EPFL
# Code:
import numpy as np
from utils import loadh5
from transformations import quaternion_from_matrix
def parse_geom(geom):
parsed_geom = {}
parsed_geom["K"] = geom[:9].reshape((3, 3))
parsed_geom["R"] = geom[9:18].reshape((3, 3))
parsed_geom["t"] = geom[18:21].reshape((3, 1))
parsed_geom["img_size"] = geom[21:23].reshape((2,))
parsed_geom["K_inv"] = geom[23:32].reshape((3, 3))
parsed_geom["q"] = geom[32:36].reshape([4, 1])
parsed_geom["q_inv"] = geom[36:40].reshape([4, 1])
return parsed_geom
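# Layout sketch inferred from the slicing above (an assumption, not a documented API):
# `geom` is a flat vector of 40 values ordered as
# [K(9), R(9), t(3), img_size(2), K_inv(9), q(4), q_inv(4)], e.g.:
#   parsed = parse_geom(np.arange(40, dtype=np.float64))
#   parsed["K"].shape    # -> (3, 3)
#   parsed["img_size"]   # -> array([21., 22.])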
def load_geom(geom_file, scale_factor=1.0, flip_R=False):
# load geometry file
geom_dict = loadh5(geom_file)
# Check if principal point is at the center
K = geom_dict["K"]
# assert(abs(K[0, 2]) < 1e-3 and abs(K[1, 2]) < 1e-3)
    # Rescale calibration according to previous resizing
S = np.asarray([[scale_factor, 0, 0],
[0, scale_factor, 0],
[0, 0, 1]])
K = np.dot(S, K)
geom_dict["K"] = K
# Transpose Rotation Matrix if needed
if flip_R:
R = geom_dict["R"].T.copy()
geom_dict["R"] = R
# append things to list
geom_list = []
geom_info_name_list = ["K", "R", "T", "imsize"]
for geom_info_name in geom_info_name_list:
geom_list += [geom_dict[geom_info_name].flatten()]
# Finally do K_inv since inverting K is tricky with theano
geom_list += [np.linalg.inv(geom_dict["K"]).flatten()]
# Get the quaternion from Rotation matrices as well
q = quaternion_from_matrix(geom_dict["R"])
geom_list += [q.flatten()]
# Also add the inverse of the quaternion
q_inv = q.copy()
np.negative(q_inv[1:], q_inv[1:])
geom_list += [q_inv.flatten()]
# Add to list
geom = np.concatenate(geom_list)
return geom
def np_skew_symmetric(v):
zero = np.zeros_like(v[:, 0])
M = np.stack([
zero, -v[:, 2], v[:, 1],
v[:, 2], zero, -v[:, 0],
-v[:, 1], v[:, 0], zero,
], axis=1)
return M
def np_unskew_symmetric(M):
v = np.concatenate([
0.5 * (M[:, 7] - M[:, 5])[None],
0.5 * (M[:, 2] - M[:, 6])[None],
0.5 * (M[:, 3] - M[:, 1])[None],
], axis=1)
return v
def get_episqr(x1, x2, dR, dt):
num_pts = len(x1)
# Make homogeneous coordinates
x1 = np.concatenate([
x1, np.ones((num_pts, 1))
], axis=-1).reshape(-1, 3, 1)
x2 = np.concatenate([
x2, np.ones((num_pts, 1))
], axis=-1).reshape(-1, 3, 1)
# Compute Fundamental matrix
dR = dR.reshape(1, 3, 3)
dt = dt.reshape(1, 3)
F = np.repeat(np.matmul(
np.reshape(np_skew_symmetric(dt), (-1, 3, 3)),
dR
).reshape(-1, 3, 3), num_pts, axis=0)
x2Fx1 = np.matmul(x2.transpose(0, 2, 1), np.matmul(F, x1)).flatten()
ys = x2Fx1**2
return ys.flatten()
def get_episym(x1, x2, dR, dt):
num_pts = len(x1)
# Make homogeneous coordinates
x1 = np.concatenate([
x1, np.ones((num_pts, 1))
], axis=-1).reshape(-1, 3, 1)
x2 = np.concatenate([
x2, np.ones((num_pts, 1))
], axis=-1).reshape(-1, 3, 1)
# Compute Fundamental matrix
dR = dR.reshape(1, 3, 3)
dt = dt.reshape(1, 3)
F = np.repeat(np.matmul(
np.reshape(np_skew_symmetric(dt), (-1, 3, 3)),
dR
).reshape(-1, 3, 3), num_pts, axis=0)
x2Fx1 = np.matmul(x2.transpose(0, 2, 1), np.matmul(F, x1)).flatten()
Fx1 = np.matmul(F, x1).reshape(-1, 3)
Ftx2 = np.matmul(F.transpose(0, 2, 1), x2).reshape(-1, 3)
ys = x2Fx1**2 * (
1.0 / (Fx1[..., 0]**2 + Fx1[..., 1]**2) +
1.0 / (Ftx2[..., 0]**2 + Ftx2[..., 1]**2))
return ys.flatten()
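# Quick sanity check (synthetic values, not from the dataset pipeline): with identity
# rotation and a unit translation along x, correspondences sharing the same y coordinate
# lie on the same epipolar line, so the symmetric epipolar distance is zero.
#   x1 = np.array([[0.0, 0.0]])
#   x2 = np.array([[0.5, 0.0]])
#   get_episym(x1, x2, np.eye(3), np.array([1.0, 0.0, 0.0]))  # -> array([0.])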
def get_sampsons(x1, x2, dR, dt):
num_pts = len(x1)
# Make homogeneous coordinates
x1 = np.concatenate([
x1, np.ones((num_pts, 1))
], axis=-1).reshape(-1, 3, 1)
x2 = np.concatenate([
x2, np.ones((num_pts, 1))
], axis=-1).reshape(-1, 3, 1)
# Compute Fundamental matrix
dR = dR.reshape(1, 3, 3)
dt = dt.reshape(1, 3)
F = np.repeat(np.matmul(
np.reshape(np_skew_symmetric(dt), (-1, 3, 3)),
dR
).reshape(-1, 3, 3), num_pts, axis=0)
x2Fx1 = np.matmul(x2.transpose(0, 2, 1), np.matmul(F, x1)).flatten()
Fx1 = np.matmul(F, x1).reshape(-1, 3)
Ftx2 = np.matmul(F.transpose(0, 2, 1), x2).reshape(-1, 3)
ys = x2Fx1**2 / (
Fx1[..., 0]**2 + Fx1[..., 1]**2 + Ftx2[..., 0]**2 + Ftx2[..., 1]**2
)
return ys.flatten()
#
# geom.py ends here
|
pytorch3d/common/workaround/utils.py | jkxing/pytorch3d | 6,041 | 11136717 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
def _safe_det_3x3(t: torch.Tensor):
"""
Fast determinant calculation for a batch of 3x3 matrices.
Note, result of this function might not be the same as `torch.det()`.
The differences might be in the last significant digit.
Args:
t: Tensor of shape (N, 3, 3).
Returns:
Tensor of shape (N) with determinants.
"""
det = (
t[..., 0, 0] * (t[..., 1, 1] * t[..., 2, 2] - t[..., 1, 2] * t[..., 2, 1])
- t[..., 0, 1] * (t[..., 1, 0] * t[..., 2, 2] - t[..., 2, 0] * t[..., 1, 2])
+ t[..., 0, 2] * (t[..., 1, 0] * t[..., 2, 1] - t[..., 2, 0] * t[..., 1, 1])
)
return det
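# Minimal check (illustrative only): the determinant of the identity matrix is 1,
# so a batch of 3x3 identities yields a tensor of ones.
#   batch = torch.eye(3).expand(4, 3, 3)
#   _safe_det_3x3(batch)  # -> tensor([1., 1., 1., 1.])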
|
data/model/oci/label.py | giuseppe/quay | 2,027 | 11136750 | <filename>data/model/oci/label.py
import logging
from data.model import InvalidLabelKeyException, InvalidMediaTypeException, DataModelException
from data.database import (
Label,
Manifest,
TagManifestLabel,
MediaType,
LabelSourceType,
db_transaction,
ManifestLabel,
TagManifestLabelMap,
TagManifestToManifest,
Repository,
TagManifest,
)
from data.text import prefix_search
from util.validation import validate_label_key
from util.validation import is_json
logger = logging.getLogger(__name__)
def list_manifest_labels(manifest_id, prefix_filter=None):
"""
Lists all labels found on the given manifest, with an optional filter by key prefix.
"""
query = (
Label.select(Label, MediaType)
.join(MediaType)
.switch(Label)
.join(LabelSourceType)
.switch(Label)
.join(ManifestLabel)
.where(ManifestLabel.manifest == manifest_id)
)
if prefix_filter is not None:
query = query.where(prefix_search(Label.key, prefix_filter))
return query
def get_manifest_label(label_uuid, manifest):
"""
Retrieves the manifest label on the manifest with the given UUID or None if none.
"""
try:
return (
Label.select(Label, LabelSourceType)
.join(LabelSourceType)
.where(Label.uuid == label_uuid)
.switch(Label)
.join(ManifestLabel)
.where(ManifestLabel.manifest == manifest)
.get()
)
except Label.DoesNotExist:
return None
def create_manifest_label(manifest_id, key, value, source_type_name, media_type_name=None):
"""
Creates a new manifest label on a specific tag manifest.
"""
if not key:
raise InvalidLabelKeyException("Missing key on label")
    # Note that we don't prevent invalid label names coming from the manifest from being stored, as Docker
# does not currently prevent them from being put into said manifests.
if source_type_name != "manifest" and not validate_label_key(key):
raise InvalidLabelKeyException("Key `%s` is invalid or reserved" % key)
# Find the matching media type. If none specified, we infer.
if media_type_name is None:
media_type_name = "text/plain"
if is_json(value):
media_type_name = "application/json"
try:
media_type_id = Label.media_type.get_id(media_type_name)
except MediaType.DoesNotExist:
raise InvalidMediaTypeException()
source_type_id = Label.source_type.get_id(source_type_name)
# Ensure the manifest exists.
try:
manifest = (
Manifest.select(Manifest, Repository)
.join(Repository)
.where(Manifest.id == manifest_id)
.get()
)
except Manifest.DoesNotExist:
return None
repository = manifest.repository
with db_transaction():
label = Label.create(
key=key, value=value, source_type=source_type_id, media_type=media_type_id
)
manifest_label = ManifestLabel.create(
manifest=manifest_id, label=label, repository=repository
)
return label
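# Hypothetical call (the identifiers, key and source type name below are made up and must
# exist/validate in the database for the call to succeed):
#   label = create_manifest_label(manifest_id=42, key="com.example.maintainer",
#                                 value="ops@example.com", source_type_name="api")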
def delete_manifest_label(label_uuid, manifest):
"""
    Deletes the manifest label with the given UUID on the given manifest.
Returns the label deleted or None if none.
"""
# Find the label itself.
label = get_manifest_label(label_uuid, manifest)
if label is None:
return None
if not label.source_type.mutable:
raise DataModelException("Cannot delete immutable label")
# Delete the mapping records and label.
# TODO: Remove this code once the TagManifest table is gone.
with db_transaction():
(TagManifestLabelMap.delete().where(TagManifestLabelMap.label == label).execute())
deleted_count = TagManifestLabel.delete().where(TagManifestLabel.label == label).execute()
if deleted_count != 1:
logger.warning("More than a single label deleted for matching label %s", label_uuid)
deleted_count = ManifestLabel.delete().where(ManifestLabel.label == label).execute()
if deleted_count != 1:
logger.warning("More than a single label deleted for matching label %s", label_uuid)
label.delete_instance(recursive=False)
return label
|
ipagnn/lib/figure_utils.py | deepneuralmachine/google-research | 23,901 | 11136755 | <gh_stars>1000+
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for working with matplotlib figures."""
import io
import imageio
import jax.numpy as jnp
import matplotlib.pyplot as plt
def figure_to_image(figure, dpi=None, close=True):
"""Converts the matplotlib plot specified by 'figure' to a numpy image."""
# Save the plot to a PNG in memory.
buf = io.BytesIO()
figure.savefig(buf, format='png', dpi=dpi, bbox_inches='tight')
buf.seek(0)
# Convert PNG buffer to numpy array
image = imageio.imread(buf)
buf.close()
if close:
plt.close(figure)
return image
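# Usage sketch (assumes a matplotlib backend that can render off-screen):
#   fig = plt.figure()
#   plt.plot([0, 1], [0, 1])
#   img = figure_to_image(fig, dpi=72)  # numpy array, e.g. (H, W, 4) for RGBA PNG output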
def make_figure(*, data, title, xlabel, ylabel):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title)
plt.imshow(data)
ax.set_aspect('equal')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.colorbar(orientation='vertical')
return fig
def per_layer_figure(*, state, key_format, items, title, xlabel, ylabel,
show_values=False):
"""Generates a figure with a subplot per layer with consistent scales."""
num_items = len(items)
fig, axes = plt.subplots(
nrows=1, ncols=num_items, figsize=(num_items * 3, 3))
fig.suptitle(title)
def get_value(index, item):
if key_format:
key = key_format.format(item)
all_values = state[key]
value = all_values[0]
else:
value = state[index]
return value
vmin = jnp.inf
vmax = -jnp.inf
for index, item in enumerate(items):
value = get_value(index, item)
value = jnp.where(jnp.isfinite(value), value, jnp.nan)
vmin = jnp.minimum(vmin, jnp.nanmin(value))
vmax = jnp.maximum(vmax, jnp.nanmax(value))
for index, item in enumerate(items):
if num_items == 1:
ax = axes
else:
ax = axes[index]
ax.set_title(f'Time = {index}')
ax.set_xlabel(xlabel)
if index == 0:
ax.set_ylabel(ylabel)
value = get_value(index, item)
im = ax.imshow(value, vmin=vmin, vmax=vmax)
if show_values and len(value) < 25:
# Add text overlays indicating the numerical value.
for node_index, row in enumerate(value):
for timestep, v in enumerate(row):
ax.text(timestep, node_index, str(v),
horizontalalignment='center', verticalalignment='center',
color='black')
ax.set_aspect('equal')
cbar_width = 0.05 # Fraction of a plot
cbar_padding = 0.05
half_padded_cbar_width = cbar_width + cbar_padding
padded_cbar_width = cbar_width + 2 * cbar_padding
fig.subplots_adjust(
right=1 - padded_cbar_width/(num_items+padded_cbar_width))
cbar_ax = fig.add_axes(
[1 - half_padded_cbar_width/(num_items+padded_cbar_width), # left
0.15, # bottom
cbar_width/(num_items+padded_cbar_width), # width
0.7, # top
]
)
fig.colorbar(im, cax=cbar_ax)
return fig
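# Hypothetical usage (key names and shapes are invented): one (nodes x timesteps) panel
# per "layer", all sharing a single color scale.
#   state = {'layer_%d' % i: jnp.zeros((1, 4, 6)) for i in range(3)}
#   fig = per_layer_figure(state=state, key_format='layer_{}', items=range(3),
#                          title='Demo', xlabel='timestep', ylabel='node')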
|
torchmetrics/functional/text/cer.py | lucadiliello/metrics | 769 | 11136756 | <filename>torchmetrics/functional/text/cer.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.text.helper import _edit_distance
def _cer_update(
predictions: Union[str, List[str]],
references: Union[str, List[str]],
) -> Tuple[Tensor, Tensor]:
"""Update the cer score with the current set of references and predictions.
Args:
predictions: Transcription(s) to score as a string or list of strings
references: Reference(s) for each speech input as a string or list of strings
Returns:
Number of edit operations to get from the reference to the prediction, summed over all samples
        Number of characters over all references
"""
if isinstance(predictions, str):
predictions = [predictions]
if isinstance(references, str):
references = [references]
errors = tensor(0, dtype=torch.float)
total = tensor(0, dtype=torch.float)
for prediction, reference in zip(predictions, references):
prediction_tokens = prediction
reference_tokens = reference
errors += _edit_distance(list(prediction_tokens), list(reference_tokens))
total += len(reference_tokens)
return errors, total
def _cer_compute(errors: Tensor, total: Tensor) -> Tensor:
"""Compute the Character error rate.
Args:
errors: Number of edit operations to get from the reference to the prediction, summed over all samples
total: Number of characters over all references
Returns:
Character error rate score
"""
return errors / total
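# Worked example (strings are made up): "abcd" vs. reference "abce" needs one substitution
# and the reference has 4 characters, so CER = 1 / 4 = 0.25.
#   _cer_compute(*_cer_update("abcd", "abce"))  # -> tensor(0.2500)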
def char_error_rate(
predictions: Union[str, List[str]],
references: Union[str, List[str]],
) -> Tensor:
"""character error rate is a common metric of the performance of an automatic speech recognition system. This
value indicates the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
Args:
predictions: Transcription(s) to score as a string or list of strings
references: Reference(s) for each speech input as a string or list of strings
Returns:
Character error rate score
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> char_error_rate(predictions=predictions, references=references)
tensor(0.3415)
"""
errors, total = _cer_update(predictions, references)
return _cer_compute(errors, total)
|
tests/storage/test_redis_sentinel.py | mymedia2/limits | 140 | 11136768 | <reponame>mymedia2/limits<gh_stars>100-1000
import unittest
import mock
import redis.sentinel
from limits.storage import RedisSentinelStorage, storage_from_string
from tests.storage.test_redis import SharedRedisTests
class RedisSentinelStorageTests(SharedRedisTests, unittest.TestCase):
def setUp(self):
self.storage_url = 'redis+sentinel://localhost:26379'
self.service_name = 'localhost-redis-sentinel'
self.storage = RedisSentinelStorage(
self.storage_url,
service_name=self.service_name
)
redis.sentinel.Sentinel([
("localhost", 26379)
]).master_for(self.service_name).flushall()
def test_init_options(self):
with mock.patch(
"limits.storage.redis_sentinel.get_dependency"
) as get_dependency:
storage_from_string(
self.storage_url + '/' + self.service_name,
connection_timeout=1
)
self.assertEqual(
get_dependency().Sentinel.call_args[1]['connection_timeout'], 1
) |
tools/linux_symbolize.py | fengjixuchui/bochspwn | 138 | 11136769 | #!/usr/bin/python
#
# Authors: <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import popen2
import re
import sys
if len(sys.argv) < 3:
print "usage: linux_symbolize.py <memlog.txt> <dbgsymbols> <kerneladdr>"
sys.exit(1)
# KLine looks like this
# #0 0xffffffff813534bf (kernel+003534bf)
kerneladdr = 0xffffffff81000000
what_we_need = {}
f = open(sys.argv[1], "r")
for ln in f:
m = re.match(r" #[0-9].*\(([^+]+)\+([0-9a-fA-F]+)\)", ln)
if not m:
continue
what_we_need[(m.group(1), int(m.group(2), 16))] = 1
f.close()
# Send query.
(stdout, stdin) = popen2.popen2("addr2line -f -e %s" % sys.argv[2])
for k in what_we_need:
stdin.write("%x\n" % (k[1] + kerneladdr))
stdin.close()
# Get answer.
for k in what_we_need:
what_we_need[k] = "%24s %s" % (
stdout.readline().strip(),
stdout.readline().strip()
)
f = open(sys.argv[1], "r")
for ln in f:
m = re.match(r" #[0-9].*\(([^+]+)\+([0-9a-fA-F]+)\)", ln)
if not m:
sys.stdout.write(ln)
continue
k = (m.group(1), int(m.group(2), 16))
print "%s %s" % (ln.rstrip(), what_we_need[k])
f.close()
|
windows_packages_gpu/torch/distributions/categorical.py | codeproject/DeepStack | 353 | 11136781 | import torch
from torch._six import nan
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import probs_to_logits, logits_to_probs, lazy_property
class Categorical(Distribution):
r"""
Creates a categorical distribution parameterized by either :attr:`probs` or
:attr:`logits` (but not both).
.. note::
It is equivalent to the distribution that :func:`torch.multinomial`
samples from.
Samples are integers from :math:`\{0, \ldots, K-1\}` where `K` is ``probs.size(-1)``.
If :attr:`probs` is 1D with length-`K`, each element is the relative
probability of sampling the class at that index.
If :attr:`probs` is 2D, it is treated as a batch of relative probability
vectors.
.. note:: :attr:`probs` must be non-negative, finite and have a non-zero sum,
and it will be normalized to sum to 1.
See also: :func:`torch.multinomial`
Example::
>>> m = Categorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
>>> m.sample() # equal probability of 0, 1, 2, 3
tensor(3)
Args:
probs (Tensor): event probabilities
logits (Tensor): event log-odds
"""
arg_constraints = {'probs': constraints.simplex,
'logits': constraints.real}
has_enumerate_support = True
def __init__(self, probs=None, logits=None, validate_args=None):
if (probs is None) == (logits is None):
raise ValueError("Either `probs` or `logits` must be specified, but not both.")
if probs is not None:
if probs.dim() < 1:
raise ValueError("`probs` parameter must be at least one-dimensional.")
self.probs = probs / probs.sum(-1, keepdim=True)
else:
if logits.dim() < 1:
raise ValueError("`logits` parameter must be at least one-dimensional.")
self.logits = logits - logits.logsumexp(dim=-1, keepdim=True)
self._param = self.probs if probs is not None else self.logits
self._num_events = self._param.size()[-1]
batch_shape = self._param.size()[:-1] if self._param.ndimension() > 1 else torch.Size()
super(Categorical, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Categorical, _instance)
batch_shape = torch.Size(batch_shape)
param_shape = batch_shape + torch.Size((self._num_events,))
if 'probs' in self.__dict__:
new.probs = self.probs.expand(param_shape)
new._param = new.probs
if 'logits' in self.__dict__:
new.logits = self.logits.expand(param_shape)
new._param = new.logits
new._num_events = self._num_events
super(Categorical, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._param.new(*args, **kwargs)
@constraints.dependent_property
def support(self):
return constraints.integer_interval(0, self._num_events - 1)
@lazy_property
def logits(self):
return probs_to_logits(self.probs)
@lazy_property
def probs(self):
return logits_to_probs(self.logits)
@property
def param_shape(self):
return self._param.size()
@property
def mean(self):
return torch.full(self._extended_shape(), nan, dtype=self.probs.dtype, device=self.probs.device)
@property
def variance(self):
return torch.full(self._extended_shape(), nan, dtype=self.probs.dtype, device=self.probs.device)
def sample(self, sample_shape=torch.Size()):
if not isinstance(sample_shape, torch.Size):
sample_shape = torch.Size(sample_shape)
probs_2d = self.probs.reshape(-1, self._num_events)
samples_2d = torch.multinomial(probs_2d, sample_shape.numel(), True).T
return samples_2d.reshape(self._extended_shape(sample_shape))
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
value = value.long().unsqueeze(-1)
value, log_pmf = torch.broadcast_tensors(value, self.logits)
value = value[..., :1]
return log_pmf.gather(-1, value).squeeze(-1)
def entropy(self):
p_log_p = self.logits * self.probs
return -p_log_p.sum(-1)
def enumerate_support(self, expand=True):
num_events = self._num_events
values = torch.arange(num_events, dtype=torch.long, device=self._param.device)
values = values.view((-1,) + (1,) * len(self._batch_shape))
if expand:
values = values.expand((-1,) + self._batch_shape)
return values
|
tests/verena/test_verena.py | kiranmusze/deutschland | 445 | 11136855 | <gh_stars>100-1000
from deutschland.verena.verena import Verena
def test_verena():
v = Verena()
res = v.get()
assert (
len(res) > 0
), "Scraping and extracting all pages of the VERENA portal eturned 0 results. It very likely shouldn't."
|
pandapower/plotting/patch_makers.py | junmuz/pandapower | 104 | 11136881 | from matplotlib.patches import RegularPolygon, Arc, Circle, Rectangle, Ellipse
import numpy as np
from pandapower.plotting.plotting_toolbox import _rotate_dim2, get_color_list, get_angle_list, \
get_linewidth_list
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
def node_patches(node_coords, size, patch_type, colors=None, **kwargs):
"""
    Creates node patches from coordinates, translating the given patch type into the corresponding patch objects.
:param node_coords: coordinates of the nodes to draw
:type node_coords: iterable
:param size: size of the patch (can be interpreted differently, depending on the patch type)
:type size: float
:param patch_type: type of patches to create - can be one of
- "circle" or "ellipse" for an ellipse (cirlces are just ellipses with the same width \
+ height)\
- "rect" or "rectangle" for a rectangle\
- "poly<n>" for a polygon with n edges
:type patch_type: str
:param colors: colors or color of the patches
:type colors: iterable, float
:param kwargs: additional keyword arguments to pass to the patch initialization \
(might contain "width", "height", "angle" depending on the patch type)
:type kwargs: dict
    :return: patches - list of patches for the nodes
"""
if patch_type.lower() == 'ellipse' or patch_type.lower() == 'circle':
# circles are just ellipses
if patch_type.lower() == "circle" and len(set(kwargs.keys()) & {"width", "height"}) == 1:
wh = kwargs["width"] if "width" in kwargs else kwargs["height"]
width = wh
height = wh
else:
width = kwargs.pop("width", 2 * size)
height = kwargs.pop("height", 2 * size)
angle = kwargs.pop('angle', 0)
return ellipse_patches(node_coords, width, height, angle, color=colors, **kwargs)
elif patch_type.lower() == "rect" or patch_type.lower() == "rectangle":
width = kwargs.pop("width", 2 * size)
height = kwargs.pop("height", 2 * size)
return rectangle_patches(node_coords, width, height, color=colors, **kwargs)
elif patch_type.lower().startswith("poly"):
edges = int(patch_type[4:])
return polygon_patches(node_coords, size, edges, color=colors, **kwargs)
else:
logger.error("Wrong patchtype. Please choose a correct patch type.")
raise ValueError("Wrong patchtype")
def ellipse_patches(node_coords, width, height, angle=0, color=None, **kwargs):
"""
Function to create a list of ellipse patches from node coordinates.
:param node_coords: coordinates of the nodes to draw
:type node_coords: iterable
:param width: width of the ellipse (described by an exterior rectangle)
:type width: float
:param height: height of the ellipse (described by an exterior rectangle)
:type height: float
:param angle: angle by which to rotate the ellipse
:type angle: float
:param color: color or colors of the patches
:type color: iterable, float
:param kwargs: additional keyword arguments to pass to the Ellipse initialization
:type kwargs: dict
:return: patches - list of ellipse patches for the nodes
"""
patches = list()
angles = get_angle_list(angle, len(node_coords))
if color is not None:
colors = get_color_list(color, len(node_coords))
for (x, y), col, ang in zip(node_coords, colors, angles):
patches.append(Ellipse((x, y), width, height, angle=ang, color=col, **kwargs))
else:
for (x, y), ang in zip(node_coords, angles):
patches.append(Ellipse((x, y), width, height, angle=ang, **kwargs))
return patches
def rectangle_patches(node_coords, width, height, color=None, **kwargs):
"""
Function to create a list of rectangle patches from node coordinates.
:param node_coords: coordinates of the nodes to draw
:type node_coords: iterable
:param width: width of the rectangle
:type width: float
:param height: height of the rectangle
:type height: float
:param color: color or colors of the patches
:type color: iterable, float
:param kwargs: additional keyword arguments to pass to the Rectangle initialization
:type kwargs: dict
:return: patches - list of rectangle patches for the nodes
"""
patches = list()
if color is not None:
colors = get_color_list(color, len(node_coords))
for (x, y), col in zip(node_coords, colors):
            patches.append(Rectangle((x - width / 2, y - height / 2), width, height, color=col,
**kwargs))
else:
for x, y in node_coords:
patches.append(Rectangle((x - width / 2, y - height / 2), width, height, **kwargs))
return patches
def polygon_patches(node_coords, radius, num_edges, color=None, **kwargs):
"""
Function to create a list of polygon patches from node coordinates. The number of edges for the
polygon can be defined.
:param node_coords: coordinates of the nodes to draw
:type node_coords: iterable
:param radius: radius for the polygon (from centroid to edges)
:type radius: float
:param num_edges: number of edges of the polygon
:type num_edges: int
:param color: color or colors of the patches
:type color: iterable, float
:param kwargs: additional keyword arguments to pass to the Polygon initialization
:type kwargs: dict
:return: patches - list of rectangle patches for the nodes
"""
patches = list()
if color is not None:
colors = get_color_list(color, len(node_coords))
for (x, y), col in zip(node_coords, colors):
            patches.append(RegularPolygon([x, y], numVertices=num_edges, radius=radius, color=col,
**kwargs))
else:
for x, y in node_coords:
patches.append(RegularPolygon([x, y], numVertices=num_edges, radius=radius, **kwargs))
return patches
def load_patches(node_coords, size, angles, **kwargs):
"""
Creation function of patches for loads.
:param node_coords: coordinates of the nodes that the loads belong to.
:type node_coords: iterable
:param size: size of the patch
:type size: float
:param angles: angles by which to rotate the patches (in radians)
:type angles: iterable(float), float
:param kwargs: additional keyword arguments (might contain parameters "offset",\
"patch_edgecolor" and "patch_facecolor")
:type kwargs:
:return: Return values are: \
- lines (list) - list of coordinates for lines leading to load patches\
- polys (list of RegularPolygon) - list containing the load patches\
- keywords (set) - set of keywords removed from kwargs
"""
offset = kwargs.get("offset", 1.2 * size)
all_angles = get_angle_list(angles, len(node_coords))
edgecolor = kwargs.get("patch_edgecolor", "w")
facecolor = kwargs.get("patch_facecolor", "w")
edgecolors = get_color_list(edgecolor, len(node_coords))
facecolors = get_color_list(facecolor, len(node_coords))
polys, lines = list(), list()
for i, node_geo in enumerate(node_coords):
p2 = node_geo + _rotate_dim2(np.array([0, offset + size]), all_angles[i])
p3 = node_geo + _rotate_dim2(np.array([0, offset + size / 2]), all_angles[i])
polys.append(RegularPolygon(p2, numVertices=3, radius=size, orientation=-all_angles[i],
fc=facecolors[i], ec=edgecolors[i]))
lines.append((node_geo, p3))
return lines, polys, {"offset", "patch_edgecolor", "patch_facecolor"}
def gen_patches(node_coords, size, angles, **kwargs):
"""
Creation function of patches for generators.
:param node_coords: coordinates of the nodes that the generators belong to.
:type node_coords: iterable
:param size: size of the patch
:type size: float
:param angles: angles by which to rotate the patches (in radians)
:type angles: iterable(float), float
:param kwargs: additional keyword arguments (might contain parameters "offset",\
"patch_edgecolor" and "patch_facecolor")
:type kwargs:
:return: Return values are: \
- lines (list) - list of coordinates for lines leading to generator patches\
- polys (list of RegularPolygon) - list containing the generator patches\
- keywords (set) - set of keywords removed from kwargs
"""
polys, lines = list(), list()
offset = kwargs.get("offset", 2. * size)
all_angles = get_angle_list(angles, len(node_coords))
edgecolor = kwargs.get("patch_edgecolor", "k")
facecolor = kwargs.get("patch_facecolor", (1, 0, 0, 0))
edgecolors = get_color_list(edgecolor, len(node_coords))
facecolors = get_color_list(facecolor, len(node_coords))
for i, node_geo in enumerate(node_coords):
p2 = node_geo + _rotate_dim2(np.array([0, size + offset]), all_angles[i])
polys.append(Circle(p2, size, fc=facecolors[i], ec=edgecolors[i]))
polys.append(
Arc(p2 + np.array([-size / 6.2, -size / 2.6]), size / 2, size, theta1=65, theta2=120,
ec=edgecolors[i]))
polys.append(
Arc(p2 + np.array([size / 6.2, size / 2.6]), size / 2, size, theta1=245, theta2=300,
ec=edgecolors[i]))
lines.append((node_geo, p2 + _rotate_dim2(np.array([0, size]), -all_angles[i])))
return lines, polys, {"offset", "patch_edgecolor", "patch_facecolor"}
def sgen_patches(node_coords, size, angles, **kwargs):
"""
Creation function of patches for static generators.
:param node_coords: coordinates of the nodes that the static generators belong to.
:type node_coords: iterable
:param size: size of the patch
:type size: float
:param angles: angles by which to rotate the patches (in radians)
:type angles: iterable(float), float
:param kwargs: additional keyword arguments (might contain parameters "offset", "r_triangle",\
"patch_edgecolor" and "patch_facecolor")
:type kwargs:
:return: Return values are: \
- lines (list) - list of coordinates for lines leading to static generator patches\
- polys (list of RegularPolygon) - list containing the static generator patches\
- keywords (set) - set of keywords removed from kwargs
"""
polys, lines = list(), list()
offset = kwargs.get("offset", 2 * size)
r_triangle = kwargs.get("r_triangles", size * 0.4)
edgecolor = kwargs.get("patch_edgecolor", "w")
facecolor = kwargs.get("patch_facecolor", "w")
edgecolors = get_color_list(edgecolor, len(node_coords))
facecolors = get_color_list(facecolor, len(node_coords))
for i, node_geo in enumerate(node_coords):
mid_circ = node_geo + _rotate_dim2(np.array([0, offset + size]), angles[i])
circ_edge = node_geo + _rotate_dim2(np.array([0, offset]), angles[i])
mid_tri1 = mid_circ + _rotate_dim2(np.array([r_triangle, -r_triangle / 4]), angles[i])
mid_tri2 = mid_circ + _rotate_dim2(np.array([-r_triangle, r_triangle / 4]), angles[i])
# dropped perpendicular foot of triangle1
perp_foot1 = mid_tri1 + _rotate_dim2(np.array([0, -r_triangle / 2]), angles[i])
        line_end1 = perp_foot1 + _rotate_dim2(np.array([-2.5 * r_triangle, 0]), angles[i])
        perp_foot2 = mid_tri2 + _rotate_dim2(np.array([0, r_triangle / 2]), angles[i])
        line_end2 = perp_foot2 + _rotate_dim2(np.array([2.5 * r_triangle, 0]), angles[i])
polys.append(Circle(mid_circ, size, fc=facecolors[i], ec=edgecolors[i]))
polys.append(RegularPolygon(mid_tri1, numVertices=3, radius=r_triangle,
orientation=-angles[i], fc=facecolors[i], ec=edgecolors[i]))
polys.append(RegularPolygon(mid_tri2, numVertices=3, radius=r_triangle,
orientation=np.pi - angles[i], fc=facecolors[i],
ec=edgecolors[i]))
lines.append((node_geo, circ_edge))
lines.append((perp_foot1, line_end1))
lines.append((perp_foot2, line_end2))
return lines, polys, {"offset", "r_triangle", "patch_edgecolor", "patch_facecolor"}
def storage_patches(node_coords, size, angles, **kwargs):
"""
Creation function of patches for storage systems.
:param node_coords: coordinates of the nodes that the storage system belong to.
:type node_coords: iterable
:param size: size of the patch
:type size: float
:param angles: angles by which to rotate the patches (in radians)
:type angles: iterable(float), float
:param kwargs: additional keyword arguments (might contain parameters "offset", "r_triangle",\
"patch_edgecolor" and "patch_facecolor")
:type kwargs:
:return: Return values are: \
- lines (list) - list of coordinates for lines leading to storage patches\
- polys (list of RegularPolygon) - list containing the storage patches\
- keywords (set) - set of keywords removed from kwargs
"""
polys, lines = list(), list()
offset = kwargs.get("offset", 1 * size)
r_triangle = kwargs.get("r_triangles", size * 0.4)
for i, node_geo in enumerate(node_coords):
mid_circ = node_geo + _rotate_dim2(np.array([0, offset + r_triangle * 2.]), angles[i])
circ_edge = node_geo + _rotate_dim2(np.array([0, offset]), angles[i])
mid_tri1 = mid_circ + _rotate_dim2(np.array([-r_triangle, -r_triangle]), angles[i])
# dropped perpendicular foot of triangle1
perp_foot1 = mid_tri1 + _rotate_dim2(np.array([r_triangle * 0.5, -r_triangle/4]), angles[i])
line_end1 = perp_foot1 + _rotate_dim2(np.array([1 * r_triangle, 0]), angles[i])
perp_foot2 = mid_tri1 + _rotate_dim2(np.array([0, -r_triangle]), angles[i])
line_end2 = perp_foot2 + _rotate_dim2(np.array([2. * r_triangle, 0]), angles[i])
lines.append((node_geo, circ_edge))
lines.append((perp_foot1, line_end1))
lines.append((perp_foot2, line_end2))
return lines, polys, {"offset", "r_triangle", "patch_edgecolor", "patch_facecolor"}
def ext_grid_patches(node_coords, size, angles, **kwargs):
"""
Creation function of patches for external grids.
:param node_coords: coordinates of the nodes that the external grids belong to.
:type node_coords: iterable
:param size: size of the patch
:type size: float
:param angles: angles by which to rotate the patches (in radians)
:type angles: iterable(float), float
:param kwargs: additional keyword arguments (might contain parameters "offset",\
"patch_edgecolor" and "patch_facecolor")
:type kwargs:
:return: Return values are: \
- lines (list) - list of coordinates for lines leading to external grid patches\
- polys (list of RegularPolygon) - list containing the external grid patches\
        - keywords (set) - set of keywords removed from kwargs
"""
offset = kwargs.get("offset", 2 * size)
all_angles = get_angle_list(angles, len(node_coords))
edgecolor = kwargs.get("patch_edgecolor", "w")
facecolor = kwargs.get("patch_facecolor", "w")
edgecolors = get_color_list(edgecolor, len(node_coords))
facecolors = get_color_list(facecolor, len(node_coords))
polys, lines = list(), list()
for i, node_geo in enumerate(node_coords):
p2 = node_geo + _rotate_dim2(np.array([0, offset]), all_angles[i])
p_ll = p2 + _rotate_dim2(np.array([-size, 0]), all_angles[i])
polys.append(Rectangle(p_ll, 2 * size, 2 * size, angle=(-all_angles[i] / np.pi * 180),
fc=facecolors[i], ec=edgecolors[i], hatch="XXX"))
lines.append((node_geo, p2))
return lines, polys, {"offset", "patch_edgecolor", "patch_facecolor"}
def trafo_patches(coords, size, **kwargs):
"""
Creates a list of patches and line coordinates representing transformers each connecting two
nodes.
:param coords: list of connecting node coordinates (usually should be \
`[((x11, y11), (x12, y12)), ((x21, y21), (x22, y22)), ...]`)
:type coords: (N, (2, 2)) shaped iterable
:param size: size of the trafo patches
:type size: float
:param kwargs: additional keyword arguments (might contain parameters "patch_edgecolor" and\
"patch_facecolor")
:type kwargs:
:return: Return values are: \
- lines (list) - list of coordinates for lines connecting nodes and transformer patches\
        - circles (list of Circle) - list containing the transformer patches (rings)\
        - keywords (set) - set of keywords removed from kwargs
"""
edgecolor = kwargs.get("patch_edgecolor", "w")
facecolor = kwargs.get("patch_facecolor", (1, 0, 0, 0))
edgecolors = get_color_list(edgecolor, len(coords))
facecolors = get_color_list(facecolor, len(coords))
linewidths = kwargs.get("linewidths", 2.)
linewidths = get_linewidth_list(linewidths, len(coords), name_entries="trafos")
circles, lines = list(), list()
for i, (p1, p2) in enumerate(coords):
p1 = np.array(p1)
p2 = np.array(p2)
if np.all(p1 == p2):
continue
d = np.sqrt(np.sum((p1 - p2) ** 2))
if size is None:
size_this = np.sqrt(d) / 5
else:
size_this = size
off = size_this * 0.35
circ1 = (0.5 - off / d) * (p1 - p2) + p2
circ2 = (0.5 + off / d) * (p1 - p2) + p2
circles.append(Circle(circ1, size_this, fc=facecolors[i], ec=edgecolors[i],
lw=linewidths[i]))
circles.append(Circle(circ2, size_this, fc=facecolors[i], ec=edgecolors[i],
lw=linewidths[i]))
lp1 = (0.5 - off / d - size_this / d) * (p2 - p1) + p1
lp2 = (0.5 - off / d - size_this / d) * (p1 - p2) + p2
lines.append([p1, lp1])
lines.append([p2, lp2])
return lines, circles, {"patch_edgecolor", "patch_facecolor"}
|
examples/speaker_tasks/recognition/speaker_reco_infer.py | ahmetgunduz/NeMo | 4,145 | 11136899 | <reponame>ahmetgunduz/NeMo
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script serves two goals:
(1) Demonstrate how to use NeMo Models outside of PytorchLightning
(2) Shows example of batch speaker recognition inference
Usage:
python speaker_reco_infer.py --spkr_model='/path/to/.nemo/file' --train_manifest=/path/to/train/manifest/file'
    --test_manifest=/path/to/test/manifest/file' --batch_size=32
The train_manifest file is used to map the label names the model was trained with, so it is
mandatory to pass the train manifest file.
for finetuning tips see: https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Recognition_Verification.ipynb
"""
import json
import os
import sys
from argparse import ArgumentParser
import numpy as np
import torch
from tqdm import tqdm
from nemo.collections.asr.models import EncDecSpeakerLabelModel
from nemo.utils import logging
try:
from torch.cuda.amp import autocast
except ImportError:
from contextlib import contextmanager
@contextmanager
def autocast(enabled=None):
yield
can_gpu = torch.cuda.is_available()
def main():
parser = ArgumentParser()
parser.add_argument(
"--spkr_model", type=str, default="titanet_large", required=True, help="Pass your trained .nemo model",
)
parser.add_argument(
"--train_manifest", type=str, required=True, help="path to train manifest file to match labels"
)
parser.add_argument(
"--test_manifest", type=str, required=True, help="path to test manifest file to perform inference"
)
parser.add_argument("--batch_size", type=int, default=32)
args = parser.parse_args()
torch.set_grad_enabled(False)
if args.spkr_model.endswith('.nemo'):
logging.info(f"Using local speaker model from {args.spkr_model}")
speaker_model = EncDecSpeakerLabelModel.restore_from(restore_path=args.spkr_model)
else:
logging.error(f"Please pass a trained .nemo file")
sys.exit()
labels = []
with open(args.train_manifest, 'rb') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
item = json.loads(line)
labels.append(item['label'])
labels_map = sorted(set(labels))
label2id, id2label = {}, {}
for label_id, label in enumerate(labels_map):
label2id[label] = label_id
id2label[label_id] = label
speaker_model.setup_test_data(
test_data_layer_params={
'sample_rate': 16000,
'manifest_filepath': args.test_manifest,
'labels': labels_map,
'batch_size': args.batch_size,
'trim_silence': False,
'shuffle': False,
}
)
if can_gpu:
speaker_model = speaker_model.cuda()
speaker_model.eval()
speaker_model.test_dataloader()
all_labels = []
all_logits = []
for test_batch in tqdm(speaker_model.test_dataloader()):
if can_gpu:
test_batch = [x.cuda() for x in test_batch]
with autocast():
audio_signal, audio_signal_len, labels, _ = test_batch
logits, _ = speaker_model.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
all_logits.extend(logits.cpu().numpy())
all_labels.extend(labels.cpu().numpy())
all_logits, true_labels = np.asarray(all_logits), np.asarray(all_labels)
infer_labels = all_logits.argmax(axis=1)
out_manifest = os.path.basename(args.test_manifest).split('.')[0] + '_infer.json'
out_manifest = os.path.join(os.path.dirname(args.test_manifest), out_manifest)
with open(args.test_manifest, 'rb') as f1, open(out_manifest, 'w') as f2:
lines = f1.readlines()
for idx, line in enumerate(lines):
line = line.strip()
item = json.loads(line)
item['infer'] = id2label[infer_labels[idx]]
json.dump(item, f2)
f2.write('\n')
logging.info("Inference labels have been written to {} manifest file".format(out_manifest))
if __name__ == '__main__':
main()
|
sympy/plotting/pygletplot/plot_mode.py | shipci/sympy | 319 | 11136909 | from __future__ import print_function, division
from sympy import Symbol, sympify
from plot_interval import PlotInterval
from plot_object import PlotObject
from util import parse_option_string
from sympy.geometry.entity import GeometryEntity
from sympy.core.compatibility import is_sequence
class PlotMode(PlotObject):
"""
Grandparent class for plotting
modes. Serves as interface for
registration, lookup, and init
of modes.
To create a new plot mode,
inherit from PlotModeBase
or one of its children, such
as PlotSurface or PlotCurve.
"""
## Class-level attributes
## used to register and lookup
## plot modes. See PlotModeBase
## for descriptions and usage.
i_vars, d_vars = '', ''
intervals = []
aliases = []
is_default = False
## Draw is the only method here which
## is meant to be overridden in child
## classes, and PlotModeBase provides
## a base implementation.
def draw(self):
raise NotImplementedError()
## Everything else in this file has to
## do with registration and retrieval
## of plot modes. This is where I've
## hidden much of the ugliness of automatic
## plot mode divination...
## Plot mode registry data structures
_mode_alias_list = []
_mode_map = {
1: {1: {}, 2: {}},
2: {1: {}, 2: {}},
3: {1: {}, 2: {}},
} # [d][i][alias_str]: class
_mode_default_map = {
1: {},
2: {},
3: {},
} # [d][i]: class
_i_var_max, _d_var_max = 2, 3
def __new__(cls, *args, **kwargs):
"""
This is the function which interprets
arguments given to Plot.__init__ and
Plot.__setattr__. Returns an initialized
instance of the appropriate child class.
"""
newargs, newkwargs = PlotMode._extract_options(args, kwargs)
mode_arg = newkwargs.get('mode', '')
# Interpret the arguments
d_vars, intervals = PlotMode._interpret_args(newargs)
i_vars = PlotMode._find_i_vars(d_vars, intervals)
i, d = max([len(i_vars), len(intervals)]), len(d_vars)
# Find the appropriate mode
subcls = PlotMode._get_mode(mode_arg, i, d)
# Create the object
o = object.__new__(subcls)
# Do some setup for the mode instance
o.d_vars = d_vars
o._fill_i_vars(i_vars)
o._fill_intervals(intervals)
o.options = newkwargs
return o
@staticmethod
def _get_mode(mode_arg, i_var_count, d_var_count):
"""
Tries to return an appropriate mode class.
Intended to be called only by __new__.
mode_arg
Can be a string or a class. If it is a
PlotMode subclass, it is simply returned.
            If it is a string, it can be an alias for
a mode or an empty string. In the latter
case, we try to find a default mode for
the i_var_count and d_var_count.
i_var_count
The number of independent variables
needed to evaluate the d_vars.
d_var_count
The number of dependent variables;
usually the number of functions to
be evaluated in plotting.
For example, a Cartesian function y = f(x) has
one i_var (x) and one d_var (y). A parametric
        form x,y,z = f(u,v), f(u,v), f(u,v) has
two i_vars (u,v) and three d_vars (x,y,z).
"""
# if the mode_arg is simply a PlotMode class,
# check that the mode supports the numbers
# of independent and dependent vars, then
# return it
try:
m = None
if issubclass(mode_arg, PlotMode):
m = mode_arg
except TypeError:
pass
if m:
if not m._was_initialized:
raise ValueError(("To use unregistered plot mode %s "
"you must first call %s._init_mode().")
% (m.__name__, m.__name__))
if d_var_count != m.d_var_count:
raise ValueError(("%s can only plot functions "
"with %i dependent variables.")
% (m.__name__,
m.d_var_count))
if i_var_count > m.i_var_count:
raise ValueError(("%s cannot plot functions "
"with more than %i independent "
"variables.")
% (m.__name__,
m.i_var_count))
return m
# If it is a string, there are two possibilities.
if isinstance(mode_arg, str):
i, d = i_var_count, d_var_count
if i > PlotMode._i_var_max:
raise ValueError(var_count_error(True, True))
if d > PlotMode._d_var_max:
raise ValueError(var_count_error(False, True))
# If the string is '', try to find a suitable
# default mode
if not mode_arg:
return PlotMode._get_default_mode(i, d)
# Otherwise, interpret the string as a mode
# alias (e.g. 'cartesian', 'parametric', etc)
else:
return PlotMode._get_aliased_mode(mode_arg, i, d)
else:
raise ValueError("PlotMode argument must be "
"a class or a string")
@staticmethod
def _get_default_mode(i, d, i_vars=-1):
if i_vars == -1:
i_vars = i
try:
return PlotMode._mode_default_map[d][i]
        except KeyError:
# Keep looking for modes in higher i var counts
# which support the given d var count until we
# reach the max i_var count.
if i < PlotMode._i_var_max:
return PlotMode._get_default_mode(i + 1, d, i_vars)
else:
raise ValueError(("Couldn't find a default mode "
"for %i independent and %i "
"dependent variables.") % (i_vars, d))
@staticmethod
def _get_aliased_mode(alias, i, d, i_vars=-1):
if i_vars == -1:
i_vars = i
if alias not in PlotMode._mode_alias_list:
raise ValueError(("Couldn't find a mode called"
" %s. Known modes: %s.")
% (alias, ", ".join(PlotMode._mode_alias_list)))
try:
return PlotMode._mode_map[d][i][alias]
        except KeyError:
# Keep looking for modes in higher i var counts
# which support the given d var count and alias
# until we reach the max i_var count.
if i < PlotMode._i_var_max:
return PlotMode._get_aliased_mode(alias, i + 1, d, i_vars)
else:
raise ValueError(("Couldn't find a %s mode "
"for %i independent and %i "
"dependent variables.")
% (alias, i_vars, d))
@classmethod
def _register(cls):
"""
Called once for each user-usable plot mode.
For Cartesian2D, it is invoked after the
class definition: Cartesian2D._register()
"""
name = cls.__name__
#try:
cls._init_mode()
#except Exception as e:
# raise RuntimeError( ("Failed to initialize "
# "plot mode %s. Reason: %s")
# % (name, (str(e))) )
try:
i, d = cls.i_var_count, cls.d_var_count
# Add the mode to _mode_map under all
# given aliases
for a in cls.aliases:
if a not in PlotMode._mode_alias_list:
# Also track valid aliases, so
# we can quickly know when given
# an invalid one in _get_mode.
PlotMode._mode_alias_list.append(a)
PlotMode._mode_map[d][i][a] = cls
if cls.is_default:
# If this mode was marked as the
# default for this d,i combination,
# also set that.
PlotMode._mode_default_map[d][i] = cls
except Exception as e:
raise RuntimeError(("Failed to register "
"plot mode %s. Reason: %s")
% (name, (str(e))))
@classmethod
def _init_mode(cls):
"""
Initializes the plot mode based on
the 'mode-specific parameters' above.
Only intended to be called by
PlotMode._register(). To use a mode without
registering it, you can directly call
ModeSubclass._init_mode().
"""
def symbols_list(symbol_str):
return [Symbol(s) for s in symbol_str]
# Convert the vars strs into
# lists of symbols.
cls.i_vars = symbols_list(cls.i_vars)
cls.d_vars = symbols_list(cls.d_vars)
# Var count is used often, calculate
# it once here
cls.i_var_count = len(cls.i_vars)
cls.d_var_count = len(cls.d_vars)
if cls.i_var_count > PlotMode._i_var_max:
raise ValueError(var_count_error(True, False))
if cls.d_var_count > PlotMode._d_var_max:
raise ValueError(var_count_error(False, False))
# Try to use first alias as primary_alias
if len(cls.aliases) > 0:
cls.primary_alias = cls.aliases[0]
else:
cls.primary_alias = cls.__name__
di = cls.intervals
if len(di) != cls.i_var_count:
raise ValueError("Plot mode must provide a "
"default interval for each i_var.")
for i in range(cls.i_var_count):
# default intervals must be given [min,max,steps]
# (no var, but they must be in the same order as i_vars)
if len(di[i]) != 3:
raise ValueError("length should be equal to 3")
# Initialize an incomplete interval,
# to later be filled with a var when
# the mode is instantiated.
di[i] = PlotInterval(None, *di[i])
# To prevent people from using modes
# without these required fields set up.
cls._was_initialized = True
_was_initialized = False
## Initializer Helper Methods
@staticmethod
def _find_i_vars(functions, intervals):
i_vars = []
# First, collect i_vars in the
# order they are given in any
# intervals.
for i in intervals:
if i.v is None:
continue
elif i.v in i_vars:
raise ValueError(("Multiple intervals given "
"for %s.") % (str(i.v)))
i_vars.append(i.v)
# Then, find any remaining
# i_vars in given functions
# (aka d_vars)
for f in functions:
for a in f.free_symbols:
if a not in i_vars:
i_vars.append(a)
return i_vars
def _fill_i_vars(self, i_vars):
# copy default i_vars
self.i_vars = [Symbol(str(i)) for i in self.i_vars]
# replace with given i_vars
for i in range(len(i_vars)):
self.i_vars[i] = i_vars[i]
def _fill_intervals(self, intervals):
# copy default intervals
self.intervals = [PlotInterval(i) for i in self.intervals]
# track i_vars used so far
v_used = []
# fill copy of default
# intervals with given info
for i in range(len(intervals)):
self.intervals[i].fill_from(intervals[i])
if self.intervals[i].v is not None:
v_used.append(self.intervals[i].v)
# Find any orphan intervals and
# assign them i_vars
for i in range(len(self.intervals)):
if self.intervals[i].v is None:
u = [v for v in self.i_vars if v not in v_used]
if len(u) == 0:
raise ValueError("length should not be equal to 0")
self.intervals[i].v = u[0]
v_used.append(u[0])
@staticmethod
def _interpret_args(args):
interval_wrong_order = "PlotInterval %s was given before any function(s)."
interpret_error = "Could not interpret %s as a function or interval."
functions, intervals = [], []
if isinstance(args[0], GeometryEntity):
for coords in list(args[0].arbitrary_point()):
functions.append(coords)
intervals.append(PlotInterval.try_parse(args[0].plot_interval()))
else:
for a in args:
i = PlotInterval.try_parse(a)
if i is not None:
if len(functions) == 0:
raise ValueError(interval_wrong_order % (str(i)))
else:
intervals.append(i)
else:
if is_sequence(a, include=str):
raise ValueError(interpret_error % (str(a)))
try:
f = sympify(a)
functions.append(f)
except TypeError:
raise ValueError(interpret_error % str(a))
return functions, intervals
@staticmethod
def _extract_options(args, kwargs):
newkwargs, newargs = {}, []
for a in args:
if isinstance(a, str):
newkwargs = dict(newkwargs, **parse_option_string(a))
else:
newargs.append(a)
newkwargs = dict(newkwargs, **kwargs)
return newargs, newkwargs
def var_count_error(is_independent, is_plotting):
"""
Used to format an error message which differs
slightly in 4 places.
"""
if is_plotting:
v = "Plotting"
else:
v = "Registering plot modes"
if is_independent:
n, s = PlotMode._i_var_max, "independent"
else:
n, s = PlotMode._d_var_max, "dependent"
return ("%s with more than %i %s variables "
"is not supported.") % (v, n, s)
|