Dataset schema (each row below gives the repo path, repo name, star count, and id, followed by the file content and per-row metadata; the content_cleaned and comments columns repeat material already present in content):

| column | dtype | range |
|---|---|---|
| max_stars_repo_path | string | length 4 to 286 |
| max_stars_repo_name | string | length 5 to 119 |
| max_stars_count | int64 | 0 to 191k |
| id | string | length 1 to 7 |
| content | string | length 6 to 1.03M |
| content_cleaned | string | length 6 to 1.03M |
| language | string | 111 classes |
| language_score | float64 | 0.03 to 1 |
| comments | string | length 0 to 556k |
| edu_score | float64 | 0.32 to 5.03 |
| edu_int_score | int64 | 0 to 5 |
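A minimal sketch of iterating rows with this schema via the `datasets` library; the dataset path below is a placeholder, not the real identifier of this dump:

```python
# Hypothetical dataset path; substitute the actual repo id.
from datasets import load_dataset

ds = load_dataset("user/code-edu-dump", split="train", streaming=True)
for row in ds:
    # Keep rows the educational-value classifier scored highly.
    if row["edu_int_score"] >= 3:
        print(row["max_stars_repo_path"], row["max_stars_count"], len(row["content"]))
```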
PytorchRouting/Examples/run_experiments.py | oleksost/RoutingNetworks | 63 | 8700 | <filename>PytorchRouting/Examples/run_experiments.py
"""
This file defines some simple experiments to illustrate how Pytorch-Routing functions.
"""
import numpy as np
import tqdm
import torch
from PytorchRouting.DecisionLayers import REINFORCE, QLearning, SARSA, ActorCritic, GumbelSoftmax, PerTaskAssignment, \
WPL, AAC, AdvantageLearning, RELAX, EGreedyREINFORCE, EGreedyAAC
from PytorchRouting.Examples.Models import PerTask_all_fc, RoutedAllFC, PerTask_1_fc, PerDecisionSingleAgent, \
Dispatched
from PytorchRouting.Examples.Datasets import CIFAR100MTL
def compute_batch(model, batch):
samples, labels, tasks = batch
out, meta = model(samples, tasks=tasks)
correct_predictions = (out.max(dim=1)[1].squeeze() == labels.squeeze()).cpu().numpy()
accuracy = correct_predictions.sum()
oh_labels = one_hot(labels, out.size()[-1])
module_loss, decision_loss = model.loss(out, meta, oh_labels)
return module_loss, decision_loss, accuracy
def one_hot(indices, width):
indices = indices.squeeze().unsqueeze(1)
oh = torch.zeros(indices.size()[0], width).to(indices.device)
oh.scatter_(1, indices, 1)
return oh
def run_experiment(model, dataset, learning_rates, routing_module_learning_rate_ratio):
print('Loaded dataset and constructed model. Starting Training ...')
for epoch in range(50):
optimizers = []
parameters = []
if epoch in learning_rates:
try:
optimizers.append(torch.optim.SGD(model.routing_parameters(),
lr=routing_module_learning_rate_ratio*learning_rates[epoch]))
optimizers.append(torch.optim.SGD(model.module_parameters(),
lr=learning_rates[epoch]))
parameters = model.routing_parameters() + model.module_parameters()
except AttributeError:
optimizers.append(torch.optim.SGD(model.parameters(), lr=learning_rates[epoch]))
parameters = model.parameters()
train_log, test_log = np.zeros((3,)), np.zeros((3,))
train_samples_seen, test_samples_seen = 0, 0
dataset.enter_train_mode()
model.train()
# while True:
pbar = tqdm.tqdm(unit=' samples')
while True:
try:
batch = dataset.get_batch()
except StopIteration:
break
train_samples_seen += len(batch[0])
pbar.update(len(batch[0]))
module_loss, decision_loss, accuracy = compute_batch(model, batch)
(module_loss + decision_loss).backward()
torch.nn.utils.clip_grad_norm_(parameters, 40., norm_type=2)
for opt in optimizers:
opt.step()
model.zero_grad()
train_log += np.array([module_loss.tolist(), decision_loss.tolist(), accuracy])
pbar.close()
dataset.enter_test_mode()
model.eval()
model.start_logging_selections()
while True:
try:
batch = dataset.get_batch()
except StopIteration:
break
test_samples_seen += len(batch[0])
module_loss, decision_loss, accuracy = compute_batch(model, batch)
test_log += np.array([module_loss.tolist(), decision_loss.tolist(), accuracy])
print('Epoch {} finished after {} train and {} test samples..\n'
' Training averages: Model loss: {}, Routing loss: {}, Accuracy: {}\n'
' Testing averages: Model loss: {}, Routing loss: {}, Accuracy: {}'.format(
epoch + 1, train_samples_seen, test_samples_seen,
*(train_log/train_samples_seen).round(3), *(test_log/test_samples_seen).round(3)))
model.stop_logging_selections_and_report()
if __name__ == '__main__':
# MNIST
# dataset = MNIST_MTL(64, data_files=['./Datasets/mnist.pkl.gz'])
# model = PerTask_all_fc(1, 288, 2, dataset.num_tasks, dataset.num_tasks)
# model = WPL_routed_all_fc(1, 288, 2, dataset.num_tasks, dataset.num_tasks)
cuda = False
# cuda = True
# CIFAR
dataset = CIFAR100MTL(10, data_files=['./Datasets/cifar-100-py/train', './Datasets/cifar-100-py/test'], cuda=cuda)
model = RoutedAllFC(WPL, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = RoutedAllFC(RELAX, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = RoutedAllFC(EGreedyREINFORCE, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = RoutedAllFC(AdvantageLearning, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = PerDecisionSingleAgent(AdvantageLearning, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = Dispatched(AdvantageLearning, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
learning_rates = {0: 3e-3, 5: 1e-3, 10: 3e-4}
routing_module_learning_rate_ratio = 0.3
if cuda:
model.cuda()
run_experiment(model, dataset, learning_rates, routing_module_learning_rate_ratio)
'''
WPL_routed_all_fc(3, 512, 5, dataset.num_tasks, dataset.num_tasks)
Training averages: Model loss: 0.427, Routing loss: 8.864, Accuracy: 0.711
Testing averages: Model loss: 0.459, Routing loss: 9.446, Accuracy: 0.674
'''
| language: en | language_score: 0.528215 | edu_score: 3.020533 | edu_int_score: 3 |
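The training loop above drives two SGD optimizers at different learning rates (a scaled-down rate for the routing decisions). A standalone sketch of that pattern, with made-up parameter lists standing in for `model.routing_parameters()` and `model.module_parameters()`:

```python
import torch

routing_params = [torch.nn.Parameter(torch.randn(4, 4))]  # stand-in for routing parameters
module_params = [torch.nn.Parameter(torch.randn(4, 4))]   # stand-in for module parameters
base_lr, ratio = 3e-3, 0.3

optimizers = [
    torch.optim.SGD(routing_params, lr=ratio * base_lr),
    torch.optim.SGD(module_params, lr=base_lr),
]
loss = (routing_params[0].sum() + module_params[0].sum()) ** 2  # dummy loss
loss.backward()
torch.nn.utils.clip_grad_norm_(routing_params + module_params, 40.0, norm_type=2)
for opt in optimizers:
    opt.step()
    opt.zero_grad()
```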
output/models/ms_data/regex/re_g22_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 8701 | from output.models.ms_data.regex.re_g22_xsd.re_g22 import (
Regex,
Doc,
)
__all__ = [
"Regex",
"Doc",
]
| language: none | language_score: 1 | edu_score: 1.167351 | edu_int_score: 1 |
code/image-manipulation.py | rgeirhos/object-recognition | 33 | 8702 | #!/usr/bin/env python
from skimage.color import rgb2gray
from skimage.io import imread, imsave
from scipy.misc import toimage
import numpy as np
import wrapper as wr
###########################################################
# IMAGE IO
###########################################################
def imload_rgb(path):
"""Load and return an RGB image in the range [0, 1]."""
return imread(path) / 255.0
def save_img(image, imgname, use_JPEG=False):
"""Save image as either .jpeg or .png"""
if use_JPEG:
imsave(imgname+".JPEG", image)
else:
toimage(image,
cmin=0.0, cmax=1.0).save(imgname+".png")
###########################################################
# IMAGE MANIPULATION
###########################################################
def adjust_contrast(image, contrast_level):
"""Return the image scaled to a certain contrast level in [0, 1].
parameters:
- image: a numpy.ndarray
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
"""
assert(contrast_level >= 0.0), "contrast_level too low."
assert(contrast_level <= 1.0), "contrast_level too high."
return (1-contrast_level)/2.0 + image.dot(contrast_level)
def grayscale_contrast(image, contrast_level):
"""Convert to grayscale. Adjust contrast.
parameters:
- image: a numpy.ndarray
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
"""
return adjust_contrast(rgb2gray(image), contrast_level)
def uniform_noise(image, width, contrast_level, rng):
"""Convert to grayscale. Adjust contrast. Apply uniform noise.
parameters:
- image: a numpy.ndarray
- width: a scalar indicating width of additive uniform noise
-> then noise will be in range [-width, width]
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
image = grayscale_contrast(image, contrast_level)
return apply_uniform_noise(image, -width, width, rng)
###########################################################
# HELPER FUNCTIONS
###########################################################
def apply_uniform_noise(image, low, high, rng=None):
"""Apply uniform noise to an image, clip outside values to 0 and 1.
parameters:
- image: a numpy.ndarray
- low: lower bound of noise within [low, high)
- high: upper bound of noise within [low, high)
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
nrow = image.shape[0]
ncol = image.shape[1]
image = image + get_uniform_noise(low, high, nrow, ncol, rng)
#clip values
image = np.where(image < 0, 0, image)
image = np.where(image > 1, 1, image)
assert is_in_bounds(image, 0, 1), "values <0 or >1 occurred"
return image
def get_uniform_noise(low, high, nrow, ncol, rng=None):
"""Return uniform noise within [low, high) of size (nrow, ncol).
parameters:
- low: lower bound of noise within [low, high)
- high: upper bound of noise within [low, high)
- nrow: number of rows of desired noise
- ncol: number of columns of desired noise
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
if rng is None:
return np.random.uniform(low=low, high=high,
size=(nrow, ncol))
else:
return rng.uniform(low=low, high=high,
size=(nrow, ncol))
def is_in_bounds(mat, low, high):
"""Return wether all values in 'mat' fall between low and high.
parameters:
- mat: a numpy.ndarray
- low: lower bound (inclusive)
- high: upper bound (inclusive)
"""
return np.all(np.logical_and(mat >= low, mat <= high))
def eidolon_partially_coherent_disarray(image, reach, coherence, grain):
"""Return parametrically distorted images (produced by Eidolon factory.
For more information on the effect of different distortions, please
have a look at the paper: Koenderink et al., JoV 2017,
Eidolons: Novel stimuli for vision research).
- image: a numpy.ndarray
- reach: float, controlling the strength of the manipulation
- coherence: a float within [0, 1] with 1 = full coherence
- grain: float, controlling how fine-grained the distortion is
"""
return wr.partially_coherent_disarray(wr.data_to_pic(image),
reach, coherence, grain)
###########################################################
# MAIN METHOD FOR TESTING & DEMONSTRATION PURPOSES
###########################################################
if __name__ == "__main__":
print("""This main method should generate manipulated
images in the directory where it was executed.""")
use_JPEG = False # either JPEG or PNG
img = imload_rgb("test_image.JPEG")
###################################################
# A) Example for color-experiment:
# - convert to grayscale
###################################################
img_grayscale = rgb2gray(img)
save_img(img_grayscale, "test_image_grayscale", use_JPEG)
###################################################
# B) Example for contrast-experiment:
# - convert to grayscale and
# - reduce contrast to nominal contrast of 10%
###################################################
contrast_level_1 = 0.1
img_low_contrast = grayscale_contrast(image=img,
contrast_level=contrast_level_1)
save_img(img_low_contrast, "test_image_low_contrast", use_JPEG)
###################################################
# C) Example for noise-experiment:
# - convert to graycale and
# - reduce contrast to 30% and
# - apply uniform noise with width 0.1
###################################################
noise_width = 0.1
contrast_level_2 = 0.3
rng = np.random.RandomState(seed=42)
img_noisy = uniform_noise(image=img, width=noise_width,
contrast_level=contrast_level_2,
rng=rng)
save_img(img_noisy, "test_image_noisy", use_JPEG)
###################################################
# D) Example for eidolon-experiment:
# - use partially_coherent_disarray
###################################################
grain = 10.0
coherence = 1.0
reach = 8.0
img_eidolon = eidolon_partially_coherent_disarray(img, reach,
coherence, grain)
save_img(img_eidolon, "test_image_eidolon", use_JPEG)
| language: en | language_score: 0.358418 | edu_score: 2.657595 | edu_int_score: 3 |
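As a quick numeric check of `adjust_contrast` above: with `contrast_level=0.1` the output is `0.45 + 0.1 * image` (the `.dot` call is just a scalar multiply), so black maps to 0.45 and white to 0.55. A sketch:

```python
import numpy as np

contrast_level = 0.1
image = np.array([0.0, 0.5, 1.0])
scaled = (1 - contrast_level) / 2.0 + contrast_level * image
print(scaled)  # approximately [0.45 0.5 0.55]
```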
students/K33402/Akhmetzhanov Alisher/lr2/main/forms.py | AlishKZ/ITMO_ICT_WebDevelopment_2020-2021 | 0 | 8703 | from django.db.models import fields
from main.models import RoomReservation, UserRoom
from django import forms
from django.core.exceptions import ValidationError
from django.contrib.auth import authenticate, login
from django.contrib.auth import get_user_model
class ReservateRoomForm(forms.Form):
begin_date = forms.DateField()
end_date = forms.DateField()
class AddCommentForm(forms.Form):
text = forms.CharField(max_length=410)
accommodation = forms.ModelChoiceField(queryset=UserRoom.objects.all())
class EditReservationForm(forms.ModelForm):
class Meta:
model = RoomReservation
fields = ['begin_date', 'end_date']
| language: none | language_score: 1 | edu_score: 2.055691 | edu_int_score: 2 |
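A small usage sketch for the forms above; the field values are illustrative, and in practice the form would be bound to request POST data inside a view:

```python
# Illustrative dates; Django's DateField parses ISO-formatted strings by default.
form = ReservateRoomForm(data={"begin_date": "2021-05-01", "end_date": "2021-05-07"})
if form.is_valid():
    begin = form.cleaned_data["begin_date"]  # a datetime.date
    end = form.cleaned_data["end_date"]
else:
    errors = form.errors  # field -> list of error messages
```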
emmet-core/emmet/core/vasp/calc_types.py | espottesmith/emmet | 0 | 8704 | <filename>emmet-core/emmet/core/vasp/calc_types.py<gh_stars>0
""" Module to define various calculation types as Enums for VASP """
import datetime
from itertools import groupby, product
from pathlib import Path
from typing import Dict, Iterator, List
import bson
import numpy as np
from monty.json import MSONable
from monty.serialization import loadfn
from pydantic import BaseModel
from pymatgen.analysis.structure_matcher import ElementComparator, StructureMatcher
from pymatgen.core.structure import Structure
from typing_extensions import Literal
from emmet.core import SETTINGS
from emmet.core.utils import ValueEnum
_RUN_TYPE_DATA = loadfn(str(Path(__file__).parent.joinpath("run_types.yaml").resolve()))
_TASK_TYPES = [
"NSCF Line",
"NSCF Uniform",
"Dielectric",
"DFPT",
"DFPT Dielectric",
"NMR Nuclear Shielding",
"NMR Electric Field Gradient",
"Static",
"Structure Optimization",
"Deformation",
]
_RUN_TYPES = (
[
rt
for functional_class in _RUN_TYPE_DATA
for rt in _RUN_TYPE_DATA[functional_class]
]
+ [
f"{rt}+U"
for functional_class in _RUN_TYPE_DATA
for rt in _RUN_TYPE_DATA[functional_class]
]
+ ["LDA", "LDA+U"]
)
RunType = ValueEnum( # type: ignore
"RunType", dict({"_".join(rt.split()).replace("+", "_"): rt for rt in _RUN_TYPES})
)
RunType.__doc__ = "VASP calculation run types"
TaskType = ValueEnum("TaskType", {"_".join(tt.split()): tt for tt in _TASK_TYPES}) # type: ignore
TaskType.__doc__ = "VASP calculation task types"
CalcType = ValueEnum( # type: ignore
"CalcType",
{
f"{'_'.join(rt.split()).replace('+','_')}_{'_'.join(tt.split())}": f"{rt} {tt}"
for rt, tt in product(_RUN_TYPES, _TASK_TYPES)
},
)
CalcType.__doc__ = "VASP calculation types"
def run_type(parameters: Dict) -> RunType:
"""
Determines the run_type from the VASP parameters dict
This is adapted from pymatgen to be far less unstable
Args:
parameters: Dictionary of VASP parameters from Vasprun.xml
"""
if parameters.get("LDAU", False):
is_hubbard = "+U"
else:
is_hubbard = ""
def _variant_equal(v1, v2) -> bool:
"""
helper function to deal with strings
"""
if isinstance(v1, str) and isinstance(v2, str):
return v1.strip().upper() == v2.strip().upper()
else:
return v1 == v2
# This is to force an order of evaluation
for functional_class in ["HF", "VDW", "METAGGA", "GGA"]:
for special_type, params in _RUN_TYPE_DATA[functional_class].items():
if all(
[
_variant_equal(parameters.get(param, None), value)
for param, value in params.items()
]
):
return RunType(f"{special_type}{is_hubbard}")
return RunType(f"LDA{is_hubbard}")
def task_type(
inputs: Dict[Literal["incar", "poscar", "kpoints", "potcar"], Dict]
) -> TaskType:
"""
Determines the task type
Args:
inputs: inputs dict with an incar, kpoints, potcar, and poscar dictionaries
"""
calc_type = []
incar = inputs.get("incar", {})
if incar.get("ICHARG", 0) > 10:
try:
kpts = inputs.get("kpoints") or {}
kpt_labels = kpts.get("labels") or []
num_kpt_labels = len(list(filter(None.__ne__, kpt_labels)))
except Exception as e:
raise Exception(
"Couldn't identify total number of kpt labels: {}".format(e)
)
if num_kpt_labels > 0:
calc_type.append("NSCF Line")
else:
calc_type.append("NSCF Uniform")
elif incar.get("LEPSILON", False):
if incar.get("IBRION", 0) > 6:
calc_type.append("DFPT")
calc_type.append("Dielectric")
elif incar.get("IBRION", 0) > 6:
calc_type.append("DFPT")
elif incar.get("LCHIMAG", False):
calc_type.append("NMR Nuclear Shielding")
elif incar.get("LEFG", False):
calc_type.append("NMR Electric Field Gradient")
elif incar.get("NSW", 1) == 0:
calc_type.append("Static")
elif incar.get("ISIF", 2) == 3 and incar.get("IBRION", 0) > 0:
calc_type.append("Structure Optimization")
elif incar.get("ISIF", 3) == 2 and incar.get("IBRION", 0) > 0:
calc_type.append("Deformation")
return TaskType(" ".join(calc_type))
def calc_type(
inputs: Dict[Literal["incar", "poscar", "kpoints", "potcar"], Dict],
parameters: Dict,
) -> CalcType:
"""
Determines the calc type
Args:
inputs: inputs dict with an incar, kpoints, potcar, and poscar dictionaries
parameters: Dictionary of VASP parameters from Vasprun.xml
"""
rt = run_type(parameters).value
tt = task_type(inputs).value
return CalcType(f"{rt} {tt}")
| language: en | language_score: 0.646212 | edu_score: 2.044944 | edu_int_score: 2 |
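A short usage sketch for the helpers above; the INCAR and parameter dictionaries are illustrative rather than taken from a real calculation, and the GGA tag is assumed to match an entry in run_types.yaml:

```python
# Illustrative inputs only.
inputs = {"incar": {"NSW": 0}, "kpoints": {}, "poscar": {}, "potcar": {}}
parameters = {"LDAU": False, "GGA": "Pe"}  # assumed to match a GGA entry; +U is off

print(task_type(inputs).value)              # "Static", because NSW == 0 and no other flags are set
print(calc_type(inputs, parameters).value)  # "<run type> Static"
```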
sensors/__init__.py | dawnos/robotcar-to-rosbag | 0 | 8705 |
from mono_left import MonoLeft
from mono_right import MonoRight
from mono_rear import MonoRear
from stereo_left import StereoLeft
from stereo_right import StereoRight
from stereo_centre import StereoCentre
| language: none | language_score: 1 | edu_score: 1.139627 | edu_int_score: 1 |
models/train_classifier.py | YiWang-Evonne/disaster_response | 0 | 8706 | <filename>models/train_classifier.py
import sys
import pandas as pd
from sqlalchemy import create_engine
import nltk
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])
import re
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import pickle
from sklearn.model_selection import GridSearchCV
def load_data(database_filepath):
"""
load data from sql db
:param database_filepath: sql db path
:return: pandas dataframe
"""
engine = create_engine("sqlite:///"+database_filepath)
df = pd.read_sql_table('modeling_data', engine)
yvar = [item for item in list(df) if item not in ['message', 'original', 'genre', 'id']]
X = df['message']
Y = df[yvar]
return X.values, Y.values, list(Y)
def tokenize(text):
"""
processing the text input
:param text: text inputs
:return:
"""
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
def build_model():
"""
build model pipeline
:return: model pipeline
"""
model_pipeline = Pipeline([
('features', Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer())
])),
('clf', RandomForestClassifier())
])
return model_pipeline
def model_gridsearch(model, parameters):
cv = GridSearchCV(model, param_grid=parameters, verbose=3)
return cv
def evaluate_model(model, X_test, Y_test, category_names):
"""
evaluate model performances
:param model: model obj
:param X_test: test x
:param Y_test: test y
:param category_names: y names
:return:
"""
y_pred = model.predict(X_test)
print(classification_report(Y_test, y_pred, target_names=category_names))
def save_model(model, model_filepath):
"""
save model to local path
:param model: model obj
:param model_filepath: saving path
:return:
"""
with open(model_filepath, 'wb') as f:
pickle.dump(model, f)
def main():
"""
CLI to fit the model
:return:
"""
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
# model.fit(X_train, Y_train)
parameters = {
'clf__n_estimators': [100, 400, 800],
# 'clf__criterion':["gini", "entropy"]
}
cv = model_gridsearch(model, parameters)
cv.fit(X_train, Y_train)
best_model_pipeline = cv.best_estimator_
print('Evaluating model...')
evaluate_model(best_model_pipeline, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(best_model_pipeline, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
| language: en | language_score: 0.493456 | edu_score: 2.671436 | edu_int_score: 3 |
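The grid-search key `clf__n_estimators` above follows scikit-learn's `<step name>__<parameter>` convention for pipelines; a quick way to list the names addressable for the pipeline returned by `build_model()`:

```python
# Inspect the tunable parameter names exposed by the 'clf' step.
pipeline = build_model()
clf_params = [name for name in pipeline.get_params() if name.startswith("clf__")]
print(clf_params[:3])  # e.g. ['clf__bootstrap', 'clf__ccp_alpha', 'clf__class_weight']
```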
terra/terra/emails.py | dymaxionlabs/platform | 0 | 8707 | <reponame>dymaxionlabs/platform
import os
from datetime import date
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils import translation
from django.utils.translation import ugettext as _
from mailchimp3 import MailChimp
class Email:
from_email = settings.DEFAULT_FROM_EMAIL
subject = None
template_name = 'basic'
preview_text = ''
templates_basedir = os.path.join(settings.BASE_DIR, 'templates')
def __init__(self, recipients, language_code='en'):
self.recipients = recipients
self.language_code = language_code
def send_mail(self):
send_mail(self.subject,
self.body,
self.from_email,
self.recipients,
html_message=self.html_body)
@property
def body(self):
return render_to_string(self.body_template, self.template_params)
@property
def html_body(self):
return self._reformat_mailchimp_template(
render_to_string(self.htmlbody_template, self.template_params))
@property
def body_template(self):
return os.path.join(
self.templates_basedir,
'{name}.{lc}.txt'.format(name=self.template_name,
lc=self.language_code))
@property
def htmlbody_template(self):
return os.path.join(
self.templates_basedir,
'{name}.{lc}.html'.format(name=self.template_name,
lc=self.language_code))
@property
def template_params(self):
return {}
def _reformat_mailchimp_template(self, html):
"""
Replaces MailChimp variables for Django template variables, and do
some post-processing.
"""
for var, newvar in self.mc_variables.items():
html = html.replace(str(var), str(newvar))
return html
@property
def mc_variables(self):
return {
'*|MC:SUBJECT|*': self.subject,
'*|MC_PREVIEW_TEXT|*': self.preview_text,
'*|CURRENT_YEAR|*': date.today().year,
'*|LIST:COMPANY|*': settings.COMPANY_NAME,
'*|HTML:LIST_ADDRESS_HTML|*': settings.LIST_ADDRESS_HTML,
'*|UNSUB|*': '%unsubscribe_url%',
# Unused variables (for now):
'*|IFNOT:ARCHIVE_PAGE|*': '',
'*|LIST:DESCRIPTION|*': '',
'*|END:IF|*': '',
}
class EarlyAccessBetaEmail(Email):
template_name = 'early_access_beta'
@property
def signup_url(self):
return '{base_url}/signup?beta=1&email={email}'.format(
base_url=settings.WEBCLIENT_URL, email= self.recipients[0])
@property
def subject(self):
with translation.override(self.language_code):
return _('validate your email')
@property
def template_params(self):
return {**super().template_params, 'signup_url': self.signup_url}
@property
def mc_variables(self):
return {**super().mc_variables, '*|SIGNUP_URL|*': self.signup_url}
class WelcomeEmail(Email):
template_name = 'welcome'
link = '{base_url}/login'.format(base_url=settings.WEBCLIENT_URL)
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = user
@property
def subject(self):
with translation.override(self.language_code):
return _('your account is ready') % {'name': self.first_name}
@property
def template_params(self):
return {
**super().template_params,
'first_name': self.first_name,
'link': self.link,
}
@property
def mc_variables(self):
return {
**super().mc_variables,
'*|FNAME|*': self.first_name,
'*|TEXT:LINK|*': self.link,
}
@property
def first_name(self):
return self.user.first_name or self.user.username
class TrainingCompletedEmail(Email):
template_name = 'training_completed'
def __init__(self, estimator, *args, **kwargs):
super().__init__(*args, **kwargs)
self.estimator = estimator
self.link = '{web_client_url}/models/new/od/select?id={uuid}'.format(
web_client_url = settings.WEBCLIENT_URL, uuid = estimator.uuid
)
@property
def subject(self):
with translation.override(self.language_code):
return _('training of your model completed')
@property
def template_params(self):
return {
**super().template_params,
'name': self.estimator_name,
'num_classes': self.num_classes,
'link': self.link,
}
@property
def mc_variables(self):
return {
**super().mc_variables,
'*|NAME|*': self.estimator_name,
'*|NUM_CLASSES|*': self.num_classes,
'*|LINK|*': self.link,
}
@property
def estimator_name(self):
return self.estimator.name
@property
def num_classes(self):
return len(self.estimator.classes)
class PredictionCompletedEmail(Email):
template_name = 'prediction_completed'
def __init__(self, estimator, *args, **kwargs):
super().__init__(*args, **kwargs)
self.estimator = estimator
@property
def subject(self):
with translation.override(self.language_code):
return _('prediction of your model completed')
@property
def template_params(self):
return {
**super().template_params,
'name': self.estimator_name,
'num_classes': self.num_classes,
}
@property
def mc_variables(self):
return {
**super().mc_variables,
'*|NAME|*': self.estimator_name,
'*|NUM_CLASSES|*': self.num_classes,
}
@property
def estimator_name(self):
return self.estimator.name
@property
def num_classes(self):
return len(self.estimator.classes)
def notify(subject, body='.'):
send_mail(subject, body, '<EMAIL>',
['<EMAIL>'])
| language: en | language_score: 0.588827 | edu_score: 2.300007 | edu_int_score: 2 |
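A minimal sketch of sending one of the emails above from application code; `user` is assumed to be a Django user instance with an `email` attribute:

```python
# Hypothetical caller code.
email = WelcomeEmail(user, recipients=[user.email], language_code="en")
email.send_mail()
```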
experimental/attentive_uncertainty/toy_regression/datasets.py | miksu/edward2 | 0 | 8708 | # coding=utf-8
# Copyright 2019 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parses real and synthetic datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
import collections
import tensorflow as tf
NPRegressionDescription = collections.namedtuple(
"NPRegressionDescription",
("context_x", "context_y", "target_x", "target_y"))
class GPCurvesReader(object):
"""Generates curves using a Gaussian Process (GP).
Supports vector inputs (x) and vector outputs (y). Kernel is
mean-squared exponential, using the x-value l2 coordinate distance scaled by
some factor chosen randomly in a range. Outputs are independent gaussian
processes.
"""
def __init__(self,
batch_size,
max_num_context,
x_size=1,
y_size=1,
l1_scale=0.6,
sigma_scale=1.0,
random_kernel_parameters=False,
testing=False):
"""Creates a regression dataset of functions sampled from a GP.
Args:
batch_size: An integer.
max_num_context: The max number of observations in the context.
x_size: Integer >= 1 for length of "x values" vector.
y_size: Integer >= 1 for length of "y values" vector.
l1_scale: Float; typical scale for kernel distance function.
sigma_scale: Float; typical scale for variance.
random_kernel_parameters: If `True`, the kernel parameters (l1 and sigma)
are sampled uniformly within [0.1, l1_scale] and [0.1, sigma_scale].
testing: Boolean that indicates whether we are testing. If so there are
more targets for visualization.
"""
self._batch_size = batch_size
self._max_num_context = max_num_context
self._x_size = x_size
self._y_size = y_size
self._l1_scale = l1_scale
self._sigma_scale = sigma_scale
self._random_kernel_parameters = random_kernel_parameters
self._testing = testing
def _gaussian_kernel(self, xdata, l1, sigma_f, sigma_noise=2e-2):
"""Applies the Gaussian kernel to generate curve data.
Args:
xdata: Tensor of shape [B, num_total_points, x_size] with
the values of the x-axis data.
l1: Tensor of shape [B, y_size, x_size], the scale
parameter of the Gaussian kernel.
sigma_f: Tensor of shape [B, y_size], the magnitude
of the std.
sigma_noise: Float, std of the noise that we add for stability.
Returns:
The kernel, a float tensor of shape
[B, y_size, num_total_points, num_total_points].
"""
num_total_points = tf.shape(xdata)[1]
# Expand and take the difference
xdata1 = tf.expand_dims(xdata, axis=1) # [B, 1, num_total_points, x_size]
xdata2 = tf.expand_dims(xdata, axis=2) # [B, num_total_points, 1, x_size]
diff = xdata1 - xdata2 # [B, num_total_points, num_total_points, x_size]
# [B, y_size, num_total_points, num_total_points, x_size]
norm = tf.square(diff[:, None, :, :, :] / l1[:, :, None, None, :])
norm = tf.reduce_sum(
norm, -1) # [B, data_size, num_total_points, num_total_points]
# [B, y_size, num_total_points, num_total_points]
kernel = tf.square(sigma_f)[:, :, None, None] * tf.exp(-0.5 * norm)
# Add some noise to the diagonal to make the cholesky work.
kernel += (sigma_noise**2) * tf.eye(num_total_points)
return kernel
def generate_curves(self, num_context=None):
"""Builds the op delivering the data.
Generated functions are `float32` with x values between -2 and 2.
Args:
num_context: Number of context points. If None, chosen randomly.
Returns:
      A `NPRegressionDescription` namedtuple.
"""
if num_context is None:
num_context = tf.random_uniform(
shape=[], minval=3, maxval=self._max_num_context, dtype=tf.int32)
# If we are testing we want to have more targets and have them evenly
# distributed in order to plot the function.
if self._testing:
num_target = 400
num_total_points = num_target
x_values = tf.tile(
tf.expand_dims(tf.range(-2., 2., 1. / 100, dtype=tf.float32), axis=0),
[self._batch_size, 1])
x_values = tf.expand_dims(x_values, axis=-1)
# During training the number of target points and their x-positions are
# selected at random
else:
num_target = tf.random_uniform(shape=(), minval=0,
maxval=self._max_num_context - num_context,
dtype=tf.int32)
num_total_points = num_context + num_target
x_values = tf.random_uniform(
[self._batch_size, num_total_points, self._x_size], -2, 2)
# Set kernel parameters
# Either choose a set of random parameters for the mini-batch
if self._random_kernel_parameters:
l1 = tf.random_uniform([self._batch_size, self._y_size,
self._x_size], 0.1, self._l1_scale)
sigma_f = tf.random_uniform([self._batch_size, self._y_size],
0.1, self._sigma_scale)
# Or use the same fixed parameters for all mini-batches
else:
l1 = tf.ones(shape=[self._batch_size, self._y_size,
self._x_size]) * self._l1_scale
sigma_f = tf.ones(shape=[self._batch_size,
self._y_size]) * self._sigma_scale
# Pass the x_values through the Gaussian kernel
# [batch_size, y_size, num_total_points, num_total_points]
kernel = self._gaussian_kernel(x_values, l1, sigma_f)
# Calculate Cholesky, using double precision for better stability:
cholesky = tf.cast(tf.cholesky(tf.cast(kernel, tf.float64)), tf.float32)
# Sample a curve
# [batch_size, y_size, num_total_points, 1]
y_values = tf.matmul(
cholesky,
tf.random_normal([self._batch_size, self._y_size, num_total_points, 1]))
# [batch_size, num_total_points, y_size]
y_values = tf.transpose(tf.squeeze(y_values, 3), [0, 2, 1])
if self._testing:
# Select the targets
target_x = x_values
target_y = y_values
# Select the observations
idx = tf.random_shuffle(tf.range(num_target))
context_x = tf.gather(x_values, idx[:num_context], axis=1)
context_y = tf.gather(y_values, idx[:num_context], axis=1)
else:
# Select the targets which will consist of the context points as well as
# some new target points
target_x = x_values[:, :num_target + num_context, :]
target_y = y_values[:, :num_target + num_context, :]
# Select the observations
context_x = x_values[:, :num_context, :]
context_y = y_values[:, :num_context, :]
return NPRegressionDescription(
context_x=context_x,
context_y=context_y,
target_x=target_x,
target_y=target_y)
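# Hedged usage sketch: draw one batch of GP curves with the TF1-style graph/session
# API this file already uses. The batch size and context limit below are arbitrary
# illustration values, not defaults required by GPCurvesReader.
if __name__ == "__main__":
  reader = GPCurvesReader(batch_size=16, max_num_context=50)
  curves = reader.generate_curves()
  with tf.Session() as sess:
    batch = sess.run(curves)
    print(batch.context_x.shape, batch.target_x.shape)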
| # coding=utf-8
# Copyright 2019 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parses real and synthetic datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
import collections
import tensorflow as tf
NPRegressionDescription = collections.namedtuple(
"NPRegressionDescription",
("context_x", "context_y", "target_x", "target_y"))
class GPCurvesReader(object):
"""Generates curves using a Gaussian Process (GP).
Supports vector inputs (x) and vector outputs (y). Kernel is
mean-squared exponential, using the x-value l2 coordinate distance scaled by
some factor chosen randomly in a range. Outputs are independent gaussian
processes.
"""
def __init__(self,
batch_size,
max_num_context,
x_size=1,
y_size=1,
l1_scale=0.6,
sigma_scale=1.0,
random_kernel_parameters=False,
testing=False):
"""Creates a regression dataset of functions sampled from a GP.
Args:
batch_size: An integer.
max_num_context: The max number of observations in the context.
x_size: Integer >= 1 for length of "x values" vector.
y_size: Integer >= 1 for length of "y values" vector.
l1_scale: Float; typical scale for kernel distance function.
sigma_scale: Float; typical scale for variance.
random_kernel_parameters: If `True`, the kernel parameters (l1 and sigma)
are sampled uniformly within [0.1, l1_scale] and [0.1, sigma_scale].
testing: Boolean that indicates whether we are testing. If so there are
more targets for visualization.
"""
self._batch_size = batch_size
self._max_num_context = max_num_context
self._x_size = x_size
self._y_size = y_size
self._l1_scale = l1_scale
self._sigma_scale = sigma_scale
self._random_kernel_parameters = random_kernel_parameters
self._testing = testing
def _gaussian_kernel(self, xdata, l1, sigma_f, sigma_noise=2e-2):
"""Applies the Gaussian kernel to generate curve data.
Args:
xdata: Tensor of shape [B, num_total_points, x_size] with
the values of the x-axis data.
l1: Tensor of shape [B, y_size, x_size], the scale
parameter of the Gaussian kernel.
sigma_f: Tensor of shape [B, y_size], the magnitude
of the std.
sigma_noise: Float, std of the noise that we add for stability.
Returns:
The kernel, a float tensor of shape
[B, y_size, num_total_points, num_total_points].
"""
num_total_points = tf.shape(xdata)[1]
# Expand and take the difference
xdata1 = tf.expand_dims(xdata, axis=1) # [B, 1, num_total_points, x_size]
xdata2 = tf.expand_dims(xdata, axis=2) # [B, num_total_points, 1, x_size]
diff = xdata1 - xdata2 # [B, num_total_points, num_total_points, x_size]
# [B, y_size, num_total_points, num_total_points, x_size]
norm = tf.square(diff[:, None, :, :, :] / l1[:, :, None, None, :])
norm = tf.reduce_sum(
norm, -1) # [B, data_size, num_total_points, num_total_points]
# [B, y_size, num_total_points, num_total_points]
kernel = tf.square(sigma_f)[:, :, None, None] * tf.exp(-0.5 * norm)
# Add some noise to the diagonal to make the cholesky work.
kernel += (sigma_noise**2) * tf.eye(num_total_points)
return kernel
def generate_curves(self, num_context=None):
"""Builds the op delivering the data.
Generated functions are `float32` with x values between -2 and 2.
Args:
num_context: Number of context points. If None, chosen randomly.
Returns:
      A `NPRegressionDescription` namedtuple.
"""
if num_context is None:
num_context = tf.random_uniform(
shape=[], minval=3, maxval=self._max_num_context, dtype=tf.int32)
# If we are testing we want to have more targets and have them evenly
# distributed in order to plot the function.
if self._testing:
num_target = 400
num_total_points = num_target
x_values = tf.tile(
tf.expand_dims(tf.range(-2., 2., 1. / 100, dtype=tf.float32), axis=0),
[self._batch_size, 1])
x_values = tf.expand_dims(x_values, axis=-1)
# During training the number of target points and their x-positions are
# selected at random
else:
num_target = tf.random_uniform(shape=(), minval=0,
maxval=self._max_num_context - num_context,
dtype=tf.int32)
num_total_points = num_context + num_target
x_values = tf.random_uniform(
[self._batch_size, num_total_points, self._x_size], -2, 2)
# Set kernel parameters
# Either choose a set of random parameters for the mini-batch
if self._random_kernel_parameters:
l1 = tf.random_uniform([self._batch_size, self._y_size,
self._x_size], 0.1, self._l1_scale)
sigma_f = tf.random_uniform([self._batch_size, self._y_size],
0.1, self._sigma_scale)
# Or use the same fixed parameters for all mini-batches
else:
l1 = tf.ones(shape=[self._batch_size, self._y_size,
self._x_size]) * self._l1_scale
sigma_f = tf.ones(shape=[self._batch_size,
self._y_size]) * self._sigma_scale
# Pass the x_values through the Gaussian kernel
# [batch_size, y_size, num_total_points, num_total_points]
kernel = self._gaussian_kernel(x_values, l1, sigma_f)
# Calculate Cholesky, using double precision for better stability:
cholesky = tf.cast(tf.cholesky(tf.cast(kernel, tf.float64)), tf.float32)
# Sample a curve
# [batch_size, y_size, num_total_points, 1]
y_values = tf.matmul(
cholesky,
tf.random_normal([self._batch_size, self._y_size, num_total_points, 1]))
# [batch_size, num_total_points, y_size]
y_values = tf.transpose(tf.squeeze(y_values, 3), [0, 2, 1])
if self._testing:
# Select the targets
target_x = x_values
target_y = y_values
# Select the observations
idx = tf.random_shuffle(tf.range(num_target))
context_x = tf.gather(x_values, idx[:num_context], axis=1)
context_y = tf.gather(y_values, idx[:num_context], axis=1)
else:
# Select the targets which will consist of the context points as well as
# some new target points
target_x = x_values[:, :num_target + num_context, :]
target_y = y_values[:, :num_target + num_context, :]
# Select the observations
context_x = x_values[:, :num_context, :]
context_y = y_values[:, :num_context, :]
return NPRegressionDescription(
context_x=context_x,
context_y=context_y,
target_x=target_x,
target_y=target_y)
| en | 0.777681 | # coding=utf-8 # Copyright 2019 The Edward2 Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Parses real and synthetic datasets. Generates curves using a Gaussian Process (GP). Supports vector inputs (x) and vector outputs (y). Kernel is mean-squared exponential, using the x-value l2 coordinate distance scaled by some factor chosen randomly in a range. Outputs are independent gaussian processes. Creates a regression dataset of functions sampled from a GP. Args: batch_size: An integer. max_num_context: The max number of observations in the context. x_size: Integer >= 1 for length of "x values" vector. y_size: Integer >= 1 for length of "y values" vector. l1_scale: Float; typical scale for kernel distance function. sigma_scale: Float; typical scale for variance. random_kernel_parameters: If `True`, the kernel parameters (l1 and sigma) are sampled uniformly within [0.1, l1_scale] and [0.1, sigma_scale]. testing: Boolean that indicates whether we are testing. If so there are more targets for visualization. Applies the Gaussian kernel to generate curve data. Args: xdata: Tensor of shape [B, num_total_points, x_size] with the values of the x-axis data. l1: Tensor of shape [B, y_size, x_size], the scale parameter of the Gaussian kernel. sigma_f: Tensor of shape [B, y_size], the magnitude of the std. sigma_noise: Float, std of the noise that we add for stability. Returns: The kernel, a float tensor of shape [B, y_size, num_total_points, num_total_points]. # Expand and take the difference # [B, 1, num_total_points, x_size] # [B, num_total_points, 1, x_size] # [B, num_total_points, num_total_points, x_size] # [B, y_size, num_total_points, num_total_points, x_size] # [B, data_size, num_total_points, num_total_points] # [B, y_size, num_total_points, num_total_points] # Add some noise to the diagonal to make the cholesky work. Builds the op delivering the data. Generated functions are `float32` with x values between -2 and 2. Args: num_context: Number of context points. If None, chosen randomly. Returns: A `CNPRegressionDescription` namedtuple. # If we are testing we want to have more targets and have them evenly # distributed in order to plot the function. # During training the number of target points and their x-positions are # selected at random # Set kernel parameters # Either choose a set of random parameters for the mini-batch # Or use the same fixed parameters for all mini-batches # Pass the x_values through the Gaussian kernel # [batch_size, y_size, num_total_points, num_total_points] # Calculate Cholesky, using double precision for better stability: # Sample a curve # [batch_size, y_size, num_total_points, 1] # [batch_size, num_total_points, y_size] # Select the targets # Select the observations # Select the targets which will consist of the context points as well as # some new target points # Select the observations | 2.537671 | 3 |
critiquebrainz/frontend/views/index.py | shagun6/critiquebrainz | 0 | 8709 | <filename>critiquebrainz/frontend/views/index.py
from flask import Blueprint, render_template
from flask_babel import format_number
import critiquebrainz.db.users as db_users
import critiquebrainz.db.review as db_review
from bs4 import BeautifulSoup
from markdown import markdown
DEFAULT_CACHE_EXPIRATION = 10 * 60 # seconds
frontend_bp = Blueprint('frontend', __name__)
@frontend_bp.route('/')
def index():
# Popular reviews
popular_reviews = db_review.get_popular(6)
for review in popular_reviews:
# Preparing text for preview
preview = markdown(review['text'], safe_mode="escape")
review['preview'] = ''.join(BeautifulSoup(preview, "html.parser").findAll(text=True))
# Recent reviews
recent_reviews, _ = db_review.list_reviews(sort='created', limit=9)
# Statistics
review_count = format_number(db_review.get_count(is_draft=False))
user_count = format_number(db_users.total_count())
return render_template('index/index.html', popular_reviews=popular_reviews, recent_reviews=recent_reviews,
reviews_total=review_count, users_total=user_count)
@frontend_bp.route('/about')
def about():
return render_template('index/about.html')
@frontend_bp.route('/guidelines')
def guidelines():
return render_template('index/guidelines.html')
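# Hedged usage sketch (illustration only): CritiqueBrainz registers this blueprint
# from its application factory; the minimal app below is an assumption, not the
# project's actual setup.
def _example_register_blueprint():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(frontend_bp)
    return app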
| <filename>critiquebrainz/frontend/views/index.py
from flask import Blueprint, render_template
from flask_babel import format_number
import critiquebrainz.db.users as db_users
import critiquebrainz.db.review as db_review
from bs4 import BeautifulSoup
from markdown import markdown
DEFAULT_CACHE_EXPIRATION = 10 * 60 # seconds
frontend_bp = Blueprint('frontend', __name__)
@frontend_bp.route('/')
def index():
# Popular reviews
popular_reviews = db_review.get_popular(6)
for review in popular_reviews:
# Preparing text for preview
preview = markdown(review['text'], safe_mode="escape")
review['preview'] = ''.join(BeautifulSoup(preview, "html.parser").findAll(text=True))
# Recent reviews
recent_reviews, _ = db_review.list_reviews(sort='created', limit=9)
# Statistics
review_count = format_number(db_review.get_count(is_draft=False))
user_count = format_number(db_users.total_count())
return render_template('index/index.html', popular_reviews=popular_reviews, recent_reviews=recent_reviews,
reviews_total=review_count, users_total=user_count)
@frontend_bp.route('/about')
def about():
return render_template('index/about.html')
@frontend_bp.route('/guidelines')
def guidelines():
return render_template('index/guidelines.html')
| en | 0.69376 | # seconds # Popular reviews # Preparing text for preview # Recent reviews # Statistics | 2.165501 | 2 |
Enigma/Enigma-master/GBS/gbsHelper.py | Q-Alpha/Hackathon2020 | 12 | 8710 | import strawberryfields as sf
from strawberryfields import ops
from strawberryfields.utils import random_interferometer
from strawberryfields.apps import data, sample, subgraph, plot
import plotly
import networkx as nx
import numpy as np
class GBS:
def __init__(self, samples =[], min_pho = 16, max_pho = 30, subgraph_size = 8, max_count = 2000):
self.samples = samples
self.min_pho = min_pho
self.max_pho = max_pho
self.subgraph_size = subgraph_size
self.max_count = max_count
def graphDensity(self, samples, min_pho, max_pho, subgraph_size, max_count):
dense = subgraph.search(samples, pl_graph, subgraph_size, min_pho, max_count=max_count)
dense_freq = []
for k in range(subgraph_size, min_pho+1):
dense_freq.append([k,len(dense[k])])
return dense, dense_freq
def graphFreqScore(self, d_freqs, max_freq):
x,y = [], []
for i in range(len(d_freqs)):
for j in range(len(d_freqs[i])):
n,f = d_freqs[i][j][0],d_freqs[i][j][1]
x.append(n*f)
            N = len(d_freqs[i])
y.append((1/max_freq)*(np.sum(x)/N))
x = []
min_y = np.min(y)
y = [min_y/x for x in y]
return y, y.index(max(y))
def runJob(self, eng):
num_subsystem = 8
prog = sf.Program(num_subsystem, name="remote_job")
U = random_interferometer(4)
with prog.context as q:
# Initial squeezed states
# Allowed values are r=1.0 or r=0.0
ops.S2gate(1.0) | (q[0], q[4])
ops.S2gate(1.0) | (q[1], q[5])
ops.S2gate(1.0) | (q[3], q[7])
# Interferometer on the signal modes (0-3)
ops.Interferometer(U) | (q[0], q[1], q[2], q[3])
ops.BSgate(0.543, 0.123) | (q[2], q[0])
ops.Rgate(0.453) | q[1]
ops.MZgate(0.65, -0.54) | (q[2], q[3])
# *Same* interferometer on the idler modes (4-7)
ops.Interferometer(U) | (q[4], q[5], q[6], q[7])
ops.BSgate(0.543, 0.123) | (q[6], q[4])
ops.Rgate(0.453) | q[5]
ops.MZgate(0.65, -0.54) | (q[6], q[7])
ops.MeasureFock() | q
        results = eng.run(prog, shots=10)
# state = results.state
# measurements = results.samples
return results.samples
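# Hedged usage sketch: the remote "X8" engine, the photon-number settings, and the
# module-level ``pl_graph`` consumed by graphDensity() are assumptions; none of
# them are defined in this file and must be supplied by the caller.
def _example_gbs_run():
    eng = sf.RemoteEngine("X8") # needs Xanadu cloud credentials configured
    gbs = GBS(min_pho=16, max_pho=30, subgraph_size=8, max_count=2000)
    samples = gbs.runJob(eng) # 10 shots of 8-mode photon-count samples
    return samples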
| import strawberryfields as sf
from strawberryfields import ops
from strawberryfields.utils import random_interferometer
from strawberryfields.apps import data, sample, subgraph, plot
import plotly
import networkx as nx
import numpy as np
class GBS:
def __init__(self, samples =[], min_pho = 16, max_pho = 30, subgraph_size = 8, max_count = 2000):
self.samples = samples
self.min_pho = min_pho
self.max_pho = max_pho
self.subgraph_size = subgraph_size
self.max_count = max_count
def graphDensity(self, samples, min_pho, max_pho, subgraph_size, max_count):
dense = subgraph.search(samples, pl_graph, subgraph_size, min_pho, max_count=max_count)
dense_freq = []
for k in range(subgraph_size, min_pho+1):
dense_freq.append([k,len(dense[k])])
return dense, dense_freq
def graphFreqScore(self, d_freqs, max_freq):
x,y = [], []
for i in range(len(d_freqs)):
for j in range(len(d_freqs[i])):
n,f = d_freqs[i][j][0],d_freqs[i][j][1]
x.append(n*f)
            N = len(d_freqs[i])
y.append((1/max_freq)*(np.sum(x)/N))
x = []
min_y = np.min(y)
y = [min_y/x for x in y]
return y, y.index(max(y))
def runJob(self, eng):
num_subsystem = 8
prog = sf.Program(num_subsystem, name="remote_job")
U = random_interferometer(4)
with prog.context as q:
# Initial squeezed states
# Allowed values are r=1.0 or r=0.0
ops.S2gate(1.0) | (q[0], q[4])
ops.S2gate(1.0) | (q[1], q[5])
ops.S2gate(1.0) | (q[3], q[7])
# Interferometer on the signal modes (0-3)
ops.Interferometer(U) | (q[0], q[1], q[2], q[3])
ops.BSgate(0.543, 0.123) | (q[2], q[0])
ops.Rgate(0.453) | q[1]
ops.MZgate(0.65, -0.54) | (q[2], q[3])
# *Same* interferometer on the idler modes (4-7)
ops.Interferometer(U) | (q[4], q[5], q[6], q[7])
ops.BSgate(0.543, 0.123) | (q[6], q[4])
ops.Rgate(0.453) | q[5]
ops.MZgate(0.65, -0.54) | (q[6], q[7])
ops.MeasureFock() | q
        results = eng.run(prog, shots=10)
# state = results.state
# measurements = results.samples
return results.samples
| en | 0.697348 | # Initial squeezed states # Allowed values are r=1.0 or r=0.0 # Interferometer on the signal modes (0-3) # *Same* interferometer on the idler modes (4-7) # state = results.state # measurements = results.samples | 2.302253 | 2 |
happy/HappyNodeJoin.py | jenniexie/happy | 0 | 8711 | <reponame>jenniexie/happy
#!/usr/bin/env python
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# Implements HappyNodeJoin class through which a virtual node join a network.
#
# When a node joins a network, an TAP interface is created in the node and in
# the network. Then TUN is setup on the node.
#
import os
import sys
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
from happy.utils.IP import IP
from happy.HappyLink import HappyLink
from happy.HappyNetwork import HappyNetwork
from happy.HappyNode import HappyNode
import happy.HappyLinkAdd
import happy.HappyNodeAddress
import happy.HappyNodeRoute
options = {}
options["quiet"] = False
options["node_id"] = None
options["tap"] = False
options["network_id"] = None
options["fix_hw_addr"] = None
options["customized_eui64"] = None
def option():
return options.copy()
class HappyNodeJoin(HappyLink, HappyNode, HappyNetwork):
"""
Assigns a virtual node to a specific network.
happy-node-join [-h --help] [-q --quiet] [-i --id <NODE_NAME>]
[-n --network <NETWORK_NAME>] [-m --mac <HW_ADDR>]
[-c --customizedeui64 <CUST_EUI64>] [-p --tap]
-i --id Required. Node to be added to a network. Find using
happy-node-list or happy-state.
-n --network Required. Network to add the node to. Find using
happy-network-list or happy-state.
-m --mac The MAC hardware address for the node.
-c --customizedeui64 The EUI64 address for the node.
-p --tap Configure the link between the node and the network as an
L2 TAP device with a virtual bridge. Omit this parameter to
default to an L3 TUN configuration for normal IP routing.
Example:
$ happy-node-join ThreadNode HomeThread
Adds the ThreadNode node to the HomeThread network.
$ happy-node-join -i onhub -n HomeWiFi -m 5
Adds the onhub node to the HomeWiFi network with a MAC hardware address of
00:00:00:00:00:05.
$ happy-node-join -i onhub -n HomeWiFi -c 00:00:00:00:00:00:00:05
Adds the onhub node to the HomeWiFi network with an EUI64 address of
fc00:db20:35b:7399::5.
return:
0 success
1 fail
"""
def __init__(self, opts=options):
HappyNetwork.__init__(self)
HappyNode.__init__(self)
HappyLink.__init__(self)
self.quiet = opts["quiet"]
self.node_id = opts["node_id"]
self.tap = opts["tap"]
self.network_id = opts["network_id"]
self.fix_hw_addr = opts["fix_hw_addr"]
self.customized_eui64 = opts["customized_eui64"]
if not self.fix_hw_addr and opts["customized_eui64"]:
self.fix_hw_addr = self.customized_eui64[6:]
self.customized_eui64 = self.customized_eui64.replace(':', '-')
def __pre_check(self):
# Check if the name of the node is given
if not self.node_id:
emsg = "Missing name of the virtual node that should join a network."
self.logger.error("[localhost] HappyNodeJoin: %s" % (emsg))
self.exit()
# Check if the name of the network is given
if not self.network_id:
emsg = "Missing name of the virtual network that be joined by a virtual node."
self.logger.error("[localhost] HappyNodeJoin: %s" % (emsg))
self.exit()
# Check if node exists
if not self._nodeExists():
emsg = "virtual node %s does not exist." % (self.node_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
# Check if network exists
if not self._networkExists():
emsg = "virtual network %s does not exist." % (self.network_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
# Check if node already joined that network
if self.network_id in self.getNodeNetworkIds():
emsg = "virtual node %s is already part of %s network." % (self.node_id, self.network_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
self.fix_hw_addr = self.fixHwAddr(self.fix_hw_addr)
# Check if HW MAC address is valid
if self.fix_hw_addr is not None and self.fix_hw_addr.count(":") != 5:
emsg = "virtual node %s get invalid MAC HW address %s." % (self.node_id, self.fix_hw_addr)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
def __create_link(self):
options = happy.HappyLinkAdd.option()
options["quiet"] = self.quiet
options["type"] = self.getNetworkType()
options["tap"] = self.tap
link = happy.HappyLinkAdd.HappyLinkAdd(options)
ret = link.run()
self.link_id = ret.Data()
self.readState()
def __post_check_1(self):
# Ensure that the link is saved in the state
if self.link_id not in self.getLinkIds():
emsg = "Link %s does not exist." % (self.link_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
def __get_node_interface_info(self):
self.link_type = self.getLinkType(self.link_id)
self.link_network_end = self.getLinkNetworkEnd(self.link_id)
self.link_node_end = self.getLinkNodeEnd(self.link_id)
self.node_interface_name = self.getNodeInterfaceName(self.node_id, self.link_type)
def __connect_to_network(self):
self.moveInterfaceToNamespace(self.link_network_end, self.network_id)
# Attach to bridge
cmd = "brctl addif " + self.uniquePrefix(self.network_id) + " " + self.link_network_end
cmd = self.runAsRoot(cmd)
ret = self.CallAtNetwork(self.network_id, cmd)
def __connect_to_node(self):
if not self.isNodeLocal(self.node_id):
if self.getLinkTap(self.link_id):
self.moveLwipInterfaceToNamespace(self.link_id, self.node_id)
else:
self.moveInterfaceToNamespace(self.link_node_end, self.node_id)
cmd = "ip link set " + self.link_node_end
cmd += " name " + self.node_interface_name
if self.fix_hw_addr is not None:
cmd += " address " + self.fix_hw_addr
cmd = self.runAsRoot(cmd)
ret = self.CallAtNode(self.node_id, cmd)
def __nmconf(self):
if not self.isNodeLocal(self.node_id):
return
if not self.tap:
cmd = "nmcli dev disconnect iface " + self.node_interface_name
cmd = self.runAsRoot(cmd)
ret = self.CallAtHost(cmd)
def __check_node_hw_addr(self):
hw_addr = self.getHwAddress(self.node_interface_name, self.node_id)
hw_addr_int = IP.mac48_string_to_int(hw_addr)
if (hw_addr_int & (1 << 41)):
hw_addr_int = hw_addr_int & ~(1 << 41)
            # format the adjusted 48-bit value back into a colon-separated MAC string
            new_hw_addr = ':'.join("%02x" % ((hw_addr_int >> shift) & 0xff)
                                   for shift in range(40, -8, -8))
cmd = "ip link set " + self.node_interface_name + " address " + str(new_hw_addr)
cmd = self.runAsRoot(cmd)
r = self.CallAtNode(self.node_id, cmd)
def __post_check_2(self):
return
def __bring_up_interface(self):
self.bringLinkUp(self.link_id, self.node_interface_name, self.node_id, self.network_id)
def __add_new_interface_state(self):
self.setLinkNetworkNodeHw(self.link_id, self.network_id, self.node_id, self.fix_hw_addr)
new_network_interface = {}
self.setNetworkLink(self.network_id, self.link_id, new_network_interface)
new_node_interface = {}
new_node_interface["link"] = self.link_id
new_node_interface["type"] = self.link_type
new_node_interface["ip"] = {}
if self.customized_eui64:
new_node_interface["customized_eui64"] = self.customized_eui64
self.setNodeInterface(self.node_id, self.node_interface_name, new_node_interface)
def __assign_network_addresses(self):
network_prefixes = self.getNetworkPrefixes(self.network_id)
for prefix in network_prefixes:
options = happy.HappyNodeAddress.option()
options["quiet"] = self.quiet
options["node_id"] = self.node_id
options["interface"] = self.node_interface_name
if IP.isIpv6(prefix):
nid = self.getInterfaceId(self.node_interface_name, self.node_id)
else:
nid = self.getNextNetworkIPv4Id(prefix, self.network_id)
options["address"] = self.getNodeAddressOnPrefix(prefix, nid)
options["add"] = True
addrctrl = happy.HappyNodeAddress.HappyNodeAddress(options)
ret = addrctrl.run()
def __load_network_routes(self):
routes = self.getNetworkRoutes(self.network_id)
for route_to in routes.keys():
route_record = self.getNetworkRoute(route_to, self.network_id)
options = happy.HappyNodeRoute.option()
options["quiet"] = self.quiet
options["add"] = True
options["node_id"] = self.node_id
options["to"] = route_to
options["via"] = route_record["via"]
options["prefix"] = route_record["prefix"]
noder = happy.HappyNodeRoute.HappyNodeRoute(options)
ret = noder.run()
def run(self):
with self.getStateLockManager():
self.__pre_check()
self.__create_link()
self.__post_check_1()
self.__get_node_interface_info()
self.__connect_to_network()
self.__connect_to_node()
self.__nmconf()
self.__check_node_hw_addr()
self.__bring_up_interface()
self.__post_check_2()
self.__add_new_interface_state()
self.writeState()
self.__assign_network_addresses()
self.__load_network_routes()
return ReturnMsg(0)
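# Hedged usage sketch following the options-dict pattern above; the node and
# network names are illustration values and must already exist in the Happy state.
def _example_join():
    opts = option()
    opts["node_id"] = "ThreadNode"
    opts["network_id"] = "HomeThread"
    return HappyNodeJoin(opts).run()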
| #!/usr/bin/env python
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# Implements HappyNodeJoin class through which a virtual node join a network.
#
# When a node joins a network, an TAP interface is created in the node and in
# the network. Then TUN is setup on the node.
#
import os
import sys
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
from happy.utils.IP import IP
from happy.HappyLink import HappyLink
from happy.HappyNetwork import HappyNetwork
from happy.HappyNode import HappyNode
import happy.HappyLinkAdd
import happy.HappyNodeAddress
import happy.HappyNodeRoute
options = {}
options["quiet"] = False
options["node_id"] = None
options["tap"] = False
options["network_id"] = None
options["fix_hw_addr"] = None
options["customized_eui64"] = None
def option():
return options.copy()
class HappyNodeJoin(HappyLink, HappyNode, HappyNetwork):
"""
Assigns a virtual node to a specific network.
happy-node-join [-h --help] [-q --quiet] [-i --id <NODE_NAME>]
[-n --network <NETWORK_NAME>] [-m --mac <HW_ADDR>]
[-c --customizedeui64 <CUST_EUI64>] [-p --tap]
-i --id Required. Node to be added to a network. Find using
happy-node-list or happy-state.
-n --network Required. Network to add the node to. Find using
happy-network-list or happy-state.
-m --mac The MAC hardware address for the node.
-c --customizedeui64 The EUI64 address for the node.
-p --tap Configure the link between the node and the network as an
L2 TAP device with a virtual bridge. Omit this parameter to
default to an L3 TUN configuration for normal IP routing.
Example:
$ happy-node-join ThreadNode HomeThread
Adds the ThreadNode node to the HomeThread network.
$ happy-node-join -i onhub -n HomeWiFi -m 5
Adds the onhub node to the HomeWiFi network with a MAC hardware address of
00:00:00:00:00:05.
$ happy-node-join -i onhub -n HomeWiFi -c 00:00:00:00:00:00:00:05
Adds the onhub node to the HomeWiFi network with an EUI64 address of
fc00:db20:35b:7399::5.
return:
0 success
1 fail
"""
def __init__(self, opts=options):
HappyNetwork.__init__(self)
HappyNode.__init__(self)
HappyLink.__init__(self)
self.quiet = opts["quiet"]
self.node_id = opts["node_id"]
self.tap = opts["tap"]
self.network_id = opts["network_id"]
self.fix_hw_addr = opts["fix_hw_addr"]
self.customized_eui64 = opts["customized_eui64"]
if not self.fix_hw_addr and opts["customized_eui64"]:
self.fix_hw_addr = self.customized_eui64[6:]
self.customized_eui64 = self.customized_eui64.replace(':', '-')
def __pre_check(self):
# Check if the name of the node is given
if not self.node_id:
emsg = "Missing name of the virtual node that should join a network."
self.logger.error("[localhost] HappyNodeJoin: %s" % (emsg))
self.exit()
# Check if the name of the network is given
if not self.network_id:
emsg = "Missing name of the virtual network that be joined by a virtual node."
self.logger.error("[localhost] HappyNodeJoin: %s" % (emsg))
self.exit()
# Check if node exists
if not self._nodeExists():
emsg = "virtual node %s does not exist." % (self.node_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
# Check if network exists
if not self._networkExists():
emsg = "virtual network %s does not exist." % (self.network_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
# Check if node already joined that network
if self.network_id in self.getNodeNetworkIds():
emsg = "virtual node %s is already part of %s network." % (self.node_id, self.network_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
self.fix_hw_addr = self.fixHwAddr(self.fix_hw_addr)
# Check if HW MAC address is valid
if self.fix_hw_addr is not None and self.fix_hw_addr.count(":") != 5:
emsg = "virtual node %s get invalid MAC HW address %s." % (self.node_id, self.fix_hw_addr)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
def __create_link(self):
options = happy.HappyLinkAdd.option()
options["quiet"] = self.quiet
options["type"] = self.getNetworkType()
options["tap"] = self.tap
link = happy.HappyLinkAdd.HappyLinkAdd(options)
ret = link.run()
self.link_id = ret.Data()
self.readState()
def __post_check_1(self):
# Ensure that the link is saved in the state
if self.link_id not in self.getLinkIds():
emsg = "Link %s does not exist." % (self.link_id)
self.logger.error("[%s] HappyNodeJoin: %s" % (self.node_id, emsg))
self.exit()
def __get_node_interface_info(self):
self.link_type = self.getLinkType(self.link_id)
self.link_network_end = self.getLinkNetworkEnd(self.link_id)
self.link_node_end = self.getLinkNodeEnd(self.link_id)
self.node_interface_name = self.getNodeInterfaceName(self.node_id, self.link_type)
def __connect_to_network(self):
self.moveInterfaceToNamespace(self.link_network_end, self.network_id)
# Attach to bridge
cmd = "brctl addif " + self.uniquePrefix(self.network_id) + " " + self.link_network_end
cmd = self.runAsRoot(cmd)
ret = self.CallAtNetwork(self.network_id, cmd)
def __connect_to_node(self):
if not self.isNodeLocal(self.node_id):
if self.getLinkTap(self.link_id):
self.moveLwipInterfaceToNamespace(self.link_id, self.node_id)
else:
self.moveInterfaceToNamespace(self.link_node_end, self.node_id)
cmd = "ip link set " + self.link_node_end
cmd += " name " + self.node_interface_name
if self.fix_hw_addr is not None:
cmd += " address " + self.fix_hw_addr
cmd = self.runAsRoot(cmd)
ret = self.CallAtNode(self.node_id, cmd)
def __nmconf(self):
if not self.isNodeLocal(self.node_id):
return
if not self.tap:
cmd = "nmcli dev disconnect iface " + self.node_interface_name
cmd = self.runAsRoot(cmd)
ret = self.CallAtHost(cmd)
def __check_node_hw_addr(self):
hw_addr = self.getHwAddress(self.node_interface_name, self.node_id)
hw_addr_int = IP.mac48_string_to_int(hw_addr)
if (hw_addr_int & (1 << 41)):
hw_addr_int = hw_addr_int & ~(1 << 41)
            # format the adjusted 48-bit value back into a colon-separated MAC string
            new_hw_addr = ':'.join("%02x" % ((hw_addr_int >> shift) & 0xff)
                                   for shift in range(40, -8, -8))
cmd = "ip link set " + self.node_interface_name + " address " + str(new_hw_addr)
cmd = self.runAsRoot(cmd)
r = self.CallAtNode(self.node_id, cmd)
def __post_check_2(self):
return
def __bring_up_interface(self):
self.bringLinkUp(self.link_id, self.node_interface_name, self.node_id, self.network_id)
def __add_new_interface_state(self):
self.setLinkNetworkNodeHw(self.link_id, self.network_id, self.node_id, self.fix_hw_addr)
new_network_interface = {}
self.setNetworkLink(self.network_id, self.link_id, new_network_interface)
new_node_interface = {}
new_node_interface["link"] = self.link_id
new_node_interface["type"] = self.link_type
new_node_interface["ip"] = {}
if self.customized_eui64:
new_node_interface["customized_eui64"] = self.customized_eui64
self.setNodeInterface(self.node_id, self.node_interface_name, new_node_interface)
def __assign_network_addresses(self):
network_prefixes = self.getNetworkPrefixes(self.network_id)
for prefix in network_prefixes:
options = happy.HappyNodeAddress.option()
options["quiet"] = self.quiet
options["node_id"] = self.node_id
options["interface"] = self.node_interface_name
if IP.isIpv6(prefix):
nid = self.getInterfaceId(self.node_interface_name, self.node_id)
else:
nid = self.getNextNetworkIPv4Id(prefix, self.network_id)
options["address"] = self.getNodeAddressOnPrefix(prefix, nid)
options["add"] = True
addrctrl = happy.HappyNodeAddress.HappyNodeAddress(options)
ret = addrctrl.run()
def __load_network_routes(self):
routes = self.getNetworkRoutes(self.network_id)
for route_to in routes.keys():
route_record = self.getNetworkRoute(route_to, self.network_id)
options = happy.HappyNodeRoute.option()
options["quiet"] = self.quiet
options["add"] = True
options["node_id"] = self.node_id
options["to"] = route_to
options["via"] = route_record["via"]
options["prefix"] = route_record["prefix"]
noder = happy.HappyNodeRoute.HappyNodeRoute(options)
ret = noder.run()
def run(self):
with self.getStateLockManager():
self.__pre_check()
self.__create_link()
self.__post_check_1()
self.__get_node_interface_info()
self.__connect_to_network()
self.__connect_to_node()
self.__nmconf()
self.__check_node_hw_addr()
self.__bring_up_interface()
self.__post_check_2()
self.__add_new_interface_state()
self.writeState()
self.__assign_network_addresses()
self.__load_network_routes()
return ReturnMsg(0) | en | 0.760091 | #!/usr/bin/env python # # Copyright (c) 2015-2017 Nest Labs, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ## # @file # Implements HappyNodeJoin class through which a virtual node join a network. # # When a node joins a network, an TAP interface is created in the node and in # the network. Then TUN is setup on the node. # Assigns a virtual node to a specific network. happy-node-join [-h --help] [-q --quiet] [-i --id <NODE_NAME>] [-n --network <NETWORK_NAME>] [-m --mac <HW_ADDR>] [-c --customizedeui64 <CUST_EUI64>] [-p --tap] -i --id Required. Node to be added to a network. Find using happy-node-list or happy-state. -n --network Required. Network to add the node to. Find using happy-network-list or happy-state. -m --mac The MAC hardware address for the node. -c --customizedeui64 The EUI64 address for the node. -p --tap Configure the link between the node and the network as an L2 TAP device with a virtual bridge. Omit this parameter to default to an L3 TUN configuration for normal IP routing. Example: $ happy-node-join ThreadNode HomeThread Adds the ThreadNode node to the HomeThread network. $ happy-node-join -i onhub -n HomeWiFi -m 5 Adds the onhub node to the HomeWiFi network with a MAC hardware address of 00:00:00:00:00:05. $ happy-node-join -i onhub -n HomeWiFi -c 00:00:00:00:00:00:00:05 Adds the onhub node to the HomeWiFi network with an EUI64 address of fc00:db20:35b:7399::5. return: 0 success 1 fail # Check if the name of the node is given # Check if the name of the network is given # Check if node exists # Check if network exists # Check if node already joined that network # Check if HW MAC address is valid # Ensure that the link is saved in the state # Attach to bridge | 2.234889 | 2 |
__init__.py | SDRAST/Data_Reduction | 0 | 8712 | # -*- coding: utf-8 -*-
"""
Modules to support data reduction in Python.
The main purpose of the base module ``Data_Reduction`` is to provide a
superclass with a good set of attributes and methods to cover all common needs.
The base module is also able to read data from a text file as a ``numpy``
structured array. This is done with a class called ``DataGetterMixin`` which
must be invoked after the base class has been initiated.
The module function ``examine_text_data_file()`` reveals the structure of the
file(s) that provide the data.
Examples
========
Here we initiate a base class after mixing in the data getter. The first line of
the file has column names but the first three columns are all under one
name ``UTC`` so we specify column widths to consider the first three columns
to be one column. We use the names from the first line of the file, which
could have been done with an ``open()``, ``readline()``, and ``close()``::
mixIn(Observation, DataGetterMixin)
obs = Observation(dss=28, date="2012/127", project="SolarPatrol")
obs.open_datafile('t12127.10',
delimiter=[17,16,3,11,7,9,8,2,6],
skip_header=1,
names="UTC Epoch Chan Tsys Int Az El Diode Level".split())
Now the data getter is already mixed in to Observation so we don't need to do
it again. In this case we specify the names of the columns, changing ``Int`` to
``Integr``::
obs2 = Observation(dss=28, date="2012/127", project="SolarPatrol")
obs2.open_datafile('t12127.10', skip_header=1,
names="Year DOY UTC Epoch Chan Tsys Integr Az El Diode Level".split())
The class Map inherits from DataGetterMixin, so no explicit mixin required::
obsmap = Map(dss=84, date="2020/163", project="SolarPatrol")
obsmap.initialize('sim-venus.dat', source="Venus")
Let's examine ``obsmap``. We have only one signal column::
In [3]: obsmap.channel.keys()
Out[3]: dict_keys(['xl'])
In [4]: obsmap.channel['xl'].keys()
Out[4]: dict_keys(['freq', 'bw', 'pol', 'ifmode', 'atten', 'power'])
"""
# standard Python modules
import datetime
import glob
import h5py
import logging
import math
import matplotlib.dates as MPLd
import numpy as NP
import os
import re
import readline
import scipy.interpolate
import scipy.fftpack
import Astronomy as A
import Astronomy.DSN_coordinates as coords
import Astronomy.Ephem as AE
import DatesTimes as DT
import local_dirs
import Math.clusters as VQ # vector quantization
import support
# enable raw_input Tab completion
readline.parse_and_bind("tab: complete")
logger = logging.getLogger(__name__) # module logger
class Observation(object):
"""
superclass for a data structure and methods
Attributes
==========
aliases - (dict) data keys to replace those in original data
channel - (dict) signal paths, e.g., different freqs and pols
data - (dict) original data, e.g., read from file or database
DOY - (int) day of year of observation
end - (float) UNIX time at the end
latitude - (float) from obs
logger - (logging.Logger)
longitude - (float) from obs
name - (str) user assigned, defaults to YEAR/DOY
numdata - (int) number of data samples
obs - (AE.DSS) observatory
session - (Session) set of observations, parent to Observation
session_path - (str) directory for session files
start - (float) UNIX time at the beginning
year - (int) year of observation
**Reserved Column Names**
These column names are recognized. They are also the keys for attribute
``data``.
These quantities must be present in some form::
unixtime (float) UNIX time in sec
chan_name (str) channel name
integr (float) integration (exposure) in sec
azel (float,float) azimuth and elevation in decimal deg
power (float) power level if only a single channel
Optional::
diode (float) 0 or power in K (integers OK)
level (float) (unidentified -- in ``tlog`` table)
cryotemp (float) cryostat temp in K
windspeed (float) km/hr
winddir (float) deg
ambtemp (float) deg C
pressure (float) mbar
Columns to be computed::
mpldatenum (float) matplotlib ``datenum``
Alternative for ``power``::
tsys (float) system temperature (calibrated power)
top (float) alternative for ``tsys`` (used in DSN)
vfc_counts (int) VFC counts (rate times ``integr``)
Any column with a name which is not a reserved name is assumed to be
power-like data from the channel with that name, unless that name is in a
list provided to the argument ``ignore`` in the method ``get_data_channels``
of the class ``DataGetterMixin``.
Alternative for ``unixtime``::
year (int) year of observation
doy (int) day of year
utc (str) HH:MM:SS
timestr (str) something like 2020/06/14/14:22:21.00
Alternative for ``chan_name``::
chan (int) index in receiver channel names
Alternative for ``azel``::
radec (float,float) precessed right ascension in decimal hours and
precessed declination in decimal deg
radec1950 (float,float) mean right ascension in decimal hours and
mean declination in decimal deg at epoch
radec2000 (float,float) mean right ascension in decimal hours and
mean declination at epoch in decimal deg
az (float) azimuth in decimal deg
el (float) elevation in decimal deg
ra (float) precessed right ascension in decimal hours
dec (float) precessed declination in decimal deg
ra1950 (float) mean right ascension in decimal hours at epoch
dec1950 (float) mean declination in decimal deg at epoch
ra2000 (float) mean right ascension in decimal hours at epoch
dec2000 (float) mean declination in decimal deg at epoch
Notes
=====
* The ``data`` structure is a dict.
* The value of a ``data`` item is either a numpy array or a object
like ``float``, ``int``, or ``str``.
* The keys have reserved words defined above and will be lowercase.
* Items with other keys may be added, typically by a child class.
* Coordinates shall be in pairs, `e.g. ``azel``, ``radec``. (This way you
never get one without the other.)
"""
reserved = ['unixtime','chan_name','integr','az','el','year','doy','utc',
'timestr','chan','tsys','top','diode','level','cryotemp',
'windspeed','winddir','ambtemp','pressure',
'ra','dec','ra1950','dec1950','ra2000','dec2000']
power_keys = ['tsys', 'top', 'vfc_counts', 'power']
def __init__(self, parent=None, name=None, dss=None,
date=None, project=None):
"""
Create a base Observation object.
This is not meant to be initialized by itself. A subclass generally
determines how data are read in. However, method ``initialize()``
provides a basic data read capability using ``numpy.genfromtxt()``
and creates the object's data structure.
Args:
parent (Session): session to which this observation belongs
name (str): an identifier; default is station ID + "obs"
dss (int): station number
date (str): "YEAR/DOY"
project (str): directory under /usr/local/projects
"""
self.logger = logging.getLogger(logger.name+".Observation")
self.session = parent
# observatory must be specified
if dss:
self.obs = coords.DSS(dss)
self.longitude = self.obs.long*180/math.pi # deg
self.latitude = self.obs.lat*180/math.pi # deg
else:
self.logger.error("__init__: requires observatory location")
raise Exception("Where were the data taken?")
# give the object a name
if name:
self.name = name
else:
self.name = "DSS"+str(dss)+"obs"
self.logger = logging.getLogger(logger.name+".Observation")
# the observation was part of some project
if project:
self.project = project
else:
self.logger.error("__init__: requires a project")
raise Exception("Where are the session's working files?")
# the observation was done on some date
if date:
y,d = date.split('/')
      self.year = int(y)
self.DOY = int(d)
projdatapath, self.sessionpath, rawdatapath = \
get_obs_dirs(project, dss, self.year, self.DOY,
datafmt=None)
self.logger.debug("__init__: session path: %s", self.sessionpath)
else:
self.logger.error("__init__: requires a date")
raise Exception("When were the date taken?")
# accomodate subclass arguments
self.aliases = {}
# what I really want to do here is see if this was called by a subclass,
# in which case I do not try to get the channel info until this
# initialization has finished.
#
#if hasattr(self, "get_data_channels"):
# channels = self, get_data_channels()
# self.make_channels(channels)
#else:
# self.logger.info("__init__: initialize() may now be called")
def splitkey(self, longlat):
"""
Checks for presence of coordinates in pairs or singles
@param longlat : "azel", or "radec", or "radecEPOC"
@type longlat : str
"""
longitude = longlat[:2] # 'az' or 'ra'
if len(longlat) > 5: # has epoch
epoch = longlat[-4:]
longitude += epoch
latitude = longlat[2:-4]+epoch
else: # date of observation
latitude = longlat[2:]
epoch = None
return longitude, latitude, epoch
def check_for(self, data, longlat):
"""
Checks for separate coordinates and splits if coord pairs
Args:
data (dict): attribute ``data``
longlat (str): "azel", or "radec", or "radecEPOC"
"""
longitude, latitude, epoch = self.splitkey(longlat)
if longitude in data.dtype.names and \
latitude in data.dtype.names:
self.logger.debug("check_for: data has %s and %s", longitude, latitude)
self.data[longitude] = data[longitude]
self.data[latitude] = data[latitude]
return True
elif longlat in data.dtype.names:
self.logger.debug("check_for: data has %s", longlat)
      # unzip the coordinate pairs (zip replaces the Python 2 map(None, ...) idiom)
      self.data[longitude], self.data[latitude] = zip(*data[longlat])
self.logger.debug("check_for: added %s and %s to data",
longitude, latitude)
return True
else:
# coords need to be computed from other coords
return False
def unpack_to_complex(self, rawdata):
"""
Converts a sequence of alternating real/imag samples to complex
@param rawdata : alternating real and imaginary bytes
@type rawdata : numpy array of signed int8
@return: numpy array of complex
"""
datalen = len(rawdata)
real = rawdata[0:datalen:2]
imag = rawdata[1:datalen:2]
data = real + 1j*imag
return data
def sideband_separate(self, data):
"""
Converts a complex spectrum array and returns two reals with USB and LSB
This applies a Hilbert transform to the complex data.
"""
usb = (data.real + scipy.fftpack.hilbert(data).imag)
lsb = (scipy.fftpack.hilbert(data).real + data.imag)
return lsb,usb
class Channel(support.PropertiedClass):
"""
Class for a signal path
"""
def __init__(self, parent, name, freq=None, bw=None, pol=None, IFtype=None,
atten=None):
"""
Notes
=====
The properties can be accessed as if the class were a dict.
Arguments
=========
freq:float or int: center frequency in MHz
bw:float or int: bandwidth in MHz
pol:str: polarization code
"""
support.PropertiedClass.__init__(self)
self.parent = parent
self.logger = logging.getLogger(self.parent.name+".Channel")
self.logger.debug("__init__: created %s", self.logger.name)
self.logger.debug("__init__: parent is %s", self.parent)
self.name = name
self.data['freq'] = freq
self.data['bw'] = bw
self.data['pol'] = pol
self.data['ifmode'] = IFtype
self.data['atten'] = atten
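# Hedged sketch of the mixin pattern described in the module docstring: combine
# Observation with DataGetterMixin (defined below) and read a whitespace-delimited
# text file. The station, date, project and file name are the docstring's example
# values, not resources guaranteed to exist here; nothing runs at import time.
def _example_mixed_in_observation():
  class _Obs(Observation, DataGetterMixin):
    pass
  obs = _Obs(dss=28, date="2012/127", project="SolarPatrol")
  obs.initialize('t12127.10', skip_header=1,
                 names="Year DOY UTC Epoch Chan Tsys Integr Az El Diode Level".split())
  return obs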
class DataGetterMixin(object):
"""
Class for getting data from a CSV file.
"""
def initialize(self, filename, delimiter=" ", names=True, skip_header=0,
source=None):
"""
Get the data and make a data structure for the observations.
This is not included by default in ``__init__()`` to keep it simple for
subclasses.
Args:
filename (str): name only, required; the path is provided
delimiter (str): what separates the columns
names (bool): the first line has column names
skip_header (int) : number of rows to skip
"""
# get the data
data = self.open_datafile(filename, delimiter=delimiter, names=names,
skip_header=skip_header)
# get the signal columns and names
metadata, signals = self.get_data_channels(data)
# create Channel objects for the signal properties
self.make_channels(signals)
# create the data structure
self.make_data_struct(data, metadata, signals)
# compute the offsets from the source center for each data point
if source:
self.get_offsets(source=source)
else:
self.logger.warning("initialize: no source specified; no offsets")
def open_datafile(self, filename, delimiter=" ", names=True, skip_header=0):
"""
Opens and reads a data file
This is used by ``Malargue`` (one data files) and ``GAVRT`` (one data file
for each signal).
Args:
filename (str): text data file name
delimiter (str): separator between columns (default: whitespace)
names (bool): file row has column names (default: True)
skip_header (int): number of rows to skip at beginning of file
Returns:
ndarray:
"""
data = NP.genfromtxt(self.sessionpath+filename,
delimiter=delimiter,
dtype=None,
names=names,
case_sensitive='lower',
skip_header=skip_header,
encoding=None)
return data
def get_data_channels(self, data, ignore=None):
"""
Gets or sets the names of the signal columns
Column names are separated into metadata and signals. Names in
    ``ignore`` are ignored. Names in ``aliases`` are replaced.
Args:
data (ndarray): data read from text file
ignore (list of str): columns to ignore; default None
Returns:
(list of str, list of str): metadata, signals
"""
names = data.dtype.names
metadata = []
signals = []
for name in names:
      if ignore:
        if name in ignore:
          # skip columns the caller asked to ignore
          continue
if name.casefold() in map(str.casefold, self.aliases):
key = self.aliases[name].lower() # we use only lower case names
else:
key = name.lower()
self.logger.debug("get_data_channels: doing %s for %s", key, name)
if key in map(str.casefold, Observation.reserved):
if key.casefold() in ['top', 'tsys']:
signals.append(key)
else:
metadata.append(key)
else:
signals.append(key)
self.logger.debug("get_data_channels: signals: %s", signals)
self.logger.debug("get_data_channels: metadata: %s", metadata)
return metadata, signals
def make_data_struct(self, data, metadata, signals):
"""
Takes a text table with headers and converts it into a numpy ``ndarray``.
That means that a column can be extracted using `data[label]`.
Args
====
data: (ndarray) the data from the text file
metadata: (list of str) the column names for metadata
signals: (list of str) the column names for power-like data
"""
# get the known columns:
self.data = {}
self.numdata = len(data)
#self.logger.debug("make_data_struct: using aliases: %s", self.aliases)
# get columns that are not metadata; each has power for a channel
for signal in signals:
#self.logger.debug("make_data_struct: for signal: %s", signal)
#if signal in self.aliases.items():
# get the key in 'data' which matches 'value' in 'aliases'
# power = data[next(key for key, value in self.aliases.items()
# if value == signal)][idx]
#else:
# power = data[signal]
#self.channel[signal]['power'] = power
self.channel[signal]['power'] = data[signal]
# get UNIX time
if 'unixtime' in metadata:
if 'unixtime' in data.dtype.names:
self.data['unixtime'] = data['unixtime']
else:
# look up the equivalent of UNIX time in the data table
self.data['unixtime'] = data[next(key
for key, value in self.aliases.items()
if value == 'unixtime')]
# compute other convenient forms of time
self.data['datetime'] = [] # Python datetime.date
self.data['date_num'] = [] # matplotlib.dates date number
for idx in list(range(self.numdata)):
if 'unixtime' in data.dtype.names:
tm = data['unixtime'][idx]
else:
tm = data[next(key for key, value in self.aliases.items()
if value == 'unixtime')][idx]
dt = datetime.datetime.utcfromtimestamp(tm)
self.data['datetime'].append(dt)
self.data['date_num'].append(MPLd.date2num(dt))
self.start = self.data['unixtime'][0]
self.end = self.data['unixtime'][-1]
else:
# figure out how to process the time data columns
pass
# compute alternate coordinates
if self.check_for(data, 'azel'):
# azel exists; compute radec if needed; then radec2000 if needed
if self.check_for(data, 'radec'):
pass
else:
self.radec_from_azel()
if self.check_for(data, 'radec2000'):
# ra2000 and dec2000 already exist
pass
else:
self.radec2000_from_radec()
elif self.check_for(data, 'radec2000'):
# coordinates exist; compute back to azimuth and elevation
if self.check_for(data, 'radec'):
pass
else:
# compute observed RA and dec
self.radec_from_radec2000()
if self.check_for(data, 'azel'):
pass
else:
self.azel_from_radec()
# in here check for 'radec'
else:
self.logger.error("no coordinates found in data")
raise Exception("check INFO logging for columns found")
self.start = self.data['unixtime'].min()
self.end = self.data['unixtime'].max()
def make_channels(self, signals, props=None):
"""
Assign properties to the channels.
The prop keys are "freq", "bw", "pol", "IFtype", and "atten".
Args:
props (dict of dicts): signal channel properties.
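Example of a ``props`` dict (illustrative values only)::
    props = {'xl': {'freq': 8420., 'bw': 100., 'pol': 'L',
                    'IFtype': 'U', 'atten': 0}}
    self.make_channels(['xl'], props=props)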
"""
self.channel = {}
for ch in signals:
chindex = signals.index(ch)
if props:
self.channel[ch] = self.Channel(self, ch,
freq =props[ch]['freq'],
bw =props[ch]['bw'],
pol =props[ch]['pol'],
IFtype=props[ch]['IFtype'],
atten =props[ch]['atten'])
else:
self.channel[ch] = self.Channel(self, ch)
class GriddingMixin(object):
"""
Class for all the data and methods associated with a raster scan map
It is expected that any class inheriting these methods is also a subclass of
``Observation``, so that the attributes listed below are available.
Attrs:
cfg (dict):
data (numpy array): from ``Observation``
logger (logging.Logger): replaces ``Observation`` logger
name (str): replaces ``Observation`` name
session (Session):
source (str):
step (float): map step size
"""
def get_grid_stepsize(self, xy=None):
"""
Determine the stepsize of gridded data
This assumes xdec and dec data increase incrementally by 'stepsize'.
The sequences may repeat in a sawtooth-like series. The number of
'xdec' and 'dec' points is a multiple of the grid size.
Arguments:
xy (tuple or list) - X-array and Y-array (default Map.data)
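Example (a sketch with explicit offset arrays)::
    xstep, ystep = self.get_grid_stepsize(
        xy=(self.data['xdec_offset'], self.data['dec_offset']))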
"""
# get the absolute value of coordinate intervals
if xy:
dxdecs = abs(xy[0][1:] - xy[0][:-1])
ddecs = abs(xy[1][1:] - xy[1][:-1])
else:
dxdecs = abs(self.data['xdec_offset'][1:]-self.data['xdec_offset'][:-1])
ddecs = abs(self.data['dec_offset'][1:] -self.data['dec_offset'][:-1])
# form array of X,Y pairs
coords = NP.array(list(zip(dxdecs,ddecs)))
# expect two clusters (default)
cluster_pos = VQ.find_clusters(coords).round(4) # tenths of mdeg
# return the non-zero intervals
return cluster_pos[0].max(), cluster_pos[1].max()
def regrid(self, width=1.0, height=1.0, step=None, power_key=None):
"""
converts a map from observed coordinates to map coordinates
If ``step`` is not given then the step size will be the average step size
in X and the average step in Y. In this case, the effect is to make a
regular grid if the original positions were not exact, i.e., pointing error.
@param width : map width in deg
@type width : float
@param height : map height in deg
@type height : float
@param step : map step size in X and Y in deg
@type step : (float, float)
@param power_key : dict key of Z-value
@type power_key : str
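Example (illustrative; the step and channel name depend on the map)::
    obsmap.regrid(width=1.0, height=1.0, step=(0.05, 0.05))
    obsmap.data['grid_z']['xl']   # gridded powers for channel 'xl'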
"""
# what is the power-like quantity?
if power_key:
pass
else:
# take the first that matches
for key in Observation.power_keys:
if key in self.data:
power_key = key
self.logger.info("regrid: using '%s'", power_key)
break
else:
continue
if power_key:
pass
else:
self.logger.error("regrid: no power data key found")
return None
if step is None:
# use the original stepsize
self.xstep, self.ystep = self.get_grid_stepsize()
else:
self.xstep, self.ystep = step
self.data['grid_x'] = NP.arange(
-width/2, width/2+self.xstep/2, self.xstep/2)
self.data['grid_y'] = NP.arange(
-height/2,height/2+self.ystep/2, self.ystep/2)
self.logger.debug("regrid: grid shape is %dx%d", len(self.data['grid_x']),
len(self.data['grid_y']))
self.data['grid_z'] = {}
for chnl in self.channel:
self.logger.debug("regrid: processing %s", chnl)
points = list(zip(self.data['xdec_offset'],self.data['dec_offset']))
self.logger.debug("regrid: %d positions", len(points))
values = self.data[power_key][chnl]
self.logger.debug("regrid: %d values", len(values))
xi, yi = NP.meshgrid(self.data['grid_x'], self.data['grid_y'])
try:
self.data['grid_z'][chnl] = scipy.interpolate.griddata(points, values,
(xi, yi), method='nearest')
except ValueError as details:
self.logger.error("regrid: gridding failed: %s", str(details))
self.logger.debug("regrid: channel %s length of points is %d",
chnl, len(points))
self.logger.debug("regrid: channel %s length of values is %d", chnl,
len(values))
continue
def radec_from_azel(self):
"""
compute RA and dec from az and el
"""
RA = []; decs = []; RAdecs = []
for idx in list(range(self.numdata)):
# setup
dt = self.data['datetime'][idx]
# format time as (YEAR, DOY.fff)
time_tuple = (dt.year,
DT.day_of_year(dt.year,dt.month,dt.day)
+ ( dt.hour
+ dt.minute/60.
+ dt.second/3600.
+ dt.microsecond/3600./1e6)/24.)
azimuth = self.data['az'][idx]
elevation = self.data['el'][idx]
# compute
ra,dec = A.AzEl_to_RaDec(azimuth, elevation,
self.latitude,
-self.longitude,
time_tuple)
RA.append(ra)
decs.append(dec)
RAdecs.append((ra,dec))
self.data['ra'] = RA
self.data['dec'] = decs
self.data['radec'] = RAdecs
def radec2000_from_radec(self):
"""
compute RA2000 and dec2000 from observed RA and dec
"""
RA2000 = []; decs2000 = []; RAdec2000 = []
for idx in list(range(self.numdata)):
# setup
tm = self.data['unixtime'][idx]
mjd = DT.UnixTime_to_MJD(tm)
MJD = int(mjd)
UT = 24*(mjd-MJD)
ra = self.data['ra'][idx]
dec = self.data['dec'][idx]
# compute
ra2000,dec2000 = A.apparent_to_J2000(MJD,UT,
ra, dec,
self.longitude, self.latitude)
RA2000.append(ra2000)
decs2000.append(dec2000)
RAdec2000.append((ra2000,dec2000))
self.data['ra2000'] = RA2000
self.data['dec2000'] = decs2000
self.data['radec2000'] = RAdec2000
def radec_from_radec2000(self):
"""
compute apparent RA and dec from J2000 RA and dec
"""
RA = []; decs = []; RAdecs = []
for idx in list(range(self.numdata)):
# setup
tm = self.data['unixtime'][idx]
mjd = DT.UnixTime_to_MJD(tm)
MJD = int(mjd)
UT = 24*(mjd-MJD)
ra2000 = self.data['ra2000'][idx]
dec2000 = self.data['dec2000'][idx]
# compute
ra, dec = A.J2000_to_apparent(MJD, UT,
ra2000*math.pi/12, dec2000*math.pi/180)
RA.append(ra)
decs.append(dec)
RAdecs.append((ra,dec))
self.data['ra'] = RA
self.data['dec'] = decs
self.data['radec'] = RAdecs
def azel_from_radec(self):
"""
compute azimuth and elevation from apparent right ascension and declination
"""
azs = []; els = []; azels = []
for idx in list(range(self.numdata)):
# setup
ra = self.data['ra'][idx]
dec = self.data['dec'][idx]
timetuple = self.data['datetime'][idx].timetuple()
year = timetuple.tm_year
doy = timetuple.tm_yday + (timetuple.tm_hour
+(timetuple.tm_min+timetuple.tm_sec/60)/60)/24
# compute
az, el = A.RaDec_to_AzEl(ra, dec,
self.latitude, self.longitude, (year,doy))
azs.append(az)
els.append(el)
azels.append((az,el))
self.data['az'] = azs
self.data['el'] = els
self.data['azel'] = azels
def get_offsets(self, source="Sun", xdec_ofst=0., dec_ofst=0.):
"""
Generates a map in coordinates relative to a source
If the source is the default, the position of the Sun will be computed for
the time of each sample. IT SEEMS LIKE A GOOD IDEA TO DO THIS FOR PLANETS
ALSO.
This adds elements with keys ``xdec_offset`` and ``dec_offset`` to the
attribute ``data``.
@param source : source at map center
@type source : ephem source instance
@param xdec_ofst : relative X-dec position of sample
@type xdec_ofst : float
@param dec_ofst : relative dec position of sample
@type dec_ofst : float
@return: (dxdecs,ddecs) in degrees
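Example (the source name is illustrative)::
    obsmap.get_offsets(source="Venus")
    obsmap.data['xdec_offset'], obsmap.data['dec_offset']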
"""
if source.lower() == "sun":
src = AE.ephem.Sun()
else:
src = AE.calibrator(source)
self.data['dec_offset'] = []
self.data['xdec_offset'] = []
for count in range(len(self.data['unixtime'])):
dt = datetime.datetime.utcfromtimestamp(
self.data['unixtime'][count])
if type(src) == AE.Quasar:
pass
else:
src.compute(dt)
ra_center = src.ra*12/math.pi # hours
dec_center = src.dec*180/math.pi # degrees
decrad = src.dec
# right ascension increases to the left, cross-dec to the right
self.data['xdec_offset'].append(xdec_ofst -
(self.data['ra'][count] - ra_center)*15*math.cos(decrad) )
self.data['dec_offset'].append( dec_ofst +
self.data['dec'][count] - dec_center)
# change list to NP.array
self.data['xdec_offset'] = NP.array(self.data['xdec_offset'])
self.data['dec_offset'] = NP.array(self.data['dec_offset'])
class Map(Observation, GriddingMixin):
"""
Map class without special features for GAVRT and Malargue
Most of the methods are mixed in to avoid conflicting with subclasses
"""
def __init__(self, parent=None, name=None, dss=None, date=None, project=None):
"""
Create a Map object
Args:
parent (Session): an observing session to which this belongs
name (str): an identifier, like a scan number
dss (int): station where the data were taken
date (str): date of observation as "YEAR/DOY"
project (str): project for which this observation was made
"""
Observation.__init__(self, parent=parent, name=name, dss=dss, date=date,
project=project)
class Recording(h5py.File):
"""
Class for raw data
This is typically the contents of a data file transcribed into a standard
format. It may be the data of one Observation object, or data for multiple
Observation objects, or contain part of the data for an Observation object.
If the data being curated are not in a standard project and are not in a
standard place, then a ``path`` and file ``name`` must be given instead of a session.
"""
def __init__(self, session=None, path=None, date=None, dss=None, name=None):
"""
Initialize a metadata container and data directory
Args
====
session (Session): required, unless:
path (str) : location of raw data files
date
"""
self.logger = logging.getLogger(logger.name+".Recording")
if session:
self.session = session
if not name:
name = session.project + "-" + str(session.year) + "-" + \
('%03d' % session.doy) + "-dss" + str(session.dss)+".info"
self.year = session.year
self.doy = session.doy
self.dss = session.dss
self.project = session.project
self.session_dir = session.session_dir
elif path and name:
self.session = Session() # for its methods and attributes
self.session_dir = path
self.name = name
else:
raise RuntimeError("either a session or a path and filename required")
h5py.File.__init__(self, name, 'w')
self.attrs['project'] = self.project
self.attrs['dss'] = self.dss
self.attrs['year'] = self.year
self.attrs['doy'] = self.doy
class Session(object):
"""
Base class for an observing session on a given year and DOY
Public Attributes::
doy (int) - day of year for session
logger (logging.Logger) - logging.Logger object
parent (object) - a data reduction session (mult. observ. sessions)
year (int) - year of the session
project (str) - project code
session_dir (str) - path to results from this session
A session usually refers to a telescope, date and project. This will
normally define a path to the session directory.
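Example (a minimal sketch; project, station and date are illustrative)::
    ses = Session(project="SolarPatrol", dss=28, date="2012/127")
    ses.session_dir   # path under .../SolarPatrol/Observations/dss28/2012/127/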
"""
def __init__(self, parent=None, date=None, project=None, dss=None,
path=None):
"""
initialize data reduction for one observing session
Args
====
parent: (object) optional class for a data reduction tool
date: (str) required, format YEAR/DOY
project: (str) required
dss (int) required
path (str) optional
If `path` is given for a non-standard observing files location, and it does
not exist, it will be created. Then the Recording and Observation instances
must be directed to where the files are.
"""
self.logger = logging.getLogger(logger.name+".Session")
if parent:
self.session = parent
if date and project and dss:
y,d = date.split('/')
self.year = int(y);
self.doy = int(d)
self.project = project
self.dss = dss
self.name = "'%s %4d/%03d'" % (self.project, self.year, self.doy)
else:
self.logger.error("__init__: missing DSS or year or DOY or project")
raise Exception("Where and when and for what project were the data taken?")
self.find_session_dir(path=path)
def find_session_dir(self, path=None):
"""
find or make the sessions directory
Args:
path (str) - explicit path to files
"""
self.logger.debug("find_session_dir: entered for path=%s", path)
if path:
self.session_dir = path
else:
obs_dir = local_dirs.projects_dir + self.project \
+"/Observations/dss"+str(self.dss)+"/"
self.session_dir = obs_dir+ "%4d" % self.year +"/"+ "%03d" % self.doy +"/"
if not os.path.exists(self.session_dir):
os.makedirs(self.session_dir, mode=0o775)
def select_data_files(self, datapath=None, name_pattern="", auto=True,
load_hdf=False):
"""
Provide the user with menu to select data files.
Finding the right data store is complicated as there are many kinds of data
files
* If datapath is ...RA_data/HDF5/... then the files could be .h5 (Ashish)
or .hdf5 (Dean).
* If datapath is ...RA_data/FITS/... then the extent is .fits.
* If datapath is ...project_data/... then the extent is .pkl
* If datapath is ...projects/... (default) then the extent is probably
.csv or .dat or .prd.
@param datapath : path to top of the tree where the DSS subdirectories are
@type datapath : str
@param name_pattern : pattern for selecting file names, e.g. source
@type name_pattern : str
@param load_hdf : use RA_data/HDF5 directory if True
@type load_hdf : bool
@param auto : take all files found
@type auto : bool
@return: list of str
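Example (illustrative; the pattern depends on how the files are named)::
    datafiles = ses.select_data_files(name_pattern="t12127")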
"""
# Get the data files to be processed
self.logger.debug("select_data_files: looking in %s", datapath)
if name_pattern:
name,extent = os.path.splitext(name_pattern)
if extent.isalpha(): # a proper extent with no wildcards
# take name pattern as is
pass
else:
# only one * at front and back of pattern
name_pattern = "*"+name_pattern.rstrip('*')+"*"
else:
# no pattern specified. All files.
name_pattern = "*"
self.logger.debug("select_data_files: for pattern %s", name_pattern)
if datapath:
if re.search('HDF5', datapath):
load_hdf = True
elif re.search('project_data', datapath):
load_hdf = False
datafiles = support.text.select_files(datapath+name_pattern+"[0-9].pkl")
elif re.search('FITS', datapath):
datafiles = support.text.select_files(datapath+name_pattern+".fits")
if load_hdf:
full = datapath+name_pattern+".h*5"
else:
full = datapath+name_pattern
else:
full = self.session_dir + name_pattern
self.logger.debug("select_data_files: from: %s", full)
if auto:
datafiles = glob.glob(full)
else:
datafiles = support.text.select_files(full)
self.logger.debug("select_data_files: found %s", datafiles)
if datafiles == []:
self.logger.error(
"select_data_files: None found. Is the data directory mounted?")
raise RuntimeError('No data files found.')
if type(datafiles) == str:
datafiles = [datafiles]
self.logger.info("select_data_files: to be processed: %s", datafiles)
return datafiles
class Spectrum(Observation):
"""
Class for spectra
"""
def __init__(self):
"""
needs a spectrum attribute
"""
self.logger = logging.getLogger(logger.name+".Spectrum")
def get_num_chans(self, linefreq, bandwidth, max_vel_width):
"""
compute the base 2 number of output channels for the specified resolution
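Example (a worked sketch for the 1420.4 MHz HI line)::
    n = self.get_num_chans(1420.4, 8., 0.2)
    # 8 MHz is about 1690 km/s at 1420.4 MHz, so about 8448 channels are
    # needed; the next power of 2 is 16384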
"""
kmpspMHz = 300000./linefreq
BW_kmps = bandwidth*kmpspMHz
est_num_chan_out = BW_kmps/max_vel_width
self.logger.debug("get_num_chans: estimated num chans out = %d",
est_num_chan_out)
return 2**int(math.ceil(math.log(est_num_chan_out,2)))
def reduce_spectrum_channels(self, refval, refpix, delta,
num_chan=1024, axis=0):
"""
Reduce the number of channels in the spectrum.
The default option is to reduce the spectrum to a specified number of
channels with a default of 1024. The input spectrum is presumed to have
2**N channels so that num_chans_in/num_chan is an integer.
If 'spectrum' is an N-D array, then the spectrum axis is given by 'axis'
which defaults to 0.
'delta' is negative for lower sideband or reversed double sideband spectra.
The spectrum itself is taken from the attribute ``spectrum`` (list or nparray).
@param refval : X-axis value at the reference pixel of 'spectrum'
@type refval : float
@param refpix : reference pixel for 'spectrum'
@type refpix : int
@param delta : interval between pixels on the X-axis
@type delta : float
@param num_chan : optional number of channels to be returned (default: 2^10)
@type num_chan : int
@return: numpy.array
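Example (a worked sketch assuming a 4096-channel input spectrum)::
    specout, refval2, refpix2, delta2 = self.reduce_spectrum_channels(
        refval, refpix, delta, num_chan=1024)
    # 4096/1024 = 4 channels are averaged, so delta2 = 4*delta,
    # refpix2 = refpix/4 and refval2 = refval + delta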
"""
if math.log(num_chan,2) % 1:
raise RuntimeError("num_chan = %d is not a power of 2", num_chan)
if type(self.spectrum) == NP.ndarray:
num_chans_in = self.spectrum.shape[axis]
else:
num_chans_in = len(self.spectrum)
if math.log(num_chans_in,2) % 1:
raise RuntimeError("input spectrum length = %d is not a power of 2",
num_chans_in)
self.logger.debug("reduce_spectrum_channels: %d channels in", num_chans_in)
num_chan_avg = num_chans_in//num_chan  # integer: both counts are powers of 2
newrefpix = refpix//num_chan_avg
self.logger.debug("reduce_spectrum_channels: refpix from %d to %d",
refpix, newrefpix)
newdelta = delta*num_chan_avg
self.logger.debug("reduce_spectrum_channels: delta from %.3f to %.3f",
delta, newdelta)
newrefval = refval + delta*(num_chan_avg/2 - 1)
self.logger.debug("reduce_spectrum_channels: refval from %.3f to %.3f",
refval, newrefval)
self.logger.debug("reduce_spectrum_channels: averaging %d channels", num_chan_avg)
specout = NP.array([self.spectrum[index*num_chan_avg:(index+1)*num_chan_avg].mean()
for index in range(num_chan)])
self.logger.debug("reduce_spectrum_channels: %d channels out", num_chan)
return specout, newrefval, newrefpix, newdelta
def get_freq_array(self, bandwidth, n_chans):
"""
Create an array of frequencies for the channels of a backend
@param bandwidth : bandwidth
@type bandwidth : float
@param n_chans : number of channels
@type n_chans : int
@return: frequency of each channel in same units as bandwidth
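Example (a worked sketch)::
    self.get_freq_array(1000., 4)   # -> array([   0.,  250.,  500.,  750.])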
"""
return NP.arange(n_chans)*float(bandwidth)/n_chans
def freq_to_chan(frequency,bandwidth,n_chans):
"""
Returns the channel number where a given frequency is to be found.
@param frequency : frequency of channel in same units as bandwidth.
@type frequency : float
@param bandwidth : upper limit of spectrometer passband
@type bandwidth : float
@param n_chans : number of channels in the spectrometer
@type n_chans : int
@return: channel number (int)
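Example (a worked sketch)::
    freq_to_chan(100., 1000., 1024)    # -> round(100/1000*1024) = 102
    freq_to_chan(-100., 1000., 1024)   # negative frequencies count down from the top: 922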
"""
if frequency < 0:
frequency = bandwidth + frequency
if frequency > bandwidth:
raise RuntimeError("that frequency is too high.")
return round(float(frequency)/bandwidth*n_chans) % n_chans
def get_smoothed_bandshape(self, degree = None, poly_order=15):
"""
Do a Gaussian smoothing of the spectrum and then fit a polynomial.
Optionally, the raw and smoothed data and the fitted polynomial can be
plotted.
Note
====
``numpy.polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False)``
Least squares polynomial fit.
Fit a polynomial::
p(x) = p[0] * x**deg + ... + p[deg]
of degree deg to points (x, y).
Returns a vector of coefficients p that minimises the squared error.
@param spectrum : input data
@type spectrum : list of float
@param degree : number of samples to smoothed (Gaussian FWHM)
@type degree : int
@param poly_order : order of the polynomial
@type poly_order : int
@param plot : plotting option
@type plot : boolean
@return: (polynomial_coefficient, smoothed_spectrum)
"""
if degree is None:
degree = len(self.spectrum)//100  # integer: used as a slice index below
# normalize the spectrum so max is 1 and convert to dB.
max_lev = NP.max(self.spectrum)
norm_spec = NP.array(self.spectrum)/float(max_lev)
norm_spec_db = 10*NP.log10(norm_spec)
# do a Gaussian smoothing
norm_spec_db_smoothed = smoothListGaussian(norm_spec_db, degree=degree)
# deal with the edges by making them equal to the smoothed end points
norm_spec_db_smoothed_resized = NP.ones(len(self.spectrum))
# left end
norm_spec_db_smoothed_resized[0:degree] = norm_spec_db_smoothed[0]
# middle
norm_spec_db_smoothed_resized[degree:degree+len(norm_spec_db_smoothed)] = \
norm_spec_db_smoothed
# right end
norm_spec_db_smoothed_resized[degree+len(norm_spec_db_smoothed):] = \
norm_spec_db_smoothed[-1]
# fit the polynomial described in the Note above; the channel index is
# assumed to be the abscissa
poly = NP.polyfit(NP.arange(len(norm_spec_db_smoothed_resized)),
norm_spec_db_smoothed_resized, poly_order)
return poly, norm_spec_db_smoothed_resized
# ------------------------ module functions -------------------------------
def examine_text_data_file(filename):
"""
Examine a file to guide ``genfromtxt()``
Things to look for::
* Is there a header line with column names? If not, use argument ``names``.
* Is the number of names equal to the number of columns? If not::
- use argument ``names`` and ``skip_header=1``, or
- use argument ``delimiter`` with a list of column widths
and ``skip_header=1``.
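Example (the file name is illustrative)::
    examine_text_data_file('t12127.10')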
"""
print(examine_text_data_file.__doc__)
fd = open(filename, "r")
lines = fd.readlines()
fd.close()
topline = lines[0].strip().split()
print(" 1 2 3 4 5 6 7")
print("01234567890123456789012345678901234567890123456789012345678901234567890123456789")
print(lines[0].strip())
print(lines[1].strip())
print(" ...")
print(lines[-1].strip())
data = NP.genfromtxt(filename, dtype=None, names=None, skip_header=1, encoding=None)
print("%d datatypes:" % len(data.dtype.fields))
for item in data.dtype.fields:
print(item, data.dtype.fields[item])
def get_obs_dirs(project, station, year, DOY, datafmt=None):
"""
Returns the directories where data and working files are kept
@param project : project code string, e.g., RRL
@type project : str
@param station : DSN station number
@type station : int
@param year : year of observation
@type year : int
@param DOY : day of year of observations
@type DOY : int
@param datafmt : raw data format
@type datafmt : str
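Example (a worked sketch; the project and date are illustrative)::
    get_obs_dirs("SolarPatrol", 28, 2012, 127)
    # -> ('/usr/local/project_data/SolarPatrol/dss28/2012/127/',
    #     '/usr/local/projects/SolarPatrol/Observations/dss28/2012/127/',
    #     '')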
"""
#logger.debug("get_obs_dirs: type %s for %s, DSS%d, %4d/%03d",
# datafmt, project, station, year, DOY)
obspath = "dss%2d/%4d/%03d/" % (station,year,DOY)
if project:
projdatapath = "/usr/local/project_data/"+project+"/"+obspath
projworkpath = "/usr/local/projects/"+project+"/Observations/"+obspath
else:
projdatapath = ""
projworkpath = ""
if datafmt:
rawdatapath = "/usr/local/RA_data/"+datafmt+"/"+obspath
else:
rawdatapath = ""
return projdatapath, projworkpath, rawdatapath
# --------- old stuff to be discarded still needed for now ---------------
def old_get_obs_session(project=None, dss=None, date=None, path='proj'):
"""
Provides project, station, year and DOY, asking as needed.
It follows one of several possible paths to get to the session::
proj - path through /usr/local/projects/<project>
hdf5 - path through /usr/local/RA_data/HDF5
fits - path through /usr/local/RA_data/FITS
wvsr - path through /data
@param project : optional name as defined in /usr/local/projects
@type project : str
@param dss : optional station number
@type dss : int
@param date : optional YYYY/DDD
@type date : str
@return: project, DSS, year, DOY.
"""
def get_directory(path):
"""
"""
# only one trailing /
path = path.rstrip('/')+"/*"
logger.debug("get_obs_session:get_directory: from %s", path)
names = glob.glob(path)
if names:
dirs = []
for name in names:
if os.path.isdir(name):
dirs.append(os.path.basename(name))
dirs.sort()
for name in dirs:
print((name), end=' ')
return input('\n>')
else:
return []
def from_wvsr_dir():
"""
this needs to be completed and tested on crab14 or an auto host
"""
session = get_directory(local_dirs.wvsr_dir)
return session
cwd = os.getcwd()
# get the project
if project:
pass
else:
os.chdir(local_dirs.projects_dir)
project = get_directory(local_dirs.projects_dir)
logger.debug("from_wvsr_dir: project is %s", project)
projectpath = local_dirs.projects_dir+project
# get the station
if path[:4].lower() == 'wvsr':
# special call
print("from_wvsr_dir()")
if path[:4].lower() == 'proj':
os.chdir(projectpath+"/Observations/")
elif path[:4].lower() == 'hdf5':
os.chdir(local_dirs.hdf5_dir)
elif path[:4].lower() == 'fits':
os.chdir(local_dirs.fits_dir)
# get the station
if dss:
pass
else:
# This seems odd but get_directory() needs '/' and int does not
station = get_directory(os.getcwd()+"/").rstrip('/')
dss = int(station[-2:])
stationpath = os.getcwd()+"/dss"+str(dss)
# get the date
if date:
items = date.split('/')
year = int(items[0])
DOY = int(items[1])
else:
year = int(get_directory(stationpath))
yearpath = stationpath+"/"+str(year)
DOY = int(get_directory(yearpath))
os.chdir(cwd)
return project, dss, year, DOY
def get_data_channels(self, data, ignore=None):
"""
Gets or sets the names of the signal columns
Column names are separated into metadata and signals. Names in
``ignore`` re ignored. Names in ``aliases`` are replaced.
Args:
data (ndarray): data read from text file
ignore (list of str): columns to ignore; default None
Returns:
(list of str, list of str): metadata, signals
"""
names = data.dtype.names
metadata = []
signals = []
for name in names:
if ignore:
if name in ignore:
pass
if name.casefold() in map(str.casefold, self.aliases):
key = self.aliases[name].lower() # we use only lower case names
else:
key = name.lower()
self.logger.debug("get_data_channels: doing %s for %s", key, name)
if key in map(str.casefold, Observation.reserved):
if key.casefold() in ['top', 'tsys']:
signals.append(key)
else:
metadata.append(key)
else:
signals.append(key)
self.logger.debug("get_data_channels: signals: %s", signals)
self.logger.debug("get_data_channels: metadata: %s", metadata)
return metadata, signals
def make_data_struct(self, data, metadata, signals):
"""
Takes a text table with headers and converts it into a numpy ``ndarray``.
That means that a column can be extracted using `data[label]`.
Args
====
data: (ndarray) the data from the text file
metadata: (list of str) the column names for metadata
signals: (list of str) the column names for power-like data
"""
# get the known columns:
self.data = {}
self.numdata = len(data)
#self.logger.debug("make_data_struct: using aliases: %s", self.aliases)
# get columns that are not metadata; each has power for a channel
for signal in signals:
#self.logger.debug("make_data_struct: for signal: %s", signal)
#if signal in self.aliases.items():
# get the key in 'data' which matches 'value' in 'aliases'
# power = data[next(key for key, value in self.aliases.items()
# if value == signal)][idx]
#else:
# power = data[signal]
#self.channel[signal]['power'] = power
self.channel[signal]['power'] = data[signal]
# get UNIX time
if 'unixtime' in metadata:
if 'unixtime' in data.dtype.names:
self.data['unixtime'] = data['unixtime']
else:
# look up the equivalent of UNIX time in the data table
self.data['unixtime'] = data[next(key
for key, value in self.aliases.items()
if value == 'unixtime')]
# compute other convenient forms of time
self.data['datetime'] = [] # Python datetime.date
self.data['date_num'] = [] # matplotlib.dates date number
for idx in list(range(self.numdata)):
if 'unixtime' in data.dtype.names:
tm = data['unixtime'][idx]
else:
tm = data[next(key for key, value in self.aliases.items()
if value == 'unixtime')][idx]
dt = datetime.datetime.utcfromtimestamp(tm)
self.data['datetime'].append(dt)
self.data['date_num'].append(MPLd.date2num(dt))
self.start = self.data['unixtime'][0]
self.end = self.data['unixtime'][-1]
else:
# figure out how to process the time data columns
pass
# compute alternate coordinates
if self.check_for(data, 'azel'):
# azel exists; compute radec if needed; then radec2000 if needed
if self.check_for(data, 'radec'):
pass
else:
self.radec_from_azel()
if self.check_for(data, 'radec2000'):
# ra2000 and dec2000 already exist
pass
else:
self.radec2000_from_radec()
elif self.check_for(data, 'radec2000'):
# coordinates exist; compute back to azimuth and elevation
if self.check_for(data, 'radec'):
pass
else:
# compute observed RA and dec
self.radec_from_radec2000()
if self.check_for(data, 'azel'):
pass
else:
self.azel_from_radec()
# in here check for 'radec'
else:
self.logger.error("no coordinates found in data")
raise Exception("check INFO logging for columns found")
self.start = self.data['unixtime'].min()
self.end = self.data['unixtime'].max()
def make_channels(self, signals, props=None):
"""
Assign properties to the channels.
The prop keys are "freq", "pol", and "IFtype".
Args:
props (dict of dicts): signal channel properties.
"""
self.channel = {}
for ch in signals:
chindex = signals.index(ch)
if props:
self.channel[ch] = self.Channel(self, ch,
freq =props[ch]['freq'],
bw =props[ch]['bw'],
pol =props[ch]['pol'],
IFtype=props[ch]['IFtype'],
atten =props[ch]['atten'])
else:
self.channel[ch] = self.Channel(self, ch)
class GriddingMixin(object):
"""
Class for all the data and methods associated with a raster scan map
It is expected that the parent class is a subclass of ``Observation`` already
by virtue of it being a superclass of subclass which inherits these methods.
Attrs:
cfg (dict):
data (numpy array): from ``Observation``
logger (logging.Logger): replaces ``Observation`` logger
name (str): replaces ``Observation`` name
session (Session):
source (str):
step (float): map step size
"""
def get_grid_stepsize(self, xy=None):
"""
Determine the stepsize of gridded data
This assumes xdec and dec data increase incrementally by 'stepsize'.
The sequences may repeat in a sawtooth-like series. The number of
'xdec' and 'dec' points is multiple times the gridsize.
Arguments:
xy (tuple or list) - X-array and Y-array (default Map.data)
"""
# get the absolute value of coordinate intervals
if xy:
dxdecs = abs(xy[0][1:] - xy[0][:-1])
ddecs = abs(xy[1][1:] - xy[1][:-1])
else:
dxdecs = abs(self.data['xdec_offset'][1:]-self.data['xdec_offset'][:-1])
ddecs = abs(self.data['dec_offset'][1:] -self.data['dec_offset'][:-1])
# form array of X,Y pairs
coords = NP.array(list(zip(dxdecs,ddecs)))
# expect two clusters (default)
cluster_pos = VQ.find_clusters(coords).round(4) # tenths of mdeg
# return the non-zero intervals
return cluster_pos[0].max(), cluster_pos[1].max()
def regrid(self, width=1.0, height=1.0, step=None, power_key=None):
"""
converts a map from observed coordinates to map coordinates
If ``step`` is not given then the step size will be the average step size
in X and the average step in Y. In this case, the effect is to make a
regular grid if the original positions were not exact, i.e., pointing error.
@param width : map width in deg
@type width : float
@param height : map height in deg
@type height : float
@param step : map step size in X and Y in deg
@type step : (float, float)
@param power_key : dict key of Z-value
@type power_key : str
"""
# what is the power-like quantity?
if power_key:
pass
else:
# take the first that matches
for key in Observation.power_keys:
if key in self.data:
power_key = key
self.logger.info("regrid: using '%s'", power_key)
break
else:
continue
if power_key:
pass
else:
self.logger.error("regrid: no power data key found")
return None
if step == None:
# use the original stepsize
self.xstep, self.ystep = self.get_grid_stepsize()
else:
self.xstep, self.ystep = step
self.data['grid_x'] = NP.arange(
-width/2, width/2+self.xstep/2, self.xstep/2)
self.data['grid_y'] = NP.arange(
-height/2,height/2+self.ystep/2, self.ystep/2)
self.logger.debug("regrid: grid shape is %dx%d", len(self.data['grid_x']),
len(self.data['grid_y']))
self.data['grid_z'] = {}
for chnl in self.channel:
self.logger.debug("regrid: processing %s", chnl)
points = list(zip(self.data['xdec_offset'],self.data['dec_offset']))
self.logger.debug("regrid: %d positions", len(points))
values = self.data[power_key][chnl]
self.logger.debug("regrid: %d values", len(values))
xi, yi = NP.meshgrid(self.data['grid_x'], self.data['grid_y'])
try:
self.data['grid_z'][chnl] = scipy.interpolate.griddata(points, values,
(xi, yi), method='nearest')
except ValueError as details:
self.logger.error("regrid: gridding failed: %s", str(details))
self.logger.debug("regrid: channel %s length of points is %d",
chnl, len(points))
self.logger.debug("regrid: channel %s length of values is %d", chnl,
len(values))
continue
def radec_from_azel(self):
"""
compute RA and dec from az and el
"""
RA = []; decs = []; RAdecs = []
for idx in list(range(self.numdata)):
# setup
dt = self.data['datetime'][idx]
# format time as (YEAR, DOY.fff)
time_tuple = (dt.year,
DT.day_of_year(dt.year,dt.month,dt.day)
+ ( dt.hour
+ dt.minute/60.
+ dt.second/3600.
+ dt.microsecond/3600./1e6)/24.)
azimuth = self.data['az'][idx]
elevation = self.data['el'][idx]
# compute
ra,dec = A.AzEl_to_RaDec(azimuth, elevation,
self.latitude,
-self.longitude,
time_tuple)
RA.append(ra)
decs.append(dec)
RAdecs.append((RA,decs))
self.data['ra'] = RA
self.data['dec'] = decs
self.data['radec'] = RAdecs
def radec2000_from_radec(self):
"""
compute RA2000 and dec2000 from observed RA and dec
"""
RA2000 = []; decs2000 = []; RAdec2000 = []
for idx in list(range(self.numdata)):
# setup
tm = self.data['unixtime'][idx]
mjd = DT.UnixTime_to_MJD(tm)
MJD = int(mjd)
UT = 24*(mjd-MJD)
ra = self.data['ra']
dec = self.data['dec']
# compute
ra2000,dec2000 = A.apparent_to_J2000(MJD,UT,
ra, dec,
self.longitude, self.latitude)
RA2000.append(ra2000)
decs2000.append(dec2000)
RAdec2000.append((ra2000,dec2000))
self.data['ra2000'] = RA2000
self.data['dec2000'] = dec2000
self.data['radec2000'] = RAdec2000
def radec_from_radec2000(self):
"""
compute apparent RA and dec. from J2000 RA and dec
"""
RA = []; decs = []; RAdecs = []
for idx in list(range(self.numdata)):
# setup
tm = self.data['unixtime'][idx]
mjd = DT.UnixTime_to_MJD(tm)
MJD = int(mjd)
UT = 24*(mjd-MJD)
ra2000 = self.data['ra2000'][idx]
dec2000 = self.data['dec2000'][idx]
# compute
ra, dec = A.J2000_to_apparent(MJD, UT,
ra2000*math.pi/12, dec2000*math.pi/180)
RA.append(ra)
decs.append(dec)
RAdecs.append((ra,dec))
self.data['ra'] = RA
self.data['dec'] = decs
self.data['radec'] = RAdecs
def azel_from_radec(self):
"""
compute azimuth and elevation from apparent right ascension and declination
"""
azs = []; els = []; azels = []
for idx in list(range(self.numdata)):
# setup
ra = self.data['ra'][idx]
dec = self.data['dec'][idx]
timetuple = self.data['datetime'][idx].timetuple()
year = timetuple.tm_year
doy = timetuple.tm_yday + (timetuple.tm_hour
+(timetuple.tm_min+timetuple.tm_sec/60)/60)/24
# compute
az, el = A.RaDec_to_AzEl(ra, dec,
self.latitude, self.longitude, (year,doy))
azs.append(az)
els.append(el)
azels.append((az,el))
self.data['az'] = azs
self.data['el'] = els
self.data['azel'] = azels
def get_offsets(self, source="Sun", xdec_ofst=0., dec_ofst=0.):
"""
Generates a map in coordinates relative to a source
If the source is the default, the position of the Sun will be computed for
the time of each sample. IT SEEMS LIKE A GOOD IDEA TO DO THIS FOR PLANETS
ALSO.
This adds elements with keys ``xdec_offset`` and ``dec_offset`` to the
attribute ``data``.
@param source : source at map center
@type source : ephem source instance
@param xdec_ofst : relative X-dec position of sample
@type xdec_ofst : float
@param dec_ofst : relative dec position of sample
@type dec_ofst : float
@return: (dxdecs,ddecs) in degrees
"""
if source.lower() == "sun":
src = AE.ephem.Sun()
else:
src = AE.calibrator(source)
self.data['dec_offset'] = []
self.data['xdec_offset'] = []
for count in range(len(self.data['unixtime'])):
dt = datetime.datetime.utcfromtimestamp(
self.data['unixtime'][count])
if type(src) == AE.Quasar:
pass
else:
src.compute(dt)
ra_center = src.ra*12/math.pi # hours
dec_center = src.dec*180/math.pi # degrees
decrad = src.dec
# right ascension increases to the left, cross-dec to the right
self.data['xdec_offset'].append(xdec_ofst -
(self.data['ra'][count] - ra_center)*15*math.cos(decrad) )
self.data['dec_offset'].append( dec_ofst +
self.data['dec'][count] - dec_center)
# change list to NP.array
self.data['xdec_offset'] = NP.array(self.data['xdec_offset'])
self.data['dec_offset'] = NP.array(self.data['dec_offset'])
class Map(Observation, GriddingMixin):
"""
Map class without special features for GAVRT and Malargue
Most of the methods are mixed in to avoid conflicting with subclasses
"""
def __init__(self, parent=None, name=None, dss=None, date=None, project=None):
"""
Create a Map object
Args:
parent (Session): an observing session to which this belongs
name (str): an identifier, like a scan number
dss (int): station where the data were taken
date (str): date of observation as "YEAR/DOY"
project (str): project for which this observation was made
"""
Observation.__init__(self, parent=parent, name=name, dss=dss, date=date,
project=project)
class Recording(h5py.File):
"""
Class for raw data
This is typically the contents of a data file transcribed into a standard
format. It may be the data of one Observation object, or data for multiple
Observation objects, or contain part of the data for an Observation object.
If the data being curated are not in a standard project, and they are not
in a standard place,
"""
def __init__(self, session=None, path=None, date=None, dss=None, name=None):
"""
Initialize a metadata container and data directory
Args
====
session (Session): required, unless:
path (str) : location of raw data files
date
"""
self.logger = logging.getLogger(logger.name+".Recording")
if session:
self.session = session
if not name:
name = session.project + "-" + str(session.year) + "-" + \
('%03d' % session.doy) + "-dss" + str(session.dss)+".info"
self.year = session.year
self.doy = session.doy
self.dss = session.dss
self.project = session.project
self.session_dir = session.session_dir
elif path and name:
self.session = Session() # for its methods and attributes
self.session_dir = path
self.name = name
else:
raise RuntimeError("either a session or a path and filename required")
h5py.File.__init__(self, name, 'w')
self.attrs['project'] = self.project
self.attrs['dss'] = self.dss
self.attrs['year'] = self.year
self.attrs['doy'] = self.doy
class Session(object):
"""
Base class for an observing session on a given year and DOY
Public Attributes::
doy (int) - day of year for session
logger (logging.Logger) - logging.Logger object
parent (object) - a data reduction session (mult. observ. sessions)
year (int) -
doy (int) -
project (str) -
session_dir (str) - path to results from this session
A session usually refers to a telescope, date and project. This will
normally define a path to the session directory.
"""
def __init__(self, parent=None, date=None, project=None, dss=None,
path=None):
"""
initialize data reduction for one observing session
Args
====
parent: (object) optional class for a data reduction tool
date: (str) required, format YEAR/DOY
project: (str) required
dss (int) required
path (str) optional
If `path` is given for a non-standard observing files location, and it does
not exist, it will be created. Then the Recording and Observation instances
must be directed to where the files are.
"""
self.logger = logging.getLogger(logger.name+".Session")
if parent:
self.session = parent
if date and project and dss:
y,d = date.split('/')
self.year = int(y);
self.doy = int(d)
self.project = project
self.dss = dss
self.name = "'%s %4d/%03d'" % (self.project, self.year, self.doy)
else:
self.logger.error("__init__: missing DSS or year or DOY or project")
raise Exception("Where and when and for what project were the data taken?")
self.find_session_dir(path=path)
def find_session_dir(self, path=None):
"""
find or make the sessions directory
Args:
path (str) - explicit path to files
"""
self.logger.debug("find_session_dir: entered for path=%s", path)
if path:
self.session_dir = path
else:
obs_dir = local_dirs.projects_dir + self.project \
+"/Observations/dss"+str(self.dss)+"/"
self.session_dir = obs_dir+ "%4d" % self.year +"/"+ "%03d" % self.doy +"/"
if not os.path.exists(self.session_dir):
os.makedirs(self.session_dir, mode=0o775)
def select_data_files(self, datapath=None, name_pattern="", auto=True,
load_hdf=False):
"""
Provide the user with menu to select data files.
Finding the right data store is complicated as there are many kinds of data
files
* If datapath is ...RA_data/HDF5/... then the files could be .h5 (Ashish)
or .hdf5 (Dean).
* If datapath is ...RA_data/FITS/... then the extent is .fits.
* If datapath is ...project_data/... then the extent is .pkl
* If datapath is ...projects/... (default) then the extent is probably
.csv or .dat or .prd.
@param datapath : path to top of the tree where the DSS subdirectories are
@type datapath : str
@param name_pattern : pattern for selecting file names, e.g. source
@type name_pattern : str
@param load_hdf : use RA_data/HDF5 directory if True
@type load_hdf : bool
@para auto : take all files found
@type auto : bool
@return: list of str
"""
# Get the data files to be processed
self.logger.debug("select_data_files: looking in %s", datapath)
if name_pattern:
name,extent = os.path.splitext(name_pattern)
if extent.isalpha(): # a proper extent with no wildcards
# take name pattern as is
pass
else:
# only one * at front and back of pattern
name_pattern = "*"+name_pattern.rstrip('*')+"*"
else:
# no pattern specified. All files.
name_pattern = "*"
self.logger.debug("select_data_files: for pattern %s", name_pattern)
if datapath:
if re.search('HDF5', datapath):
load_hdf = True
elif re.search('project_data', datapath):
load_hdf = False
datafiles = support.text.select_files(datapath+name_pattern+"[0-9].pkl")
elif re.search('FITS', datapath):
datafiles = support.text.select_files(datapath+name_pattern+".fits")
if load_hdf:
full = datapath+name_pattern+".h*5"
else:
full = datapath+name_pattern
else:
full = self.session_dir + name_pattern
self.logger.debug("select_data_files: from: %s", full)
if auto:
datafiles = glob.glob(full)
else:
datafiles = support.text.select_files(full)
self.logger.debug("select_data_files: found %s", datafiles)
if datafiles == []:
self.logger.error(
"select_data_files: None found. Is the data directory mounted?")
raise RuntimeError('No data files found.')
if type(datafiles) == str:
datafiles = [datafiles]
self.logger.info("select_data_files: to be processed: %s", datafiles)
return datafiles
class Spectrum(Observation):
"""
Class for spectra
"""
def __init__(self):
"""
needs a spectrum attribute
"""
self.logger = logging.getLogger(logger.name+".Spectrum")
def get_num_chans(self, linefreq, bandwidth, max_vel_width):
"""
compute the base 2 number of output channels for the specified resolution
"""
kmpspMHz = 300000./linefreq
BW_kmps = bandwidth*kmpspMHz
est_num_chan_out = BW_kmps/max_vel_width
self.logger.debug("get_num_chans: estimated num chans out = %d",
est_num_chan_out)
return 2**int(math.ceil(math.log(est_num_chan_out,2)))
def reduce_spectrum_channels(self, refval, refpix, delta,
num_chan=1024, axis=0):
"""
Reduce the number of channels in the spectrum.
The default option is to reduce the spectrum to a specified number of
channels with a default of 1024. The input spectrum is presumed to have
2**N channels so that num_chan/num_chan_in is an integer.
If 'spectrum' is an N-D array, then the spectrum axis is given by 'axis'
which defaults to 0.
'delta' is negative for lower sideband or reversed double sideband spectra.
@param spectrum : spectrum values
@type spectrum : list or nparray
@param refval : X-axis value at the reference pixel of 'spectrum'
@type refval : float
@param refpix : reference pixel for 'spectrum'
@type refpix : int
@param delta : interval between pixels on the X-axis
@type delta : float
@param num_chan : optional number of channels to be returned (default: 2^10)
@type num_chan : int
@return: numpy.array
"""
if math.log(num_chan,2) % 1:
raise RuntimeError("num_chan = %d is not a power of 2", num_chan)
if type(self.spectrum) == NP.ndarray:
num_chans_in = self.spectrum.shape[axis]
else:
num_chans_in = len(self.spectrum)
if math.log(num_chans_in,2) % 1:
raise RuntimeError("input spectrum length = %d is not a power of 2",
num_chans_in)
self.logger.debug("reduce_spectrum_channels: %d channels in", num_chans_in)
num_chan_avg = num_chans_in/num_chan
newrefpix = refpix/num_chan_avg
self.logger.debug("reduce_spectrum_channels: refpix from %d to %d",
refpix, newrefpix)
newdelta = delta*num_chan_avg
self.logger.debug("reduce_spectrum_channels: delta from %.3f to %.3f",
delta, newdelta)
newrefval = refval + delta*(num_chan_avg/2 - 1)
self.logger.debug("reduce_spectrum_channels: refval from %.3f to %.3f",
refval, newrefval)
self.logger.debug("reduce_spectrum_channels: averaging %d channels", num_chan_avg)
specout = NP.array([spectrum[index*num_chan_avg:(index+1)*num_chan_avg].mean()
for index in range(num_chan)])
self.logger.debug("reduce_spectrum_channels: %d channels out", num_chan)
return specout, newrefval, newrefpix, newdelta
def get_freq_array(self, bandwidth, n_chans):
"""
Create an array of frequencies for the channels of a backend
@param bandwidth : bandwidth
@type bandwidth : float
@param n_chans : number of channels
@type n_chans : int
@return: frequency of each channel in same units as bandwidth
"""
return NP.arange(n_chans)*float(bandwidth)/n_chans
def freq_to_chan(frequency,bandwidth,n_chans):
"""
Returns the channel number where a given frequency is to be found.
@param frequency : frequency of channel in sane units as bandwidth.
@type frequency : float
@param bandwidth : upper limit of spectrometer passband
@type bandwidth : float
@param n_chans : number of channels in the spectrometer
@type n_chans : int
@return: channel number (int)
"""
if frequency < 0:
frequency = bandwidth + frequency
if frequency > bandwidth:
raise RuntimeError("that frequency is too high.")
return round(float(frequency)/bandwidth*n_chans) % n_chans
def get_smoothed_bandshape(self, degree = None, poly_order=15):
"""
Do a Gaussian smoothing of the spectrum and then fit a polynomial.
Optionally, the raw and smoothed data and the fitted polynomial can be
plotted.
Note
====
``numpy.polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False)``
Least squares polynomial fit.
Fit a polynomial::
p(x) = p[0] * x**deg + ... + p[deg]
of degree deg to points (x, y).
Returns a vector of coefficients p that minimises the squared error.
@param spectrum : input data
@type spectrum : list of float
@param degree : number of samples to smooth over (Gaussian FWHM)
@type degree : int
@param poly_order : order of the polynomial
@type poly_order : int
@param plot : plotting option
@type plot : boolean
@return: (polynomial_coefficient, smoothed_spectrum)
"""
if degree is None:
degree = len(self.spectrum)//100
# normalize the spectrum so max is 1 and convert to dB.
max_lev = NP.max(self.spectrum)
norm_spec = NP.array(self.spectrum)/float(max_lev)
norm_spec_db = 10*NP.log10(norm_spec)
# do a Gaussian smoothing
norm_spec_db_smoothed = smoothListGaussian(norm_spec_db, degree=degree)
# deal with the edges by making them equal to the smoothed end points
norm_spec_db_smoothed_resized = NP.ones(len(self.spectrum))
# left end
norm_spec_db_smoothed_resized[0:degree] = norm_spec_db_smoothed[0]
# middle
norm_spec_db_smoothed_resized[degree:degree+len(norm_spec_db_smoothed)] = \
norm_spec_db_smoothed
# right end
norm_spec_db_smoothed_resized[degree+len(norm_spec_db_smoothed):] = \
norm_spec_db_smoothed[-1]
# least-squares polynomial fit to the smoothed bandshape (uses poly_order)
poly = NP.polyfit(NP.arange(len(norm_spec_db_smoothed_resized)),
norm_spec_db_smoothed_resized, poly_order)
return poly, norm_spec_db_smoothed_resized
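# Minimal standalone sketch of the smooth-then-fit idea above (illustrative only): a Gaussian
# convolution stands in for the module's smoothListGaussian helper, whose source is not shown
# here, and the fit is done against a normalised x-axis for numerical stability. Assumes a
# positive-valued spectrum and numpy imported as NP.
def demo_smoothed_bandshape(spectrum, degree=None, poly_order=15):
    spectrum = NP.asarray(spectrum, dtype=float)
    if degree is None:
        degree = max(len(spectrum) // 100, 1)
    norm_db = 10 * NP.log10(spectrum / spectrum.max())           # normalise and convert to dB
    kernel = NP.exp(-0.5 * (NP.arange(-degree, degree + 1) / float(degree)) ** 2)
    kernel /= kernel.sum()                                       # unit-area Gaussian window
    smoothed = NP.convolve(norm_db, kernel, mode='same')         # Gaussian smoothing, same length
    x = NP.linspace(-1., 1., len(smoothed))
    poly = NP.polyfit(x, smoothed, poly_order)                   # least-squares polynomial fit
    return poly, smoothed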
# ------------------------ module functions -------------------------------
def examine_text_data_file(filename):
"""
Examine a file to guide ``genfromtxt()``
Things to look for::
* Is there a header line with column names? If not, use argument ``names``.
* Is the number of names equal to the number of columns? If not::
- use argument ``names`` and ``skip_header=1``, or
- use argument ``delimiter`` with a list of column widths
and ``skip_header=1``.
"""
print(examine_text_data_file.__doc__)
fd = open(filename, "r")
lines = fd.readlines()
fd.close()
topline = lines[0].strip().split()
print(" 1 2 3 4 5 6 7")
print("01234567890123456789012345678901234567890123456789012345678901234567890123456789")
print(lines[0].strip())
print(lines[1].strip())
print(" ...")
print(lines[-1].strip())
data = NP.genfromtxt(filename, dtype=None, names=None, skip_header=1, encoding=None)
print("%d datatypes:" % len(data.dtype.fields))
for item in data.dtype.fields:
print(item, data.dtype.fields[item])
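# Hypothetical follow-up to the guidance above: once the header layout is known, the file can
# be read with explicit column widths and names (the file name, widths and names below are the
# example values quoted in the module docstring, not guaranteed to match any particular file).
def load_tlog_example(filename="t12127.10"):
    return NP.genfromtxt(filename,
                         delimiter=[17, 16, 3, 11, 7, 9, 8, 2, 6],  # fixed column widths
                         skip_header=1,
                         names="UTC Epoch Chan Tsys Int Az El Diode Level".split(),
                         dtype=None, encoding=None)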
def get_obs_dirs(project, station, year, DOY, datafmt=None):
"""
Returns the directories where data and working files are kept
@param project : project code string, e.g., RRL
@type project : str
@param station : DSN station number
@type station : int
@param year : year of observation
@type year : int
@param DOY : day of year of observations
@type DOY : int
@param datafmt : raw data format
@type datafmt : str
"""
#logger.debug("get_obs_dirs: type %s for %s, DSS%d, %4d/%03d",
# datafmt, project, station, year, DOY)
obspath = "dss%2d/%4d/%03d/" % (station,year,DOY)
if project:
projdatapath = "/usr/local/project_data/"+project+"/"+obspath
projworkpath = "/usr/local/projects/"+project+"/Observations/"+obspath
else:
projdatapath = ""
projworkpath = ""
if datafmt:
rawdatapath = "/usr/local/RA_data/"+datafmt+"/"+obspath
else:
rawdatapath = ""
return projdatapath, projworkpath, rawdatapath
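# Worked example of the paths produced above (illustrative values only):
#   get_obs_dirs("SolarPatrol", 28, 2012, 127, datafmt="HDF5") returns
#   ("/usr/local/project_data/SolarPatrol/dss28/2012/127/",
#    "/usr/local/projects/SolarPatrol/Observations/dss28/2012/127/",
#    "/usr/local/RA_data/HDF5/dss28/2012/127/")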
# --------- old stuff to be discarded still needed for now ---------------
def old_get_obs_session(project=None, dss=None, date=None, path='proj'):
"""
Provides project, station, year and DOY, asking as needed.
It follows one of several possible paths to get to the session::
proj - path through /usr/local/projects/<project>
hdf5 - path through /usr/local/RA_data/HDF5
fits - path through /usr/local/RA_data/FITS
wvsr - path through /data
@param project : optional name as defined in /usr/local/projects
@type project : str
@param dss : optional station number
@type dss : int
@param date : optional YYYY/DDD
@type date : str
@return: project, DSS, year, DOY.
"""
def get_directory(path):
"""
"""
# only one trailing /
path = path.rstrip('/')+"/*"
logger.debug("get_obs_session:get_directory: from %s", path)
names = glob.glob(path)
if names:
dirs = []
for name in names:
if os.path.isdir(name):
dirs.append(os.path.basename(name))
dirs.sort()
for name in dirs:
print((name), end=' ')
return input('\n>')
else:
return []
def from_wvsr_dir():
"""
this needs to be completed and tested on crab14 or an auto host
"""
session = get_directory(local_dirs.wvsr_dir)
return session
cwd = os.getcwd()
# get the project
if project:
pass
else:
os.chdir(local_dirs.projects_dir)
project = get_directory(local_dirs.projects_dir)
logger.debug("from_wvsr_dir: project is %s", project)
projectpath = local_dirs.projects_dir+project
# get the station
if path[:4].lower() == 'wvsr':
# special call
print("from_wvsr_dir()")
if path[:4].lower() == 'proj':
os.chdir(projectpath+"/Observations/")
elif path[:4].lower() == 'hdf5':
os.chdir(local_dirs.hdf5_dir)
elif path[:4].lower() == 'fits':
os.chdir(local_dirs.fits_dir)
# get the station
if dss:
pass
else:
# This seems odd but get_directory() needs '/' and int does not
station = get_directory(os.getcwd()+"/").rstrip('/')
dss = int(station[-2:])
stationpath = os.getcwd()+"/dss"+str(dss)
# get the date
if date:
items = date.split('/')
year = int(items[0])
DOY = int(items[1])
else:
year = int(get_directory(stationpath))
yearpath = stationpath+"/"+str(year)
DOY = int(get_directory(yearpath))
os.chdir(cwd)
return project, dss, year, DOY
| en | 0.713471 | # -*- coding: utf-8 -*- Modules to support data reduction in Python. The main purpose of the base module ``Data_Reduction`` is to provide a suplerclass with a good set of attributes and methods to cover all common needs. The base module is also able to read data from a text file as a ``numpy`` structured array. This is done with a class called ``DataGetterMixin`` which must be invoked after the base class has been initiated. The module function ``examine_text_data_file()`` reveals the structure of the file(s) that provide the data.. Examples ======== Here we initiate a base class after mixing in the data getter. The first line o the file has column names but the first three columns are all under one name ``UTC`` so we specify column widths to consider the first three columns to be one column. We use the names from the first line of the file, which could have been done with an ``open()``, ``readline()``, and ``close()``:: mixIn(Observation, DataGetterMixin) obs = Observation(dss=28, date="2012/127", project="SolarPatrol") obs.open_datafile('t12127.10', delimiter=[17,16,3,11,7,9,8,2,6], skip_header=1, names="UTC Epoch Chan Tsys Int Az El Diode Level".split()) Now the data getter is already mixed in to Observation so we don't need to do it again. In this case we specify the names of the columns, changing ``Int`` to ``Integr``:: obs2 = Observation(dss=28, date="2012/127", project="SolarPatrol") obs2.open_datafile('t12127.10', skip_header=1, names="Year DOY UTC Epoch Chan Tsys Integr Az El Diode Level".split()) The class Map inherits from DataGetterMixin, so no explicit mixin required:: obsmap = Map(dss=84, date="2020/163", project="SolarPatrol") obsmap.initialize('sim-venus.dat', source="Venus") Let's examine ``obsmap``. We have only one signal column:: In [3]: obsmap.channel.keys() Out[3]: dict_keys(['xl']) In [4]: obsmap.channel['xl'].keys() Out[4]: dict_keys(['freq', 'bw', 'pol', 'ifmode', 'atten', 'power']) # standard Python modules # vector quantization # enable raw_input Tab completion # module logger superclass for a data structure and methods Attributes ========== aliases - (dict) data keys to replace those in original data channel - (dict) signal paths, e.g., different freqs and pols data - (dict) original data, e.g., read from file or database DOY - (int) day of year of observation end - (float) UNIX time at the end latitude - (float) from obs logger - (logging.Logger) longitude - (float) from obs name - (str) user assigned, defaults to YEAR/DOY numdata - (int) number of data samples obs - (AE.DSS) observatory session - (Session) set of observations, parent to Observation session_path - (str) directory for session files start - (float) UNIX time at the beginning year - (int) year of observation **Reserved Column Names** These column names are recognized. They are also the keys for attribute ``data``. 
These quantities must be present in some form:: unixtime (float) UNIX time in sec chan_name (str) channel name integr (float) integration (exposure) in sec azel (float,float) azimuth and elevation in decimal deg power (float) power level if only a single channel Optional:: diode (float) 0 or power in K (integers OK) level (float) (unidentified -- in ``tlog`` table) cryotemp (float) cryostat temp in K windspeed (float) km/hr winddir (float) deg ambtemp (float) deg C pressure (float) mbar Columns to be computed:: mpldatenum (float) matplotlib ``datenum`` Alternative for ``power``:: tsys (float) system temperature (calibrated power) top (float) alternative for ``tsys`` (used in DSN) vfc_counts (int) VFC counts (rate times ``integr``) Any column with a name which is not a reserved name is assumed to be power-like data from the channel with that name, unless that name is in a list provided to the argument ``ignore`` in the method ``get_data_channels`` of the class ``DataGetterMixin``. Alternative for ``unixtime``:: year (int) year of observation doy (int) day of year utc (str) HH:MM:SS timestr (str) something like 2020/06/14/14:22:21.00 Alternative for ``chan_name``:: chan (int) index in receiver channel names Alternative for ``azel``:: radec (float,float) precessed right ascension in decimal hours and precessed declination in decimal deg radec1950 (float,float) mean right ascension in decimal hours and mean declination in decimal deg at epoch radec2000 (float,float) mean right ascension in decimal hours and mean declination at epoch in decimal deg az (float) azimuth in decimal deg el (float) elevation in decimal deg ra (float) precessed right ascension in decimal hours dec (float) precessed declination in decimal deg ra1950 (float) mean right ascension in decimal hours at epoch dec1950 (float) mean declination in decimal deg at epoch ra2000 (float) mean right ascension in decimal hours at epoch dec2000 (float) mean declination in decimal deg at epoch Notes ===== * The ``data`` structure is a dict. * The value of a ``data`` item is either a numpy array or a object like ``float``, ``int``, or ``str``. * The keys have reserved words defined above and will be lowercase. * Items with other keys may be added, typically by a child class. * Coordinates shall be in pairs, `e.g. ``azel``, ``radec``. (This way you never get one without the other.) Create a base Observation object. This is not meant to be initialized by itself. A subclass generally determines how data are read in. However, method ``initialize()`` provides a basic data read capability using ``numpy.genfromtxt()`` and creates the object's data structure. Args: parent (Session): session to which this observation belongs name (str): an identifier; default is station ID + "obs" dss (int): station number date (str): "YEAR/DOY" project (str): directory under /usr/local/projects # observatory must be specified # deg # deg # give the object a name # the observation was part of some project # the observation was done on some date # accomodate subclass arguments # what I really want to do here is see if this was called by a subclass, # in which case I do not try to get the channel info until this # initialization has finished. 
# #if hasattr(self, "get_data_channels"): # channels = self, get_data_channels() # self.make_channels(channels) #else: # self.logger.info("__init__: initialize() may now be called") Checks for presence of coordinates in pairs or singles @param longlat : "azel", or "radec", or "radecEPOC" @type longlat : str # 'az' or 'ra' # has epoch # date of observation Checks for separate coordinates and splits if coord pairs Args: data (dict): attribute ``data`` longlat (str): "azel", or "radec", or "radecEPOC" # coords need to be computed from other coords Converts a sequence of alternating real/imag samples to complex @param rawdata : alternating real and imaginary bytes @type rawdata : numpy array of signed int8 @return: numpy array of complex Converts a complex spectrum array and returns two reals with USB and LSB This applies a Hilbert transform to the complex data. Class for a signal path Notes ===== The properties can be accessed as if the class were a dict. Arguments ========= freq:float or int: center frequency in MHz bw:float or int: bandwidth in MHz pol:str: polarization code Class for getting data from a CSV file. Get the data and make a data structure for the observations. This is not included by default in ``__init__()`` to keep it simple for subclasses. Args: filename (str): name only, required; the path is provided delimiter (str): what separates the columns names (bool): the first line has column names skip_header (int) : number of rows to skip # get the data # get the signal columns and names # create Channel objects for the signal properties # create the data structure # compute the offsets from the source center for each data point Opens and reads a data file This is used by ``Malargue`` (one data files) and ``GAVRT`` (one data file for each signal). Args: filename (str): text data file name delimiter (str): separator between columns (default: whitespace) names (bool): file row has column names (default: True) skip_header (int): number of rows to skip at beginning of file Returns: ndarray: Gets or sets the names of the signal columns Column names are separated into metadata and signals. Names in ``ignore`` re ignored. Names in ``aliases`` are replaced. Args: data (ndarray): data read from text file ignore (list of str): columns to ignore; default None Returns: (list of str, list of str): metadata, signals # we use only lower case names Takes a text table with headers and converts it into a numpy ``ndarray``. That means that a column can be extracted using `data[label]`. 
Args ==== data: (ndarray) the data from the text file metadata: (list of str) the column names for metadata signals: (list of str) the column names for power-like data # get the known columns: #self.logger.debug("make_data_struct: using aliases: %s", self.aliases) # get columns that are not metadata; each has power for a channel #self.logger.debug("make_data_struct: for signal: %s", signal) #if signal in self.aliases.items(): # get the key in 'data' which matches 'value' in 'aliases' # power = data[next(key for key, value in self.aliases.items() # if value == signal)][idx] #else: # power = data[signal] #self.channel[signal]['power'] = power # get UNIX time # look up the equivalent of UNIX time in the data table # compute other convenient forms of time # Python datetime.date # matplotlib.dates date number # figure out how to process the time data columns # compute alternate coordinates # azel exists; compute radec if needed; then radec2000 if needed # ra2000 and dec2000 already exist # coordinates exist; compute back to azimuth and elevation # compute observed RA and dec # in here check for 'radec' Assign properties to the channels. The prop keys are "freq", "pol", and "IFtype". Args: props (dict of dicts): signal channel properties. Class for all the data and methods associated with a raster scan map It is expected that the parent class is a subclass of ``Observation`` already by virtue of it being a superclass of subclass which inherits these methods. Attrs: cfg (dict): data (numpy array): from ``Observation`` logger (logging.Logger): replaces ``Observation`` logger name (str): replaces ``Observation`` name session (Session): source (str): step (float): map step size Determine the stepsize of gridded data This assumes xdec and dec data increase incrementally by 'stepsize'. The sequences may repeat in a sawtooth-like series. The number of 'xdec' and 'dec' points is multiple times the gridsize. Arguments: xy (tuple or list) - X-array and Y-array (default Map.data) # get the absolute value of coordinate intervals # form array of X,Y pairs # expect two clusters (default) # tenths of mdeg # return the non-zero intervals converts a map from observed coordinates to map coordinates If ``step`` is not given then the step size will be the average step size in X and the average step in Y. In this case, the effect is to make a regular grid if the original positions were not exact, i.e., pointing error. @param width : map width in deg @type width : float @param height : map height in deg @type height : float @param step : map step size in X and Y in deg @type step : (float, float) @param power_key : dict key of Z-value @type power_key : str # what is the power-like quantity? # take the first that matches # use the original stepsize compute RA and dec from az and el # setup # format time as (YEAR, DOY.fff) # compute compute RA2000 and dec2000 from observed RA and dec # setup # compute compute apparent RA and dec. from J2000 RA and dec # setup # compute compute azimuth and elevation from apparent right ascension and declination # setup # compute Generates a map in coordinates relative to a source If the source is the default, the position of the Sun will be computed for the time of each sample. IT SEEMS LIKE A GOOD IDEA TO DO THIS FOR PLANETS ALSO. This adds elements with keys ``xdec_offset`` and ``dec_offset`` to the attribute ``data``. 
@param source : source at map center @type source : ephem source instance @param xdec_ofst : relative X-dec position of sample @type xdec_ofst : float @param dec_ofst : relative dec position of sample @type dec_ofst : float @return: (dxdecs,ddecs) in degrees # hours # degrees # right ascension increases to the left, cross-dec to the right # change list to NP.array Map class without special features for GAVRT and Malargue Most of the methods are mixed in to avoid conflicting with subclasses Create a Map object Args: parent (Session): an observing session to which this belongs name (str): an identifier, like a scan number dss (int): station where the data were taken date (str): date of observation as "YEAR/DOY" project (str): project for which this observation was made Class for raw data This is typically the contents of a data file transcribed into a standard format. It may be the data of one Observation object, or data for multiple Observation objects, or contain part of the data for an Observation object. If the data being curated are not in a standard project, and they are not in a standard place, Initialize a metadata container and data directory Args ==== session (Session): required, unless: path (str) : location of raw data files date # for its methods and attributes Base class for an observing session on a given year and DOY Public Attributes:: doy (int) - day of year for session logger (logging.Logger) - logging.Logger object parent (object) - a data reduction session (mult. observ. sessions) year (int) - doy (int) - project (str) - session_dir (str) - path to results from this session A session usually refers to a telescope, date and project. This will normally define a path to the session directory. initialize data reduction for one observing session Args ==== parent: (object) optional class for a data reduction tool date: (str) required, format YEAR/DOY project: (str) required dss (int) required path (str) optional If `path` is given for a non-standard observing files location, and it does not exist, it will be created. Then the Recording and Observation instances must be directed to where the files are. find or make the sessions directory Args: path (str) - explicit path to files Provide the user with menu to select data files. Finding the right data store is complicated as there are many kinds of data files * If datapath is ...RA_data/HDF5/... then the files could be .h5 (Ashish) or .hdf5 (Dean). * If datapath is ...RA_data/FITS/... then the extent is .fits. * If datapath is ...project_data/... then the extent is .pkl * If datapath is ...projects/... (default) then the extent is probably .csv or .dat or .prd. @param datapath : path to top of the tree where the DSS subdirectories are @type datapath : str @param name_pattern : pattern for selecting file names, e.g. source @type name_pattern : str @param load_hdf : use RA_data/HDF5 directory if True @type load_hdf : bool @para auto : take all files found @type auto : bool @return: list of str # Get the data files to be processed # a proper extent with no wildcards # take name pattern as is # only one * at front and back of pattern # no pattern specified. All files. Class for spectra needs a spectrum attribute compute the base 2 number of output channels for the specified resolution Reduce the number of channels in the spectrum. The default option is to reduce the spectrum to a specified number of channels with a default of 1024. The input spectrum is presumed to have 2**N channels so that num_chan/num_chan_in is an integer. 
If 'spectrum' is an N-D array, then the spectrum axis is given by 'axis' which defaults to 0. 'delta' is negative for lower sideband or reversed double sideband spectra. @param spectrum : spectrum values @type spectrum : list or nparray @param refval : X-axis value at the reference pixel of 'spectrum' @type refval : float @param refpix : reference pixel for 'spectrum' @type refpix : int @param delta : interval between pixels on the X-axis @type delta : float @param num_chan : optional number of channels to be returned (default: 2^10) @type num_chan : int @return: numpy.array Create an array of frequencies for the channels of a backend @param bandwidth : bandwidth @type bandwidth : float @param n_chans : number of channels @type n_chans : int @return: frequency of each channel in same units as bandwidth Returns the channel number where a given frequency is to be found. @param frequency : frequency of channel in sane units as bandwidth. @type frequency : float @param bandwidth : upper limit of spectrometer passband @type bandwidth : float @param n_chans : number of channels in the spectrometer @type n_chans : int @return: channel number (int) Do a Gaussian smoothing of the spectrum and then fit a polynomial. Optionally, the raw and smoothed data and the fitted polynomial can be plotted. Note ==== ``numpy.polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False)`` Least squares polynomial fit. Fit a polynomial:: p(x) = p[0] * x**deg + ... + p[deg] of degree deg to points (x, y). Returns a vector of coefficients p that minimises the squared error. @param spectrum : input data @type spectrum : list of float @param degree : number of samples to smoothed (Gaussian FWHM) @type degree : int @param poly_order : order of the polynomial @type poly_order : int @param plot : plotting option @type plot : boolean @return: (polynomial_coefficient, smoothed_spectrum) # normalize the spectrum so max is 1 and convert to dB. # do a Gaussian smoothing # deal with the edges by making them equal to the smoothed end points # left end # middle # right end # ------------------------ module functions ------------------------------- Examine a file to guide ``genfromtxt()`` Things to look for:: * Is there a header line with column names? If not, use argument ``names``. * Is the number of names equal to the number of columns? If not:: - use argument ``names`` and ``skip_header=1``, or - use argument ``delimiter`` with a list of column widths and ``skip_header=1``. Returns the directories where data and working files are kept @param project : project code string, e.g., RRL @type project : str @param station : DSN station number @type station : int @param year : year of observation @type year : int @param DOY : day of year of observations @type DOY : int @param datafmt : raw data format @type datafmt : str #logger.debug("get_obs_dirs: type %s for %s, DSS%d, %4d/%03d", # datafmt, project, station, year, DOY) # --------- old stuff to be discarded still needed for now --------------- Provides project, station, year and DOY, asking as needed. It follows one of several possible paths to get to the session:: proj - path through /usr/local/projects/<project> hdf5 - path through /usr/local/RA_data/HDF5 fits - path through /usr/local/RA_data/FITS wvsr - path through /data @param project : optional name as defined in /usr/local/projects @type project : str @param dss : optional station number @type dss : int @param date : optional YYYY/DDD @type date : str @return: project, DSS, year, DOY. 
# only one trailing / this needs to be completed and tested on crab14 or an auto host # get the project # get the station # special call # get the station # This seems odd but get_directory() needs '/' and int does not # get the date | 3.493397 | 3 |
PyGRB/__init__.py | HughPaynter/PyGRB | 0 | 8713 | """
PyGRB.
A GRB light-curve analysis package.
"""
__version__ = "0.0.5"
__author__ = '<NAME>'
from . import backend
from . import fetch
from . import main
from . import postprocess
from . import preprocess
| """
PyGRB.
A GRB light-curve analysis package.
"""
__version__ = "0.0.5"
__author__ = '<NAME>'
from . import backend
from . import fetch
from . import main
from . import postprocess
from . import preprocess
| en | 0.767558 | PyGRB. A GRB light-curve analysis package. | 0.715536 | 1 |
src/config.py | john9384/PyblogRestAPI | 0 | 8714 | import os
from dotenv import load_dotenv
load_dotenv()
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URI')
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('EMAIL_USERNAME')
MAIL_PASSWORD = <PASSWORD>('EMAIL_PASSWORD')
| import os
from dotenv import load_dotenv
load_dotenv()
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URI')
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('EMAIL_USERNAME')
MAIL_PASSWORD = <PASSWORD>('EMAIL_PASSWORD')
| none | 1 | 1.83995 | 2 |
|
Context_Guided_RelRep/train.py | Huda-Hakami/Context-Guided-Relation-Embeddings | 1 | 8715 | import numpy as np
from wordreps import WordReps
from algebra import cosine, normalize
import tensorflow as tf
import random
from dataset import DataSet
import CGRE_Model
from Eval import eval_SemEval
import sklearn.preprocessing
# ============ End Imports ============
class Training():
def __init__(self):
# Compositional relation embeddings (G1) Hyperparameters
self.batchSize=100
G1_HL=3
G1_Hdim=WR.dim
G1_BN=True #boolean variable T/F for batch normalization on G1 MLP
G1_l2_reg=0.001 # L2 regularization coefficient
self.G1_pkeep=1.0 # 1.0 means no Dropout applied during training on G1
# LSTM pattern encoding (G2) Hyperparameters
G2_HL=1
G2_Hdim=WR.dim
self.G2_pkeep=1.0 # 1.0 means no Dropout applied during training on G2
activ='tanh'
# Create relational model instance
self.RelModel=CGRE_Model.CGRE(activ,self.batchSize)
self.RelModel.G1_model(Ea,G1_BN,G1_HL,G1_Hdim,G1_l2_reg)
self.RelModel.G2_rnn_model(DS.max_length,G2_HL,G2_Hdim)
# --------------------------------------------------
def Train_Model(self):
# Hyperparameters
epochs=500
hist_loss=[]
hist_acc=[]
winn_loss=1e7
win_acc=-1
# Discriminator Hyperparameters (for Rel-Rep-alignment model)
D_HL=0
D_Hdim=WR.dim
D_BN=False # boolean variable T/F for batch normalization on D
self.D_pkeep=1.0 # 1.0 means no Dropout applied during training on the Discriminator D
D_l2_reg=0.001 # L2 regularization coefficient (to perform l2 regularized cross-entropy)
Train = DS.Training_triplesIDs
Train_Relations=set([rel for (a,b,p,w,rel) in Train])
Num_of_Classes=len(Train_Relations)
print ("Number of relation labels for cross-entropy objective=",Num_of_Classes)
# Assign ids to relations
Rel2id={}
i=0
for rel in Train_Relations:
Rel2id[rel]=i
i+=1
Train_dic={}
for (a,b,p,w,rel) in Train:
Train_dic.setdefault((a,b,rel),[])
Train_dic[(a,b,rel)].append((p,w))
Training_patterns=set([p for (_,_,p,_,_) in Train])
print ('Number of training patterns after removing test instances=',len(Training_patterns))
Train_list=list(Train_dic.keys())
print ("Number of training word-pairs (a,b,[(p,w)])",len(Train_list))
self.RelModel.define_loss(D_HL,D_Hdim,D_BN,D_l2_reg,Num_of_Classes)
self.RelModel.optimize()
self.sess=tf.Session()
self.sess.run(tf.global_variables_initializer())
print ("==========================================================================")
for epoch in range(epochs):
# Randomly shuffle training instances for each epoch
random.shuffle(Train_list)
# performance every 20 steps
if epoch%1==0:
Pair_Embeddings=self.Gen_Pair_Embeddings()
acc_1,corr_1=eval_SemEval(Pair_Embeddings,'Test')
acc_2,corr_2=eval_SemEval(Pair_Embeddings,'Valid')
acc_3,corr_3=eval_SemEval(Pair_Embeddings,'All')
print ("Epoch:%d, Acc_Test:%f, Acc_Valid:%f, Acc_All:%f, Corr_Test:%f, Corr_Valid:%f, Corr_All:%f"%(epoch,acc_1,acc_2,acc_3,corr_1,corr_2,corr_3))
hist_acc.append(acc_2)
# For early stopping
if acc_2>win_acc:
win_acc=acc_2
self.Save_Trained_Model()
print ("Parameters and Pair-Embeddings are changed...")
best_epoch=epoch
patient_cnt=0
else:
patient_cnt+=1
if patient_cnt>10:
print ("early stopping ... epoch number %d"%epoch)
print ("Winner acc:%f at epoch:%d"%(win_acc,best_epoch))
# break
# Training
for minibatch in next_batch(self.batchSize,Train_list):
a_ids,b_ids,labels=shred_tuples(minibatch)
Train_Y=np.zeros((len(minibatch),Num_of_Classes))
for i,rel in enumerate(labels):
rel_id=Rel2id[rel]
Train_Y[i,rel_id]=1.0
train_data={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:self.G1_pkeep,\
self.RelModel.is_training:True,self.RelModel.D_pkeep:self.D_pkeep}
minibatch_patterns=[Train_dic[(a,b,rel)] for (a,b,rel) in minibatch]
max_num_of_patterns,pattern_seq,early_stop,weights=Pattern_Sequences(a_ids,b_ids,minibatch_patterns)
train_data[self.RelModel.max_num_of_patterns]=max_num_of_patterns
train_data[self.RelModel.patterns_ids]=pattern_seq
train_data[self.RelModel.early_stop]=early_stop
train_data[self.RelModel.weights]=weights
train_data[self.RelModel.G2_pkeep]=self.G2_pkeep
# Loss options
train_data[self.RelModel.Y_]=Train_Y
self.sess.run(self.RelModel.train_step,feed_dict=train_data)
# --------------------------------------------------
def Save_Trained_Model(self):
Pair_Embeddings_dic=self.Gen_Pair_Embeddings()
np.save("res/Pair_Embeddings.npy",Pair_Embeddings_dic)
# --------------------------------------------------
def Gen_Pair_Embeddings(self):
word_pairs_ids=[(DS.word2id[a],DS.word2id[b]) for (a,b) in DS.Test_Pairs]
a_ids=[t[0] for t in word_pairs_ids]
b_ids=[t[1] for t in word_pairs_ids]
dic={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:1.0,self.RelModel.is_training:False}
Pair_Embeddings1=self.sess.run(self.RelModel.Last_G1_output,feed_dict=dic)
# Pair_Embeddings1=sklearn.preprocessing.normalize(Pair_Embeddings1,axis=1,norm='l2') #L2 norm of r(a,b)
a_ids=[t[1] for t in word_pairs_ids]
b_ids=[t[0] for t in word_pairs_ids]
dic={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:1.0,self.RelModel.is_training:False}
Pair_Embeddings2=self.sess.run(self.RelModel.Last_G1_output,feed_dict=dic)
# Pair_Embeddings2=sklearn.preprocessing.normalize(Pair_Embeddings2,axis=1,norm='l2') #L2 norm of r(b,a)
Pair_Embeddings=np.hstack((Pair_Embeddings1,Pair_Embeddings2))
Pair_Embeddings_dic={}
for i,(a,b) in enumerate(DS.Test_Pairs):
Pair_Embeddings_dic[(a,b)]=Pair_Embeddings[i]
return Pair_Embeddings_dic
# ============ End of the Evaluation class ============
def next_batch(batchSize,data):
# loop over our dataset in mini-batches of size `batchSize`
for i in np.arange(0, len(data), batchSize):
# yield the current batched data
yield data[i:i + batchSize]
# -------------------------------------------------------
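# Illustrative use of next_batch together with the one-hot label encoding from Train_Model
# (standalone toy data; the relation names and ids below are hypothetical, np is the module's numpy):
def _demo_minibatching():
    train_list = [(0, 1, "rel_a"), (2, 3, "rel_b"), (4, 5, "rel_a")]
    rel2id = {"rel_a": 0, "rel_b": 1}
    for minibatch in next_batch(2, train_list):
        labels = [t[2] for t in minibatch]
        train_y = np.zeros((len(minibatch), len(rel2id)))   # one-hot targets, one row per pair
        for i, rel in enumerate(labels):
            train_y[i, rel2id[rel]] = 1.0
        print(minibatch, train_y)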
def shred_tuples(tuples):
a_ids=[t[0] for t in tuples]
b_ids=[t[1] for t in tuples]
labels=[t[2] for t in tuples]
return a_ids,b_ids,labels
# -------------------------------------------------------
def Pattern_Sequences(a_ids,b_ids,minibatch_patterns):
max_num_of_patterns=np.max([len(L) for L in minibatch_patterns])
min_num_of_patterns=np.min([len(L) for L in minibatch_patterns])
# print ("Max num of patterns:",max_num_of_patterns)
# print ("Min num of patterns:",min_num_of_patterns)
pattern_seq=np.zeros((len(a_ids)*max_num_of_patterns,DS.max_length+2),dtype=int) #+2 is for the targeted two entities a and b
early_stop=[0 for i in range(len(a_ids)*max_num_of_patterns)]
weights=[0.0 for i in range(len(a_ids)*max_num_of_patterns)]
for i in range(len(a_ids)):
set_of_patterns=minibatch_patterns[i]
for j in range(max_num_of_patterns):
if j<len(set_of_patterns):
pattern_id,w=set_of_patterns[j][0],set_of_patterns[j][1]
pattern=DS.id2Patterns[pattern_id]
words=pattern.strip().split(' ')
words.insert(0,DS.id2word[a_ids[i]])
words.append(DS.id2word[b_ids[i]])
early_stop[(i*max_num_of_patterns)+j]=len(words)
weights[(i*max_num_of_patterns)+j]=w
for k,word in enumerate(words):
pattern_seq[(i*max_num_of_patterns)+j,k]=DS.word2id[word]
return max_num_of_patterns,pattern_seq,early_stop,weights
# -----------------------------------------------------------
if __name__=="__main__":
'''
Word Embeddings
'''
pretrained_glove_300=("../glove.6B.300d.zip","glove",300)
WR=WordReps()
norm=1
standardise=0
WR.Read_Embeddings_zip_file(pretrained_glove_300,norm,standardise)
WR.vects['<PAD>']=np.zeros(WR.dim)
# WR.vects['X']=np.random.rand(WR.dim)
# WR.vects['Y']=np.random.rand(WR.dim)
WR.vects['X']=np.random.normal(size=(WR.dim)).astype('float32')
WR.vects['Y']=np.random.normal(size=(WR.dim)).astype('float32')
'''
Dataset
'''
corpus='Wikipedia_English'
Train_dataset=('DiffVec',"DiffVec_Pairs")
Test_dataset=('SemEval',"SemEval_Pairs.txt")
labels_type='proxy'
Reverse_pairs=True
DS=DataSet(corpus,Train_dataset,Test_dataset,labels_type,Reverse_pairs)
id2Patterns="../Relational_Patterns/Patterns_Xmid5Y"
Patterns_per_pair="../Relational_Patterns/Patterns_Xmid5Y_PerPair"
DS.Retrieve_Patterns(id2Patterns,Patterns_per_pair)
Ea=DS.Generate_Embedding_Matrix(WR)
'''
Training & Evaluation
'''
Eval=Training()
Eval.Train_Model()
| import numpy as np
from wordreps import WordReps
from algebra import cosine, normalize
import tensorflow as tf
import random
from dataset import DataSet
import CGRE_Model
from Eval import eval_SemEval
import sklearn.preprocessing
# ============ End Imports ============
class Training():
def __init__(self):
# Compositional relation embeddings (G1) Hyperparameters
self.batchSize=100
G1_HL=3
G1_Hdim=WR.dim
G1_BN=True #boolean variable T/F for batch normalization on G1 MLP
G1_l2_reg=0.001 # L2 regularization coefficient
self.G1_pkeep=1.0 # 1.0 means no Dropout applied during training on G1
# LSTM pattern encoding (G2) Hyperparameters
G2_HL=1
G2_Hdim=WR.dim
self.G2_pkeep=1.0 # 1.0 means no Dropout applied during training on G2
activ='tanh'
# Create relational model instance
self.RelModel=CGRE_Model.CGRE(activ,self.batchSize)
self.RelModel.G1_model(Ea,G1_BN,G1_HL,G1_Hdim,G1_l2_reg)
self.RelModel.G2_rnn_model(DS.max_length,G2_HL,G2_Hdim)
# --------------------------------------------------
def Train_Model(self):
# Hyperparameters
epochs=500
hist_loss=[]
hist_acc=[]
winn_loss=1e7
win_acc=-1
# Discriminator Hyperparameters (for Rel-Rep-alignment model)
D_HL=0
D_Hdim=WR.dim
D_BN=False # boolean variable T/F for batch normalization on D
self.D_pkeep=1.0 # 1.0 means no Dropout applied during training on the Discriminator D
D_l2_reg=0.001 # L2 regularization coefficient (to perform l2 regularized cross-entropy)
Train = DS.Training_triplesIDs
Train_Relations=set([rel for (a,b,p,w,rel) in Train])
Num_of_Classes=len(Train_Relations)
print ("Number of relation labels for cross-entropy objective=",Num_of_Classes)
# Assign ids to relations
Rel2id={}
i=0
for rel in Train_Relations:
Rel2id[rel]=i
i+=1
Train_dic={}
for (a,b,p,w,rel) in Train:
Train_dic.setdefault((a,b,rel),[])
Train_dic[(a,b,rel)].append((p,w))
Training_patterns=set([p for (_,_,p,_,_) in Train])
print ('Number of training patterns after removing test instances=',len(Training_patterns))
Train_list=list(Train_dic.keys())
print ("Number of training word-pairs (a,b,[(p,w)])",len(Train_list))
self.RelModel.define_loss(D_HL,D_Hdim,D_BN,D_l2_reg,Num_of_Classes)
self.RelModel.optimize()
self.sess=tf.Session()
self.sess.run(tf.global_variables_initializer())
print ("==========================================================================")
for epoch in range(epochs):
# Randomly shuffle training instances for each epoch
random.shuffle(Train_list)
# performance every 20 steps
if epoch%1==0:
Pair_Embeddings=self.Gen_Pair_Embeddings()
acc_1,corr_1=eval_SemEval(Pair_Embeddings,'Test')
acc_2,corr_2=eval_SemEval(Pair_Embeddings,'Valid')
acc_3,corr_3=eval_SemEval(Pair_Embeddings,'All')
print ("Epoch:%d, Acc_Test:%f, Acc_Valid:%f, Acc_All:%f, Corr_Test:%f, Corr_Valid:%f, Corr_All:%f"%(epoch,acc_1,acc_2,acc_3,corr_1,corr_2,corr_3))
hist_acc.append(acc_2)
# For early stopping
if acc_2>win_acc:
win_acc=acc_2
self.Save_Trained_Model()
print ("Parameters and Pair-Embeddings are changed...")
best_epoch=epoch
patient_cnt=0
else:
patient_cnt+=1
if patient_cnt>10:
print ("early stopping ... epoch number %d"%epoch)
print ("Winner acc:%f at epoch:%d"%(win_acc,best_epoch))
# break
# Training
for minibatch in next_batch(self.batchSize,Train_list):
a_ids,b_ids,labels=shred_tuples(minibatch)
Train_Y=np.zeros((len(minibatch),Num_of_Classes))
for i,rel in enumerate(labels):
rel_id=Rel2id[rel]
Train_Y[i,rel_id]=1.0
train_data={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:self.G1_pkeep,\
self.RelModel.is_training:True,self.RelModel.D_pkeep:self.D_pkeep}
minibatch_patterns=[Train_dic[(a,b,rel)] for (a,b,rel) in minibatch]
max_num_of_patterns,pattern_seq,early_stop,weights=Pattern_Sequences(a_ids,b_ids,minibatch_patterns)
train_data[self.RelModel.max_num_of_patterns]=max_num_of_patterns
train_data[self.RelModel.patterns_ids]=pattern_seq
train_data[self.RelModel.early_stop]=early_stop
train_data[self.RelModel.weights]=weights
train_data[self.RelModel.G2_pkeep]=self.G2_pkeep
# Loss options
train_data[self.RelModel.Y_]=Train_Y
self.sess.run(self.RelModel.train_step,feed_dict=train_data)
# --------------------------------------------------
def Save_Trained_Model(self):
Pair_Embeddings_dic=self.Gen_Pair_Embeddings()
np.save("res/Pair_Embeddings.npy",Pair_Embeddings_dic)
# --------------------------------------------------
def Gen_Pair_Embeddings(self):
word_pairs_ids=[(DS.word2id[a],DS.word2id[b]) for (a,b) in DS.Test_Pairs]
a_ids=[t[0] for t in word_pairs_ids]
b_ids=[t[1] for t in word_pairs_ids]
dic={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:1.0,self.RelModel.is_training:False}
Pair_Embeddings1=self.sess.run(self.RelModel.Last_G1_output,feed_dict=dic)
# Pair_Embeddings1=sklearn.preprocessing.normalize(Pair_Embeddings1,axis=1,norm='l2') #L2 norm of r(a,b)
a_ids=[t[1] for t in word_pairs_ids]
b_ids=[t[0] for t in word_pairs_ids]
dic={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:1.0,self.RelModel.is_training:False}
Pair_Embeddings2=self.sess.run(self.RelModel.Last_G1_output,feed_dict=dic)
# Pair_Embeddings2=sklearn.preprocessing.normalize(Pair_Embeddings2,axis=1,norm='l2') #L2 norm of r(b,a)
Pair_Embeddings=np.hstack((Pair_Embeddings1,Pair_Embeddings2))
Pair_Embeddings_dic={}
for i,(a,b) in enumerate(DS.Test_Pairs):
Pair_Embeddings_dic[(a,b)]=Pair_Embeddings[i]
return Pair_Embeddings_dic
# ============ End of the Evaluation class ============
def next_batch(batchSize,data):
# loop over our dataset in mini-batches of size `batchSize`
for i in np.arange(0, len(data), batchSize):
# yield the current batched data
yield data[i:i + batchSize]
# -------------------------------------------------------
def shred_tuples(tuples):
a_ids=[t[0] for t in tuples]
b_ids=[t[1] for t in tuples]
labels=[t[2] for t in tuples]
return a_ids,b_ids,labels
# -------------------------------------------------------
def Pattern_Sequences(a_ids,b_ids,minibatch_patterns):
max_num_of_patterns=np.max([len(L) for L in minibatch_patterns])
min_num_of_patterns=np.min([len(L) for L in minibatch_patterns])
# print ("Max num of patterns:",max_num_of_patterns)
# print ("Min num of patterns:",min_num_of_patterns)
pattern_seq=np.zeros((len(a_ids)*max_num_of_patterns,DS.max_length+2),dtype=int) #+2 is for the targeted two entities a and b
early_stop=[0 for i in range(len(a_ids)*max_num_of_patterns)]
weights=[0.0 for i in range(len(a_ids)*max_num_of_patterns)]
for i in range(len(a_ids)):
set_of_patterns=minibatch_patterns[i]
for j in range(max_num_of_patterns):
if j<len(set_of_patterns):
pattern_id,w=set_of_patterns[j][0],set_of_patterns[j][1]
pattern=DS.id2Patterns[pattern_id]
words=pattern.strip().split(' ')
words.insert(0,DS.id2word[a_ids[i]])
words.append(DS.id2word[b_ids[i]])
early_stop[(i*max_num_of_patterns)+j]=len(words)
weights[(i*max_num_of_patterns)+j]=w
for k,word in enumerate(words):
pattern_seq[(i*max_num_of_patterns)+j,k]=DS.word2id[word]
return max_num_of_patterns,pattern_seq,early_stop,weights
# -----------------------------------------------------------
if __name__=="__main__":
'''
Word Embeddings
'''
pretrained_glove_300=("../glove.6B.300d.zip","glove",300)
WR=WordReps()
norm=1
standardise=0
WR.Read_Embeddings_zip_file(pretrained_glove_300,norm,standardise)
WR.vects['<PAD>']=np.zeros(WR.dim)
# WR.vects['X']=np.random.rand(WR.dim)
# WR.vects['Y']=np.random.rand(WR.dim)
WR.vects['X']=np.random.normal(size=(WR.dim)).astype('float32')
WR.vects['Y']=np.random.normal(size=(WR.dim)).astype('float32')
'''
Dataset
'''
corpus='Wikipedia_English'
Train_dataset=('DiffVec',"DiffVec_Pairs")
Test_dataset=('SemEval',"SemEval_Pairs.txt")
labels_type='proxy'
Reverse_pairs=True
DS=DataSet(corpus,Train_dataset,Test_dataset,labels_type,Reverse_pairs)
id2Patterns="../Relational_Patterns/Patterns_Xmid5Y"
Patterns_per_pair="../Relational_Patterns/Patterns_Xmid5Y_PerPair"
DS.Retrieve_Patterns(id2Patterns,Patterns_per_pair)
Ea=DS.Generate_Embedding_Matrix(WR)
'''
Training & Evaluation
'''
Eval=Training()
Eval.Train_Model()
| en | 0.62083 | # ============ End Imports ============ # Compositional relation embeddings (G1) Hyperparameters #boolean variable T/F for batch normalization on G1 MLP # L2 regularization coefficient # 1.0 means no Dropout applied during training on G1 # LSTM pattern encoding (G2) Hyperparameters # 1.0 means no Dropout applied during training on G2 # Create relational model instance # -------------------------------------------------- # Hyperparameters # Discriminator Hyperparameters (for Rel-Rep-alignment model) # boolean variable T/F for batch normalization on D # 1.0 means no Dropout applied during training on the Discriminator D # L2 regularization coefficient (to perform l2 regularized cross-entropy) # Assign ids to relations # Randomly shuffle training instances for each epoch # performance every 20 steps # For early stopping # break # Training # Loss options # -------------------------------------------------- # -------------------------------------------------- # Pair_Embeddings1=sklearn.preprocessing.normalize(Pair_Embeddings1,axis=1,norm='l2') #L2 norm of r(a,b) # Pair_Embeddings2=sklearn.preprocessing.normalize(Pair_Embeddings2,axis=1,norm='l2') #L2 norm of r(b,a) # ============ End of the Evaluation class ============ # loop over our dataset in mini-batches of size `batchSize` # yield the current batched data # ------------------------------------------------------- # ------------------------------------------------------- # print ("Max num of patterns:",max_num_of_patterns) # print ("Min num of patterns:",min_num_of_patterns) #+2 is for the targeted two entities a and b # ----------------------------------------------------------- Word Embeddings # WR.vects['X']=np.random.rand(WR.dim) # WR.vects['Y']=np.random.rand(WR.dim) Dataset Training & Evaluation | 2.436919 | 2 |
synch_integrate.py | HerculesJack/grtrans | 25 | 8716 | <filename>synch_integrate.py
from radtrans_integrate import radtrans_integrate
from polsynchemis import polsynchemis
import numpy as np
import scipy.integrate
# calculate synchrotron emissivity for given coefficients
def synch_jarho(nu,n,B,T,theta):
if ((np.isscalar(nu)==False) & (np.isscalar(n)==True)):
n = n + np.zeros(len(nu))
B = B + np.zeros(len(nu))
T = T + np.zeros(len(nu))
theta = theta + np.zeros(len(nu))
e = polsynchemis.polsynchth(nu,n,B,T,theta)
j = e[:,:4]; a = e[:,4:8]; rho = e[:,8:]
return j,a,rho
def run(x,jarr,aarr,rhoarr,sphstokes=-1,atol=1e-8,rtol=1e-6,max_tau=10):
if sphstokes==-1:
method=0
else:
method=3
radtrans_integrate.init_radtrans_integrate_data(method,4,len(x),len(x),max_tau,0.1,atol,rtol,1e-2,100000)
Karr = (np.append(aarr,rhoarr,axis=1))
tau = np.append(0.,scipy.integrate.cumtrapz(Karr[:,0],x))
radtrans_integrate.integrate(x[::-1],jarr[:,:],Karr[:,:],tau,4)
i = radtrans_integrate.intensity.copy()
radtrans_integrate.del_radtrans_integrate_data()
return i
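# Hypothetical end-to-end usage of the two helpers above (all parameter values are made up for
# illustration and the compiled polsynchemis/radtrans_integrate extensions must be importable):
def _demo_integration():
    x = np.linspace(0.0, 1.0e14, 200)        # path-length samples along the ray (units assumed cgs)
    nu = 230e9 * np.ones_like(x)             # observing frequency per sample (Hz)
    j, a, rho = synch_jarho(nu, 1e6, 30.0, 5e10, np.pi / 3)   # n [cm^-3], B [G], T [K], angle
    return run(x, j, a, rho)                 # integrated Stokes intensities along the ray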
| <filename>synch_integrate.py
from radtrans_integrate import radtrans_integrate
from polsynchemis import polsynchemis
import numpy as np
import scipy.integrate
# calculate synchrotron emissivity for given coefficients
def synch_jarho(nu,n,B,T,theta):
if ((np.isscalar(nu)==False) & (np.isscalar(n)==True)):
n = n + np.zeros(len(nu))
B = B + np.zeros(len(nu))
T = T + np.zeros(len(nu))
theta = theta + np.zeros(len(nu))
e = polsynchemis.polsynchth(nu,n,B,T,theta)
j = e[:,:4]; a = e[:,4:8]; rho = e[:,8:]
return j,a,rho
def run(x,jarr,aarr,rhoarr,sphstokes=-1,atol=1e-8,rtol=1e-6,max_tau=10):
if sphstokes==-1:
method=0
else:
method=3
radtrans_integrate.init_radtrans_integrate_data(method,4,len(x),len(x),max_tau,0.1,atol,rtol,1e-2,100000)
Karr = (np.append(aarr,rhoarr,axis=1))
tau = np.append(0.,scipy.integrate.cumtrapz(Karr[:,0],x))
radtrans_integrate.integrate(x[::-1],jarr[:,:],Karr[:,:],tau,4)
i = radtrans_integrate.intensity.copy()
radtrans_integrate.del_radtrans_integrate_data()
return i
| en | 0.650102 | # calculate synchrotron emissivity for given coefficients | 2.194252 | 2 |
actions/lib/Template_Parser.py | pjimmybrcd/campus_ztp_nps | 0 | 8717 | <gh_stars>0
"""
Copyright 2016 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from jinja2 import Template, Environment, StrictUndefined, UndefinedError, meta
class Template_Parser(object):
def __init__(self, configuration_template_file, variables={}):
''' Loads the configuration file '''
self.profile = ""
self.variables = variables
try:
with open(configuration_template_file, 'r') as f:
self.profile = "".join(line for line in f)
except:
raise IOError("Template file '%s' not found!", configuration_template_file)
def set_variables(self, variables):
''' Sets the variables '''
self.variables = variables
def get_required_variables(self):
''' Returns a set of the required variables in the template '''
return meta.find_undeclared_variables(Environment().parse(self.profile))
def get_parsed_lines(self):
''' Returns a set of lines with all variables filed in '''
try:
return Template(self.profile, undefined=StrictUndefined).render(self.variables)
except UndefinedError as e:
raise Exception(e)
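# Hypothetical usage of the class above (template path and variable names are examples only):
#   parser = Template_Parser("/templates/switch_config.j2")
#   parser.get_required_variables()                     # e.g. {'hostname', 'vlan_id'}
#   parser.set_variables({'hostname': 'sw-01', 'vlan_id': 20})
#   config_text = parser.get_parsed_lines()             # rendered configuration text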
| """
Copyright 2016 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from jinja2 import Template, Environment, StrictUndefined, UndefinedError, meta
class Template_Parser(object):
def __init__(self, configuration_template_file, variables={}):
''' Loads the configuration file '''
self.profile = ""
self.variables = variables
try:
with open(configuration_template_file, 'r') as f:
self.profile = "".join(line for line in f)
except:
raise IOError("Template file '%s' not found!", configuration_template_file)
def set_variables(self, variables):
''' Sets the variables '''
self.variables = variables
def get_required_variables(self):
''' Returns a set of the required variables in the template '''
return meta.find_undeclared_variables(Environment().parse(self.profile))
def get_parsed_lines(self):
''' Returns a set of lines with all variables filed in '''
try:
return Template(self.profile, undefined=StrictUndefined).render(self.variables)
except UndefinedError as e:
raise Exception(e) | en | 0.836286 | Copyright 2016 Brocade Communications Systems, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Loads the configuration file Sets the variables Returns a set of the required variables in the template Returns a set of lines with all variables filed in | 2.116847 | 2 |
lca_writer/data/loader.py | line-mind/lca_writer | 1 | 8718 | <reponame>line-mind/lca_writer
import os
__all__ = ['DATA_FOLDER', 'load_data']
DATA_FOLDER = os.path.dirname(os.path.abspath(__file__))
def load_data(name):
"""
Loads an Excel form from the data folder with the specified name.
Parameters
----------
name : str
The name of the form without file extension.
"""
from ..lca_writer import LCAWriter # to prevent recursive import
p = os.path.join(DATA_FOLDER, name + '.xlsx')
return LCAWriter(p)
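# Hypothetical usage (the form name is an example; a matching <name>.xlsx must exist in DATA_FOLDER):
#   writer = load_data("example_form")   # builds LCAWriter(DATA_FOLDER/example_form.xlsx)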
| import os
__all__ = ['DATA_FOLDER', 'load_data']
DATA_FOLDER = os.path.dirname(os.path.abspath(__file__))
def load_data(name):
"""
Loads an Excel form from the data folder with the specified name.
Parameters
----------
name : str
The name of the form without file extension.
"""
from ..lca_writer import LCAWriter # to prevent recursive import
p = os.path.join(DATA_FOLDER, name + '.xlsx')
return LCAWriter(p) | en | 0.622142 | Loads an Excel form from the data folder with the specified name. Parameters ---------- name : str The name of the form without file extension. # to prevent recursive import | 2.94347 | 3 |
main.py | Dephilia/pipenv-docker-development | 0 | 8719 | var = "Docker"
print(f"Hello {var} world!")
| var = "Docker"
print(f"Hello {var} world!")
| none | 1 | 1.312097 | 1 |
|
app/v1/utils/mixins.py | pndemo/yummy-recipes-api | 0 | 8720 | <gh_stars>0
""" Model mixin classes for auth, category and recipe modules """
from app import db
# pylint: disable=C0103
# pylint: disable=E1101
class BaseMixin(object):
""" Define the 'BaseModel' mapped to all database tables. """
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
def save(self):
"""Save to database table"""
db.session.add(self)
db.session.commit()
def delete(self):
"""Delete from database table"""
db.session.delete(self)
db.session.commit()
class TimestampMixin(object):
""" Database logging of data manipulation timestamps. """
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(), \
onupdate=db.func.current_timestamp())
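# Illustrative model composing the mixins above (hypothetical table; assumes the same `db` object):
#   class Recipe(BaseMixin, TimestampMixin, db.Model):
#       """Gets an auto-increment id, save()/delete() helpers and created/modified timestamps."""
#       name = db.Column(db.String(100), nullable=False)
#
#   Recipe(name='Ugali').save()   # adds to the session and commits in one call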
| """ Model mixin classes for auth, category and recipe modules """
from app import db
# pylint: disable=C0103
# pylint: disable=E1101
class BaseMixin(object):
""" Define the 'BaseModel' mapped to all database tables. """
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
def save(self):
"""Save to database table"""
db.session.add(self)
db.session.commit()
def delete(self):
"""Delete from database table"""
db.session.delete(self)
db.session.commit()
class TimestampMixin(object):
""" Database logging of data manipulation timestamps. """
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(), \
onupdate=db.func.current_timestamp()) | en | 0.629719 | Model mixin classes for auth, category and recipe modules # pylint: disable=C0103 # pylint: disable=E1101 Define the 'BaseModel' mapped to all database tables. Save to database table Delete from database table Database logging of data manipulation timestamps. | 2.540699 | 3 |
apps/dash-port-analytics/app/ui/tab_map_controls.py | JeroenvdSande/dash-sample-apps | 2,332 | 8721 | <reponame>JeroenvdSande/dash-sample-apps
import dash_core_components as dcc
import dash_html_components as html
from config import strings
def make_tab_port_map_controls(
port_arr: list,
port_val: str,
vessel_types_arr: list,
vessel_type_val: str,
year_arr: list,
year_val: int,
month_arr: list,
month_val: int,
) -> html.Div:
"""
Returns a HTML div of user controls found on top of the map tab.
:param port_arr: list, all possible ports
:param port_val: str, current port value
:param vessel_types_arr: list, all possible vessel types
:param vessel_type_val: str, current vessel type value
:param year_arr: list, all possible years
:param year_val: str, current year value
:param month_arr: list, all possible months
:param month_val: str, current month value
:return: HTML div
"""
return html.Div(
className="tab-port-map-controls",
children=[
html.Div(
className="tab-port-map-single-control-container area-a",
children=[
html.Label(
className="control-label", children=[strings.LABEL_PORT]
),
dcc.Dropdown(
id="port-map-dropdown-port",
clearable=False,
options=[{"label": port, "value": port} for port in port_arr],
value=port_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-b"),
html.Div(
className="tab-port-map-single-control-container area-c",
children=[
html.Label(
className="control-label", children=[strings.LABEL_VESSEL]
),
dcc.Dropdown(
id="port-map-dropdown-vessel-type",
clearable=False,
options=[
{"label": vessel_type, "value": vessel_type}
for vessel_type in vessel_types_arr
],
value=vessel_type_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-d"),
html.Div(
className="tab-port-map-single-control-container date-grid area-e",
children=[
html.Div(
className="tab-port-map-single-control-container-date",
children=[
html.Label(
className="control-label", children=[strings.LABEL_YEAR]
),
dcc.Dropdown(
id="port-map-dropdown-year",
clearable=False,
options=[
{"label": year, "value": year} for year in year_arr
],
value=year_val,
),
],
),
html.Div(
className="tab-port-map-single-control-separator smaller-line"
),
html.Div(
className="tab-port-map-single-control-container-date",
children=[
html.Label(
className="control-label",
children=[strings.LABEL_MONTH],
),
dcc.Dropdown(
id="port-map-dropdown-month",
clearable=False,
options=[
{"label": month, "value": month}
for month in month_arr
],
value=month_val,
),
],
),
],
),
],
)
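# Hypothetical call building the control bar (values are illustrative; labels come from the
# app's `strings` config as in the function above):
#   controls = make_tab_port_map_controls(
#       port_arr=["Antwerp", "Rotterdam"], port_val="Antwerp",
#       vessel_types_arr=["Cargo", "Tanker"], vessel_type_val="Cargo",
#       year_arr=[2019, 2020], year_val=2020,
#       month_arr=list(range(1, 13)), month_val=6)
#   # `controls` is a dash_html_components Div ready to drop into the app layout.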
| import dash_core_components as dcc
import dash_html_components as html
from config import strings
def make_tab_port_map_controls(
port_arr: list,
port_val: str,
vessel_types_arr: list,
vessel_type_val: str,
year_arr: list,
year_val: int,
month_arr: list,
month_val: int,
) -> html.Div:
"""
Returns a HTML div of user controls found on top of the map tab.
:param port_arr: list, all possible ports
:param port_val: str, current port value
:param vessel_types_arr: list, all possible vessel types
:param vessel_type_val: str, current vessel type value
:param year_arr: list, all possible years
:param year_val: str, current year value
:param month_arr: list, all possible months
:param month_val: str, current month value
:return: HTML div
"""
return html.Div(
className="tab-port-map-controls",
children=[
html.Div(
className="tab-port-map-single-control-container area-a",
children=[
html.Label(
className="control-label", children=[strings.LABEL_PORT]
),
dcc.Dropdown(
id="port-map-dropdown-port",
clearable=False,
options=[{"label": port, "value": port} for port in port_arr],
value=port_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-b"),
html.Div(
className="tab-port-map-single-control-container area-c",
children=[
html.Label(
className="control-label", children=[strings.LABEL_VESSEL]
),
dcc.Dropdown(
id="port-map-dropdown-vessel-type",
clearable=False,
options=[
{"label": vessel_type, "value": vessel_type}
for vessel_type in vessel_types_arr
],
value=vessel_type_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-d"),
html.Div(
className="tab-port-map-single-control-container date-grid area-e",
children=[
html.Div(
className="tab-port-map-single-control-container-date",
children=[
html.Label(
className="control-label", children=[strings.LABEL_YEAR]
),
dcc.Dropdown(
id="port-map-dropdown-year",
clearable=False,
options=[
{"label": year, "value": year} for year in year_arr
],
value=year_val,
),
],
),
html.Div(
className="tab-port-map-single-control-separator smaller-line"
),
html.Div(
className="tab-port-map-single-control-container-date",
children=[
html.Label(
className="control-label",
children=[strings.LABEL_MONTH],
),
dcc.Dropdown(
id="port-map-dropdown-month",
clearable=False,
options=[
{"label": month, "value": month}
for month in month_arr
],
value=month_val,
),
],
),
],
),
],
) | en | 0.404029 | Returns a HTML div of user controls found on top of the map tab. :param port_arr: list, all possible ports :param port_val: str, current port value :param vessel_types_arr: list, all possible vessel types :param vessel_type_val: str, current vessel type value :param year_arr: list, all possible years :param year_val: str, current year value :param month_arr: list, all possible months :param month_val: str, current month value :return: HTML div | 2.771475 | 3 |
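
The record above only defines the control bar for the map tab. Below is a minimal sketch of how such a builder might be mounted in a Dash app; the dropdown ids and the make_tab_port_map_controls signature come from the file above, while the sample data, the port-map-output container, the callback body, and the import path of the builder are assumptions made for illustration.

import dash
import dash_html_components as html
from dash.dependencies import Input, Output

# The builder shown above; its real import path depends on the repository layout.
# from app.layouts import make_tab_port_map_controls

# Hypothetical sample data -- a real app would load these from its dataset.
PORTS = ["Rotterdam", "Hamburg"]
VESSEL_TYPES = ["Cargo", "Tanker"]
YEARS = [2019, 2020]
MONTHS = list(range(1, 13))

app = dash.Dash(__name__)
app.layout = html.Div(
    children=[
        make_tab_port_map_controls(
            port_arr=PORTS, port_val=PORTS[0],
            vessel_types_arr=VESSEL_TYPES, vessel_type_val=VESSEL_TYPES[0],
            year_arr=YEARS, year_val=YEARS[0],
            month_arr=MONTHS, month_val=MONTHS[0],
        ),
        html.Div(id="port-map-output"),  # hypothetical output container
    ]
)

@app.callback(
    Output("port-map-output", "children"),
    [
        Input("port-map-dropdown-port", "value"),
        Input("port-map-dropdown-vessel-type", "value"),
        Input("port-map-dropdown-year", "value"),
        Input("port-map-dropdown-month", "value"),
    ],
)
def update_selection(port, vessel_type, year, month):
    # A real app would redraw the port map here; this stub only echoes the selection.
    return "{} / {} / {}-{}".format(port, vessel_type, year, month)

if __name__ == "__main__":
    app.run_server(debug=True)
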
subs2srs/gui/state.py | TFarla/subs2srs-cross-platform | 3 | 8722 | <filename>subs2srs/gui/state.py
from typing import List
from subs2srs.core.preview_item import PreviewItem
class StatePreview:
items: List[PreviewItem] = []
inactive_items = set()
def __init__(self):
super().__init__()
self.items = []
self.inactive_items = set()
self.audio = None
class State:
deck_name = None
sub1_file = "/Users/thomasfarla/Documents/subs2srs-cross-platform/tests/fixtures/in.srt"
sub2_file = None
video_file = "/Users/thomasfarla/Documents/subs2srs-cross-platform/tests/fixtures/in.mkv"
output_file = "/Users/thomasfarla/Documents/test-subs"
preview = StatePreview()
| <filename>subs2srs/gui/state.py
from typing import List
from subs2srs.core.preview_item import PreviewItem
class StatePreview:
items: List[PreviewItem] = []
inactive_items = set()
def __init__(self):
super().__init__()
self.items = []
self.inactive_items = set()
self.audio = None
class State:
deck_name = None
sub1_file = "/Users/thomasfarla/Documents/subs2srs-cross-platform/tests/fixtures/in.srt"
sub2_file = None
video_file = "/Users/thomasfarla/Documents/subs2srs-cross-platform/tests/fixtures/in.mkv"
output_file = "/Users/thomasfarla/Documents/test-subs"
preview = StatePreview()
| none | 1 | 2.324013 | 2 |
|
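
The State and StatePreview classes above are plain data holders, so a short sketch of how a GUI layer might consume them follows. Only attributes visible in the record are used; PreviewItem construction is omitted because its signature is not shown here, and the deck name and row indices are illustrative.

from subs2srs.gui.state import State

state = State()
state.deck_name = "Japanese::Episode 01"  # hypothetical deck name

# Exclude the second preview row from the export, then re-include it.
state.preview.inactive_items.add(1)
state.preview.inactive_items.discard(1)

# Rows that remain active are the ones that would be turned into cards.
active_rows = [
    item
    for index, item in enumerate(state.preview.items)
    if index not in state.preview.inactive_items
]
print(len(active_rows))

Note that although items and inactive_items are declared as class-level defaults, StatePreview.__init__ rebinds them per instance, so two previews never share the same list or set.
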
sync_ends/main.py | nirav1997/sync_ends | 0 | 8723 | <reponame>nirav1997/sync_ends
import sys
sys.path.append("..")
from src.sync_ends_service import SyncEnd
from src.parser import Parser
def main():
    # get the arguments from the command line
parser = Parser()
collection_name, api_key, trigger_interval, slack_channel, slack_token = parser.get_argumenets()
sync_end = SyncEnd(api_key, collection_name, trigger_interval, slack_channel, slack_token)
try:
sync_end.start()
except Exception as e:
print(e)
if __name__ == "__main__":
main()
| import sys
sys.path.append("..")
from src.sync_ends_service import SyncEnd
from src.parser import Parser
def main():
    # get the arguments from the command line
parser = Parser()
collection_name, api_key, trigger_interval, slack_channel, slack_token = parser.get_argumenets()
sync_end = SyncEnd(api_key, collection_name, trigger_interval, slack_channel, slack_token)
try:
sync_end.start()
except Exception as e:
print(e)
if __name__ == "__main__":
    main() | en | 0.637058 | # get the arguments from the command line | 2.144802 | 2 |
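
Since src/parser.py and src/sync_ends_service.py are not part of this record, the sketch below is speculative: it only mirrors the 5-tuple order that main() above unpacks. The flag names, help texts, and defaults are assumptions, and the method name is kept as spelled at the call site.

import argparse

class Parser:
    def get_argumenets(self):  # spelling kept to match the call in main() above
        parser = argparse.ArgumentParser(
            description="Sync a Postman collection and notify a Slack channel"
        )
        parser.add_argument("--collection", required=True, help="Postman collection name")
        parser.add_argument("--api-key", required=True, help="Postman API key")
        parser.add_argument("--interval", type=int, default=10,
                            help="polling interval in seconds")
        parser.add_argument("--channel", required=True, help="Slack channel to notify")
        parser.add_argument("--token", required=True, help="Slack bot token")
        args = parser.parse_args()
        # Returned in the exact order main() unpacks them.
        return args.collection, args.api_key, args.interval, args.channel, args.token
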
graphql_compiler/compiler/workarounds/orientdb_query_execution.py | 0xflotus/graphql-compiler | 0 | 8724 | # Copyright 2018-present Kensho Technologies, LLC.
"""Workarounds for OrientDB scheduler issue that causes poor query planning for certain queries.
For purposes of query planning, the OrientDB query planner ignores "where:" clauses
that hit indexes but do not use the "=" operator. For example, "CONTAINS" can be used to check
that a field covered by an index is in a specified list of values, and can therefore be covered
by an index, but OrientDB will ignore this. When no equality ("=") checks on indexed columns
are present, OrientDB will generate a query plan that starts execution at the class with
lowest cardinality, which can lead to excessive numbers of scanned and discarded records.
Assuming the query planner creates a query plan where a location with CONTAINS is
the first in the execution order, the execution system will apply indexes
to speed up this operation. Therefore, it's sufficient to trick the query planner into
always creating such a query plan, even though it thinks indexes cannot be used in the query.
Valid query execution start points for the OrientDB query planner must satisfy the following:
- Must not be "optional: true".
- Must not have a "while:" clause nor follow a location that has one.
- Must have a "class:" defined. This class is used for cardinality estimation, and to
look for available indexes that may cover any "where:" clause that may be present.
The optimizations in this file improve performance by enabling execution start points according
to the following assumptions:
1. Start points with "where:" clauses that reference only local fields (i.e. not tagged values
from other query locations) are always better than start points without a "where:".
This is because the filter will have to be applied one way or the other, so we might as well
apply it early.
2. If no such start points are available, we'd like to make available as many start points
as possible, since we'd like OrientDB to start at the start point whose class has
the lowest possible cardinality.
The process of applying the optimizations is as follows:
- Exclude and ignore all query steps that are inside a fold, optional, or recursion scope,
or have a "where:" clause that references a non-local (i.e. tagged) field.
- Find all remaining query steps with "where:" clauses that reference only local fields.
- If any are found, we guide our actions from assumption 1 above:
- Ensure they have a defined "class:" -- i.e. the OrientDB scheduler will consider them
valid start points.
- Then, prune all other query steps (ones without such "where:" clauses) by removing their
"class:" clause, making them invalid as query start points for OrientDB's scheduler.
- If none are found, we guide our actions from assumption 2 above:
- Ensure that all query points not inside fold, optional, or recursion scope contain
a "class:" clause. That increases the number of available query start points,
so OrientDB can choose the start point of lowest cardinality.
"""
from ..blocks import CoerceType, QueryRoot, Recurse, Traverse
from ..expressions import ContextField, ContextFieldExistence
from ..helpers import get_only_element_from_collection
from ..ir_lowering_match.utils import convert_coerce_type_and_add_to_where_block
def _is_local_filter(filter_block):
"""Return True if the Filter block references no non-local fields, and False otherwise."""
# We need the "result" value of this function to be mutated within the "visitor_fn".
# Since we support both Python 2 and Python 3, we can't use the "nonlocal" keyword here:
# https://www.python.org/dev/peps/pep-3104/
# Instead, we use a dict to store the value we need mutated, since the "visitor_fn"
# can mutate state in the parent scope, but not rebind variables in it without "nonlocal".
# TODO(predrag): Revisit this if we drop support for Python 2.
result = {
'is_local_filter': True
}
filter_predicate = filter_block.predicate
def visitor_fn(expression):
"""Expression visitor function that looks for uses of non-local fields."""
non_local_expression_types = (ContextField, ContextFieldExistence)
if isinstance(expression, non_local_expression_types):
result['is_local_filter'] = False
# Don't change the expression.
return expression
filter_predicate.visit_and_update(visitor_fn)
return result['is_local_filter']
def _classify_query_locations(match_query):
"""Classify query locations into three groups: preferred, eligible, ineligible.
- Ineligible locations are ones that cannot be the starting point of query execution.
These include locations within recursions, locations that are the target of
an optional traversal, and locations with an associated "where:" clause with non-local filter.
- Preferred locations are ones that are eligible to be the starting point, and also have
an associated "where:" clause that references no non-local fields -- only local fields,
literals, and variables.
- Eligible locations are all locations that do not fall into either of these two categories.
Args:
match_query: MatchQuery object describing the query being analyzed for optimization
Returns:
tuple (preferred, eligible, ineligible) where each element is a set of Location objects.
The three sets are disjoint.
"""
preferred_locations = set()
eligible_locations = set()
ineligible_locations = set()
# Any query must have at least one traversal with at least one step.
# The first step in this traversal must be a QueryRoot.
first_match_step = match_query.match_traversals[0][0]
if not isinstance(first_match_step.root_block, QueryRoot):
raise AssertionError(u'First step of first traversal unexpectedly was not QueryRoot: '
u'{} {}'.format(first_match_step, match_query))
# The first step in the first traversal cannot possibly be inside an optional, recursion,
# or fold. Its location is always an eligible start location for a query.
# We need to determine whether it is merely eligible, or actually a preferred location.
if first_match_step.where_block is not None:
if _is_local_filter(first_match_step.where_block):
preferred_locations.add(first_match_step.as_block.location)
else:
# TODO(predrag): Fix once we have a proper fix for tag-and-filter in the same scope.
# Either the locally-scoped tag will have to generate a LocalField
# instead of a ContextField, or we'll have to rework the local filter
# detection code in this module.
raise AssertionError(u'The first step of the first traversal somehow had a non-local '
u'filter. This should not be possible, since there is nowhere '
u'for the tagged value to have come from. Values: {} {}'
.format(first_match_step, match_query))
else:
eligible_locations.add(first_match_step.as_block.location)
# This loop will repeat the analysis of the first step of the first traversal.
# QueryRoots other than the first are required to always be at a location whose status
# (preferred / eligible / ineligible) is already known. Since we already processed
# the first QueryRoot above, the rest of the loop can assume all QueryRoots are like that.
for current_traversal in match_query.match_traversals:
for match_step in current_traversal:
current_step_location = match_step.as_block.location
if isinstance(match_step.root_block, QueryRoot):
already_encountered_location = any((
current_step_location in preferred_locations,
current_step_location in eligible_locations,
current_step_location in ineligible_locations,
))
if not already_encountered_location:
raise AssertionError(u'Unexpectedly encountered a location in QueryRoot whose '
u'status has not been determined: {} {} {}'
.format(current_step_location, match_step, match_query))
at_eligible_or_preferred_location = (
current_step_location in preferred_locations or
current_step_location in eligible_locations)
# This location has already been encountered and processed.
# Other than setting the "at_eligible_or_preferred_location" state for the sake of
# the following MATCH steps, there is nothing further to be done.
continue
elif isinstance(match_step.root_block, Recurse):
# All Recurse blocks cause locations within to be ineligible.
at_eligible_or_preferred_location = False
elif isinstance(match_step.root_block, Traverse):
# Optional Traverse blocks cause locations within to be ineligible.
# Non-optional Traverse blocks do not change the eligibility of locations within:
# if the pre-Traverse location was eligible, so will the location within,
# and if it was not eligible, neither will the location within.
if match_step.root_block.optional:
at_eligible_or_preferred_location = False
else:
raise AssertionError(u'Unreachable condition reached: {} {} {}'
.format(match_step.root_block, match_step, match_query))
if not at_eligible_or_preferred_location:
ineligible_locations.add(current_step_location)
elif match_step.where_block is not None:
if _is_local_filter(match_step.where_block):
# This location has a local filter, and is not otherwise ineligible (it's not
# in a recursion etc.). Therefore, it's a preferred query start location.
preferred_locations.add(current_step_location)
else:
# Locations with non-local filters are never eligible locations, since they
# depend on another location being executed before them.
ineligible_locations.add(current_step_location)
else:
# No local filtering (i.e. not preferred), but also not ineligible. Eligible it is.
eligible_locations.add(current_step_location)
return preferred_locations, eligible_locations, ineligible_locations
def _calculate_type_bound_at_step(match_step):
"""Return the GraphQL type bound at the given step, or None if no bound is given."""
current_type_bounds = []
if isinstance(match_step.root_block, QueryRoot):
# The QueryRoot start class is a type bound.
current_type_bounds.extend(match_step.root_block.start_class)
if match_step.coerce_type_block is not None:
# The CoerceType target class is also a type bound.
current_type_bounds.extend(match_step.coerce_type_block.target_class)
if current_type_bounds:
# A type bound exists. Assert that there is exactly one bound, defined in precisely one way.
return get_only_element_from_collection(current_type_bounds)
else:
# No type bound exists at this MATCH step.
return None
def _assert_type_bounds_are_not_conflicting(current_type_bound, previous_type_bound,
location, match_query):
"""Ensure that the two bounds either are an exact match, or one of them is None."""
if all((current_type_bound is not None,
previous_type_bound is not None,
current_type_bound != previous_type_bound)):
raise AssertionError(
u'Conflicting type bounds calculated at location {}: {} vs {} '
u'for query {}'.format(location, previous_type_bound, current_type_bound, match_query))
def _expose_only_preferred_locations(match_query, location_types, coerced_locations,
preferred_locations, eligible_locations):
"""Return a MATCH query where only preferred locations are valid as query start locations."""
preferred_location_types = dict()
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in preferred_locations:
# This location is preferred. We have to make sure that at least one occurrence
# of this location in the MATCH query has an associated "class:" clause,
# which would be generated by a type bound at the corresponding MATCH step.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = preferred_location_types.get(current_step_location, None)
if previous_type_bound is not None:
# The location is already valid. If so, make sure that this step either does
# not have any type bounds (e.g. via QueryRoot or CoerceType blocks),
# or has type bounds that match the previously-decided type bound.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
else:
# The location is not yet known to be valid. If it does not have
# a type bound in this MATCH step, add a type coercion to the type
# registered in "location_types".
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_step = match_step._replace(
coerce_type_block=CoerceType({current_type_bound}))
preferred_location_types[current_step_location] = current_type_bound
elif current_step_location in eligible_locations:
                # This location is eligible, but not preferred. We have to make sure
# none of the MATCH steps with this location have type bounds, and therefore
# will not produce a corresponding "class:" clause in the resulting MATCH query.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is not None:
# There is a type bound here that we need to neutralize.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
if (current_step_location not in coerced_locations or
previous_type_bound is not None):
# The type bound here is already implied by the GraphQL query structure,
# or has already been applied at a previous occurrence of this location.
# We can simply delete the QueryRoot / CoerceType blocks that impart it.
if isinstance(match_step.root_block, QueryRoot):
new_root_block = None
else:
new_root_block = match_step.root_block
new_step = match_step._replace(
root_block=new_root_block, coerce_type_block=None)
else:
# The type bound here is not already implied by the GraphQL query structure.
# This should only be possible via a CoerceType block. Lower this CoerceType
# block into a Filter with INSTANCEOF to ensure the resulting query has the
# same semantics, while making the location invalid as a query start point.
if (isinstance(match_step.root_block, QueryRoot) or
match_step.coerce_type_block is None):
raise AssertionError(u'Unexpected MATCH step applying a type bound not '
u'already implied by the GraphQL query structure: '
u'{} {}'.format(match_step, match_query))
new_where_block = convert_coerce_type_and_add_to_where_block(
match_step.coerce_type_block, match_step.where_block)
new_step = match_step._replace(
coerce_type_block=None, where_block=new_where_block)
else:
# There is no type bound that OrientDB can find defined at this location.
# No action is necessary.
pass
else:
# This location is neither preferred nor eligible.
# No action is necessary at this location.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
def _expose_all_eligible_locations(match_query, location_types, eligible_locations):
"""Return a MATCH query where all eligible locations are valid as query start locations."""
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in eligible_locations:
# This location is eligible. We need to make sure it has an associated type bound,
# so that it produces a "class:" clause that will make it a valid query start
# location. It either already has such a type bound, or we can use the type
# implied by the GraphQL query structure to add one.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_coerce_type_block = CoerceType({current_type_bound})
new_step = match_step._replace(coerce_type_block=new_coerce_type_block)
else:
# There is a type bound here. We simply ensure that the bound is not conflicting
# with any other type bound at a different MATCH step with the same location.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
else:
# This function may only be called if there are no preferred locations. Since this
# location cannot be preferred, and is not eligible, it must be ineligible.
# No action is necessary in this case.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
def expose_ideal_query_execution_start_points(compound_match_query, location_types,
coerced_locations):
"""Ensure that OrientDB only considers desirable query start points in query planning."""
new_queries = []
for match_query in compound_match_query.match_queries:
location_classification = _classify_query_locations(match_query)
preferred_locations, eligible_locations, _ = location_classification
if preferred_locations:
# Convert all eligible locations into non-eligible ones, by removing
# their "class:" clause. The "class:" clause is provided either by having
# a QueryRoot block or a CoerceType block in the MatchStep corresponding
# to the location. We remove it by converting the class check into
# an "INSTANCEOF" Filter block, which OrientDB is unable to optimize away.
new_query = _expose_only_preferred_locations(
match_query, location_types, coerced_locations,
preferred_locations, eligible_locations)
elif eligible_locations:
# Make sure that all eligible locations have a "class:" clause by adding
# a CoerceType block that is a no-op as guaranteed by the schema. This merely
# ensures that OrientDB is able to use each of these locations as a query start point,
# and will choose the one whose class is of lowest cardinality.
new_query = _expose_all_eligible_locations(
match_query, location_types, eligible_locations)
else:
raise AssertionError(u'This query has no preferred or eligible query start locations. '
u'This is almost certainly a bug: {}'.format(match_query))
new_queries.append(new_query)
return compound_match_query._replace(match_queries=new_queries)
| # Copyright 2018-present Kensho Technologies, LLC.
"""Workarounds for OrientDB scheduler issue that causes poor query planning for certain queries.
For purposes of query planning, the OrientDB query planner ignores "where:" clauses
that hit indexes but do not use the "=" operator. For example, "CONTAINS" can be used to check
that a field covered by an index is in a specified list of values, and can therefore be covered
by an index, but OrientDB will ignore this. When no equality ("=") checks on indexed columns
are present, OrientDB will generate a query plan that starts execution at the class with
lowest cardinality, which can lead to excessive numbers of scanned and discarded records.
Assuming the query planner creates a query plan where a location with CONTAINS is
the first in the execution order, the execution system will apply indexes
to speed up this operation. Therefore, it's sufficient to trick the query planner into
always creating such a query plan, even though it thinks indexes cannot be used in the query.
Valid query execution start points for the OrientDB query planner must satisfy the following:
- Must not be "optional: true".
- Must not have a "while:" clause nor follow a location that has one.
- Must have a "class:" defined. This class is used for cardinality estimation, and to
look for available indexes that may cover any "where:" clause that may be present.
The optimizations in this file improve performance by enabling execution start points according
to the following assumptions:
1. Start points with "where:" clauses that reference only local fields (i.e. not tagged values
from other query locations) are always better than start points without a "where:".
This is because the filter will have to be applied one way or the other, so we might as well
apply it early.
2. If no such start points are available, we'd like to make available as many start points
as possible, since we'd like OrientDB to start at the start point whose class has
the lowest possible cardinality.
The process of applying the optimizations is as follows:
- Exclude and ignore all query steps that are inside a fold, optional, or recursion scope,
or have a "where:" clause that references a non-local (i.e. tagged) field.
- Find all remaining query steps with "where:" clauses that reference only local fields.
- If any are found, we guide our actions from assumption 1 above:
- Ensure they have a defined "class:" -- i.e. the OrientDB scheduler will consider them
valid start points.
- Then, prune all other query steps (ones without such "where:" clauses) by removing their
"class:" clause, making them invalid as query start points for OrientDB's scheduler.
- If none are found, we guide our actions from assumption 2 above:
- Ensure that all query points not inside fold, optional, or recursion scope contain
a "class:" clause. That increases the number of available query start points,
so OrientDB can choose the start point of lowest cardinality.
"""
from ..blocks import CoerceType, QueryRoot, Recurse, Traverse
from ..expressions import ContextField, ContextFieldExistence
from ..helpers import get_only_element_from_collection
from ..ir_lowering_match.utils import convert_coerce_type_and_add_to_where_block
def _is_local_filter(filter_block):
"""Return True if the Filter block references no non-local fields, and False otherwise."""
# We need the "result" value of this function to be mutated within the "visitor_fn".
# Since we support both Python 2 and Python 3, we can't use the "nonlocal" keyword here:
# https://www.python.org/dev/peps/pep-3104/
# Instead, we use a dict to store the value we need mutated, since the "visitor_fn"
# can mutate state in the parent scope, but not rebind variables in it without "nonlocal".
# TODO(predrag): Revisit this if we drop support for Python 2.
result = {
'is_local_filter': True
}
filter_predicate = filter_block.predicate
def visitor_fn(expression):
"""Expression visitor function that looks for uses of non-local fields."""
non_local_expression_types = (ContextField, ContextFieldExistence)
if isinstance(expression, non_local_expression_types):
result['is_local_filter'] = False
# Don't change the expression.
return expression
filter_predicate.visit_and_update(visitor_fn)
return result['is_local_filter']
def _classify_query_locations(match_query):
"""Classify query locations into three groups: preferred, eligible, ineligible.
- Ineligible locations are ones that cannot be the starting point of query execution.
These include locations within recursions, locations that are the target of
an optional traversal, and locations with an associated "where:" clause with non-local filter.
- Preferred locations are ones that are eligible to be the starting point, and also have
an associated "where:" clause that references no non-local fields -- only local fields,
literals, and variables.
- Eligible locations are all locations that do not fall into either of these two categories.
Args:
match_query: MatchQuery object describing the query being analyzed for optimization
Returns:
tuple (preferred, eligible, ineligible) where each element is a set of Location objects.
The three sets are disjoint.
"""
preferred_locations = set()
eligible_locations = set()
ineligible_locations = set()
# Any query must have at least one traversal with at least one step.
# The first step in this traversal must be a QueryRoot.
first_match_step = match_query.match_traversals[0][0]
if not isinstance(first_match_step.root_block, QueryRoot):
raise AssertionError(u'First step of first traversal unexpectedly was not QueryRoot: '
u'{} {}'.format(first_match_step, match_query))
# The first step in the first traversal cannot possibly be inside an optional, recursion,
# or fold. Its location is always an eligible start location for a query.
# We need to determine whether it is merely eligible, or actually a preferred location.
if first_match_step.where_block is not None:
if _is_local_filter(first_match_step.where_block):
preferred_locations.add(first_match_step.as_block.location)
else:
# TODO(predrag): Fix once we have a proper fix for tag-and-filter in the same scope.
# Either the locally-scoped tag will have to generate a LocalField
# instead of a ContextField, or we'll have to rework the local filter
# detection code in this module.
raise AssertionError(u'The first step of the first traversal somehow had a non-local '
u'filter. This should not be possible, since there is nowhere '
u'for the tagged value to have come from. Values: {} {}'
.format(first_match_step, match_query))
else:
eligible_locations.add(first_match_step.as_block.location)
# This loop will repeat the analysis of the first step of the first traversal.
# QueryRoots other than the first are required to always be at a location whose status
# (preferred / eligible / ineligible) is already known. Since we already processed
# the first QueryRoot above, the rest of the loop can assume all QueryRoots are like that.
for current_traversal in match_query.match_traversals:
for match_step in current_traversal:
current_step_location = match_step.as_block.location
if isinstance(match_step.root_block, QueryRoot):
already_encountered_location = any((
current_step_location in preferred_locations,
current_step_location in eligible_locations,
current_step_location in ineligible_locations,
))
if not already_encountered_location:
raise AssertionError(u'Unexpectedly encountered a location in QueryRoot whose '
u'status has not been determined: {} {} {}'
.format(current_step_location, match_step, match_query))
at_eligible_or_preferred_location = (
current_step_location in preferred_locations or
current_step_location in eligible_locations)
# This location has already been encountered and processed.
# Other than setting the "at_eligible_or_preferred_location" state for the sake of
# the following MATCH steps, there is nothing further to be done.
continue
elif isinstance(match_step.root_block, Recurse):
# All Recurse blocks cause locations within to be ineligible.
at_eligible_or_preferred_location = False
elif isinstance(match_step.root_block, Traverse):
# Optional Traverse blocks cause locations within to be ineligible.
# Non-optional Traverse blocks do not change the eligibility of locations within:
# if the pre-Traverse location was eligible, so will the location within,
# and if it was not eligible, neither will the location within.
if match_step.root_block.optional:
at_eligible_or_preferred_location = False
else:
raise AssertionError(u'Unreachable condition reached: {} {} {}'
.format(match_step.root_block, match_step, match_query))
if not at_eligible_or_preferred_location:
ineligible_locations.add(current_step_location)
elif match_step.where_block is not None:
if _is_local_filter(match_step.where_block):
# This location has a local filter, and is not otherwise ineligible (it's not
# in a recursion etc.). Therefore, it's a preferred query start location.
preferred_locations.add(current_step_location)
else:
# Locations with non-local filters are never eligible locations, since they
# depend on another location being executed before them.
ineligible_locations.add(current_step_location)
else:
# No local filtering (i.e. not preferred), but also not ineligible. Eligible it is.
eligible_locations.add(current_step_location)
return preferred_locations, eligible_locations, ineligible_locations
def _calculate_type_bound_at_step(match_step):
"""Return the GraphQL type bound at the given step, or None if no bound is given."""
current_type_bounds = []
if isinstance(match_step.root_block, QueryRoot):
# The QueryRoot start class is a type bound.
current_type_bounds.extend(match_step.root_block.start_class)
if match_step.coerce_type_block is not None:
# The CoerceType target class is also a type bound.
current_type_bounds.extend(match_step.coerce_type_block.target_class)
if current_type_bounds:
# A type bound exists. Assert that there is exactly one bound, defined in precisely one way.
return get_only_element_from_collection(current_type_bounds)
else:
# No type bound exists at this MATCH step.
return None
def _assert_type_bounds_are_not_conflicting(current_type_bound, previous_type_bound,
location, match_query):
"""Ensure that the two bounds either are an exact match, or one of them is None."""
if all((current_type_bound is not None,
previous_type_bound is not None,
current_type_bound != previous_type_bound)):
raise AssertionError(
u'Conflicting type bounds calculated at location {}: {} vs {} '
u'for query {}'.format(location, previous_type_bound, current_type_bound, match_query))
def _expose_only_preferred_locations(match_query, location_types, coerced_locations,
preferred_locations, eligible_locations):
"""Return a MATCH query where only preferred locations are valid as query start locations."""
preferred_location_types = dict()
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in preferred_locations:
# This location is preferred. We have to make sure that at least one occurrence
# of this location in the MATCH query has an associated "class:" clause,
# which would be generated by a type bound at the corresponding MATCH step.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = preferred_location_types.get(current_step_location, None)
if previous_type_bound is not None:
# The location is already valid. If so, make sure that this step either does
# not have any type bounds (e.g. via QueryRoot or CoerceType blocks),
# or has type bounds that match the previously-decided type bound.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
else:
# The location is not yet known to be valid. If it does not have
# a type bound in this MATCH step, add a type coercion to the type
# registered in "location_types".
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_step = match_step._replace(
coerce_type_block=CoerceType({current_type_bound}))
preferred_location_types[current_step_location] = current_type_bound
elif current_step_location in eligible_locations:
                # This location is eligible, but not preferred. We have to make sure
# none of the MATCH steps with this location have type bounds, and therefore
# will not produce a corresponding "class:" clause in the resulting MATCH query.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is not None:
# There is a type bound here that we need to neutralize.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
if (current_step_location not in coerced_locations or
previous_type_bound is not None):
# The type bound here is already implied by the GraphQL query structure,
# or has already been applied at a previous occurrence of this location.
# We can simply delete the QueryRoot / CoerceType blocks that impart it.
if isinstance(match_step.root_block, QueryRoot):
new_root_block = None
else:
new_root_block = match_step.root_block
new_step = match_step._replace(
root_block=new_root_block, coerce_type_block=None)
else:
# The type bound here is not already implied by the GraphQL query structure.
# This should only be possible via a CoerceType block. Lower this CoerceType
# block into a Filter with INSTANCEOF to ensure the resulting query has the
# same semantics, while making the location invalid as a query start point.
if (isinstance(match_step.root_block, QueryRoot) or
match_step.coerce_type_block is None):
raise AssertionError(u'Unexpected MATCH step applying a type bound not '
u'already implied by the GraphQL query structure: '
u'{} {}'.format(match_step, match_query))
new_where_block = convert_coerce_type_and_add_to_where_block(
match_step.coerce_type_block, match_step.where_block)
new_step = match_step._replace(
coerce_type_block=None, where_block=new_where_block)
else:
# There is no type bound that OrientDB can find defined at this location.
# No action is necessary.
pass
else:
# This location is neither preferred nor eligible.
# No action is necessary at this location.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
def _expose_all_eligible_locations(match_query, location_types, eligible_locations):
"""Return a MATCH query where all eligible locations are valid as query start locations."""
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in eligible_locations:
# This location is eligible. We need to make sure it has an associated type bound,
# so that it produces a "class:" clause that will make it a valid query start
# location. It either already has such a type bound, or we can use the type
# implied by the GraphQL query structure to add one.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_coerce_type_block = CoerceType({current_type_bound})
new_step = match_step._replace(coerce_type_block=new_coerce_type_block)
else:
# There is a type bound here. We simply ensure that the bound is not conflicting
# with any other type bound at a different MATCH step with the same location.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
else:
# This function may only be called if there are no preferred locations. Since this
# location cannot be preferred, and is not eligible, it must be ineligible.
# No action is necessary in this case.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
def expose_ideal_query_execution_start_points(compound_match_query, location_types,
coerced_locations):
"""Ensure that OrientDB only considers desirable query start points in query planning."""
new_queries = []
for match_query in compound_match_query.match_queries:
location_classification = _classify_query_locations(match_query)
preferred_locations, eligible_locations, _ = location_classification
if preferred_locations:
# Convert all eligible locations into non-eligible ones, by removing
# their "class:" clause. The "class:" clause is provided either by having
# a QueryRoot block or a CoerceType block in the MatchStep corresponding
# to the location. We remove it by converting the class check into
# an "INSTANCEOF" Filter block, which OrientDB is unable to optimize away.
new_query = _expose_only_preferred_locations(
match_query, location_types, coerced_locations,
preferred_locations, eligible_locations)
elif eligible_locations:
# Make sure that all eligible locations have a "class:" clause by adding
# a CoerceType block that is a no-op as guaranteed by the schema. This merely
# ensures that OrientDB is able to use each of these locations as a query start point,
# and will choose the one whose class is of lowest cardinality.
new_query = _expose_all_eligible_locations(
match_query, location_types, eligible_locations)
else:
raise AssertionError(u'This query has no preferred or eligible query start locations. '
u'This is almost certainly a bug: {}'.format(match_query))
new_queries.append(new_query)
return compound_match_query._replace(match_queries=new_queries)
| en | 0.914852 | # Copyright 2018-present Kensho Technologies, LLC. Workarounds for OrientDB scheduler issue that causes poor query planning for certain queries. For purposes of query planning, the OrientDB query planner ignores "where:" clauses that hit indexes but do not use the "=" operator. For example, "CONTAINS" can be used to check that a field covered by an index is in a specified list of values, and can therefore be covered by an index, but OrientDB will ignore this. When no equality ("=") checks on indexed columns are present, OrientDB will generate a query plan that starts execution at the class with lowest cardinality, which can lead to excessive numbers of scanned and discarded records. Assuming the query planner creates a query plan where a location with CONTAINS is the first in the execution order, the execution system will apply indexes to speed up this operation. Therefore, it's sufficient to trick the query planner into always creating such a query plan, even though it thinks indexes cannot be used in the query. Valid query execution start points for the OrientDB query planner must satisfy the following: - Must not be "optional: true". - Must not have a "while:" clause nor follow a location that has one. - Must have a "class:" defined. This class is used for cardinality estimation, and to look for available indexes that may cover any "where:" clause that may be present. The optimizations in this file improve performance by enabling execution start points according to the following assumptions: 1. Start points with "where:" clauses that reference only local fields (i.e. not tagged values from other query locations) are always better than start points without a "where:". This is because the filter will have to be applied one way or the other, so we might as well apply it early. 2. If no such start points are available, we'd like to make available as many start points as possible, since we'd like OrientDB to start at the start point whose class has the lowest possible cardinality. The process of applying the optimizations is as follows: - Exclude and ignore all query steps that are inside a fold, optional, or recursion scope, or have a "where:" clause that references a non-local (i.e. tagged) field. - Find all remaining query steps with "where:" clauses that reference only local fields. - If any are found, we guide our actions from assumption 1 above: - Ensure they have a defined "class:" -- i.e. the OrientDB scheduler will consider them valid start points. - Then, prune all other query steps (ones without such "where:" clauses) by removing their "class:" clause, making them invalid as query start points for OrientDB's scheduler. - If none are found, we guide our actions from assumption 2 above: - Ensure that all query points not inside fold, optional, or recursion scope contain a "class:" clause. That increases the number of available query start points, so OrientDB can choose the start point of lowest cardinality. Return True if the Filter block references no non-local fields, and False otherwise. # We need the "result" value of this function to be mutated within the "visitor_fn". # Since we support both Python 2 and Python 3, we can't use the "nonlocal" keyword here: # https://www.python.org/dev/peps/pep-3104/ # Instead, we use a dict to store the value we need mutated, since the "visitor_fn" # can mutate state in the parent scope, but not rebind variables in it without "nonlocal". # TODO(predrag): Revisit this if we drop support for Python 2. 
Expression visitor function that looks for uses of non-local fields. # Don't change the expression. Classify query locations into three groups: preferred, eligible, ineligible. - Ineligible locations are ones that cannot be the starting point of query execution. These include locations within recursions, locations that are the target of an optional traversal, and locations with an associated "where:" clause with non-local filter. - Preferred locations are ones that are eligible to be the starting point, and also have an associated "where:" clause that references no non-local fields -- only local fields, literals, and variables. - Eligible locations are all locations that do not fall into either of these two categories. Args: match_query: MatchQuery object describing the query being analyzed for optimization Returns: tuple (preferred, eligible, ineligible) where each element is a set of Location objects. The three sets are disjoint. # Any query must have at least one traversal with at least one step. # The first step in this traversal must be a QueryRoot. # The first step in the first traversal cannot possibly be inside an optional, recursion, # or fold. Its location is always an eligible start location for a query. # We need to determine whether it is merely eligible, or actually a preferred location. # TODO(predrag): Fix once we have a proper fix for tag-and-filter in the same scope. # Either the locally-scoped tag will have to generate a LocalField # instead of a ContextField, or we'll have to rework the local filter # detection code in this module. # This loop will repeat the analysis of the first step of the first traversal. # QueryRoots other than the first are required to always be at a location whose status # (preferred / eligible / ineligible) is already known. Since we already processed # the first QueryRoot above, the rest of the loop can assume all QueryRoots are like that. # This location has already been encountered and processed. # Other than setting the "at_eligible_or_preferred_location" state for the sake of # the following MATCH steps, there is nothing further to be done. # All Recurse blocks cause locations within to be ineligible. # Optional Traverse blocks cause locations within to be ineligible. # Non-optional Traverse blocks do not change the eligibility of locations within: # if the pre-Traverse location was eligible, so will the location within, # and if it was not eligible, neither will the location within. # This location has a local filter, and is not otherwise ineligible (it's not # in a recursion etc.). Therefore, it's a preferred query start location. # Locations with non-local filters are never eligible locations, since they # depend on another location being executed before them. # No local filtering (i.e. not preferred), but also not ineligible. Eligible it is. Return the GraphQL type bound at the given step, or None if no bound is given. # The QueryRoot start class is a type bound. # The CoerceType target class is also a type bound. # A type bound exists. Assert that there is exactly one bound, defined in precisely one way. # No type bound exists at this MATCH step. Ensure that the two bounds either are an exact match, or one of them is None. Return a MATCH query where only preferred locations are valid as query start locations. # This location is preferred. We have to make sure that at least one occurrence # of this location in the MATCH query has an associated "class:" clause, # which would be generated by a type bound at the corresponding MATCH step. 
# The location is already valid. If so, make sure that this step either does # not have any type bounds (e.g. via QueryRoot or CoerceType blocks), # or has type bounds that match the previously-decided type bound. # The location is not yet known to be valid. If it does not have # a type bound in this MATCH step, add a type coercion to the type # registered in "location_types". # This location is eligible, but not preferred. We have not make sure # none of the MATCH steps with this location have type bounds, and therefore # will not produce a corresponding "class:" clause in the resulting MATCH query. # There is a type bound here that we need to neutralize. # Record the deduced type bound, so that if we encounter this location again, # we ensure that we again infer the same type bound. # The type bound here is already implied by the GraphQL query structure, # or has already been applied at a previous occurrence of this location. # We can simply delete the QueryRoot / CoerceType blocks that impart it. # The type bound here is not already implied by the GraphQL query structure. # This should only be possible via a CoerceType block. Lower this CoerceType # block into a Filter with INSTANCEOF to ensure the resulting query has the # same semantics, while making the location invalid as a query start point. # There is no type bound that OrientDB can find defined at this location. # No action is necessary. # This location is neither preferred nor eligible. # No action is necessary at this location. Return a MATCH query where all eligible locations are valid as query start locations. # This location is eligible. We need to make sure it has an associated type bound, # so that it produces a "class:" clause that will make it a valid query start # location. It either already has such a type bound, or we can use the type # implied by the GraphQL query structure to add one. # There is a type bound here. We simply ensure that the bound is not conflicting # with any other type bound at a different MATCH step with the same location. # Record the deduced type bound, so that if we encounter this location again, # we ensure that we again infer the same type bound. # This function may only be called if there are no preferred locations. Since this # location cannot be preferred, and is not eligible, it must be ineligible. # No action is necessary in this case. Ensure that OrientDB only considers desirable query start points in query planning. # Convert all eligible locations into non-eligible ones, by removing # their "class:" clause. The "class:" clause is provided either by having # a QueryRoot block or a CoerceType block in the MatchStep corresponding # to the location. We remove it by converting the class check into # an "INSTANCEOF" Filter block, which OrientDB is unable to optimize away. # Make sure that all eligible locations have a "class:" clause by adding # a CoerceType block that is a no-op as guaranteed by the schema. This merely # ensures that OrientDB is able to use each of these locations as a query start point, # and will choose the one whose class is of lowest cardinality. | 2.010179 | 2 |
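
One detail of the record above worth isolating is the dict-based closure trick in _is_local_filter: because the module still supports Python 2, the nested visitor cannot rebind an enclosing local with nonlocal, so the flag lives in a mutable dict that the closure updates in place. A standalone toy version of the same pattern, independent of the compiler's expression types, might look like this.

def tree_contains_tag(node):
    """Return True if any nested dict in `node` carries a 'tag' key (toy example)."""
    result = {'found': False}  # mutable cell the nested visitor can update

    def visit(value):
        if isinstance(value, dict):
            if 'tag' in value:
                result['found'] = True
            for child in value.values():
                visit(child)
        elif isinstance(value, list):
            for child in value:
                visit(child)

    visit(node)
    return result['found']

print(tree_contains_tag({'where': [{'op': '='}, {'tag': 'x'}]}))  # prints True
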
traffic_light/core.py | ofalk/cleware-traffic-light | 0 | 8725 | from enum import IntEnum
import functools
import usb.core
import usb.util
from traffic_light.error import TrafficLightError, MultipleTrafficLightsError
BM_REQUEST_TYPE = 0x21
B_REQUEST = 0x09
W_VALUE = 0x200
W_INDEX = 0x00
ID_VENDOR = 0x0d50
ID_PRODUCT = 0x0008
INTERFACE = 0
class Color(IntEnum):
RED = 0x10
YELLOW = 0x11
GREEN = 0x12
class State(IntEnum):
OFF = 0x0
ON = 0x1
class ClewareTrafficLight:
def __init__(self, address=None):
if address:
self.address = address
self.device = usb.core.find(
address=address,
idVendor=ID_VENDOR,
idProduct=ID_PRODUCT)
elif len(list(ClewareTrafficLight.find_devices())) > 1:
            raise MultipleTrafficLightsError(
                "No address is given and there are multiple devices connected! "
"Use 'print_devices' to see a list of connected devices."
)
else:
self.device = usb.core.find(
idVendor=ID_VENDOR,
idProduct=ID_PRODUCT)
if self.device is None:
raise TrafficLightError('Cleware traffic light not found!')
self.reattach = False
def attach(self):
"""Attaches the device back to the kernel"""
usb.util.dispose_resources(self.device)
if self.reattach:
self.device.attach_kernel_driver(INTERFACE)
    def detach(self):
        """Detaches the device from the kernel so it can be used"""
if self.device.is_kernel_driver_active(INTERFACE):
self.device.detach_kernel_driver(INTERFACE)
self.reattach = True
@staticmethod
def find_devices():
"""Returns the raw iterator of all found traffic lights"""
devices = usb.core.find(find_all=True, idVendor=ID_VENDOR, idProduct=ID_PRODUCT)
if devices:
return devices
return []
@staticmethod
def print_devices():
"""Prints a list of all connected traffic lights"""
devices = ClewareTrafficLight.get_devices()
for device in devices:
print(device)
@staticmethod
def get_devices():
"""Returns a list of ClewareTrafficLight instances"""
usb_devices = ClewareTrafficLight.find_devices()
return [ClewareTrafficLight(d.address) for d in usb_devices]
def set_led(self, color, value, timeout=1000):
"""Sets the given state and color of the attached traffic light
        Attributes:
            color -- the color to set, as the enum. E.g. Color.RED
            value -- the state to which it should be set. E.g. State.ON
            timeout -- timeout for the USB control transfer in milliseconds
"""
try:
self.detach()
self.device.ctrl_transfer(BM_REQUEST_TYPE, B_REQUEST, W_VALUE, W_INDEX, [0x00, color, value], timeout=timeout)
except Exception as exc:
raise TrafficLightError(str(exc)) from exc
finally:
self.attach()
    def __getattr__(self, name):
        """Parses attribute calls such as red_on into the matching set_led call"""
args = name.split('_')
try:
color = Color[args[0].upper()]
state = State[args[1].upper()]
except Exception as exc:
raise TrafficLightError("Either the given color or state could not be parsed! Exc: {}"
.format(exc))
return functools.partial(self.set_led, color, state)
    def __str__(self):
        """Converts the instance into a string with important information"""
return ("== Cleware Traffic Light ==\n"
"Address: {} \n"
"IdVendor: {} \n"
"IdProduct: {}".format(self.address, ID_VENDOR, ID_PRODUCT))
| from enum import IntEnum
import functools
import usb.core
import usb.util
from traffic_light.error import TrafficLightError, MultipleTrafficLightsError
BM_REQUEST_TYPE = 0x21
B_REQUEST = 0x09
W_VALUE = 0x200
W_INDEX = 0x00
ID_VENDOR = 0x0d50
ID_PRODUCT = 0x0008
INTERFACE = 0
class Color(IntEnum):
RED = 0x10
YELLOW = 0x11
GREEN = 0x12
class State(IntEnum):
OFF = 0x0
ON = 0x1
class ClewareTrafficLight:
def __init__(self, address=None):
if address:
self.address = address
self.device = usb.core.find(
address=address,
idVendor=ID_VENDOR,
idProduct=ID_PRODUCT)
elif len(list(ClewareTrafficLight.find_devices())) > 1:
            raise MultipleTrafficLightsError(
                "No address is given and there are multiple devices connected! "
"Use 'print_devices' to see a list of connected devices."
)
else:
self.device = usb.core.find(
idVendor=ID_VENDOR,
idProduct=ID_PRODUCT)
if self.device is None:
raise TrafficLightError('Cleware traffic light not found!')
self.reattach = False
def attach(self):
"""Attaches the device back to the kernel"""
usb.util.dispose_resources(self.device)
if self.reattach:
self.device.attach_kernel_driver(INTERFACE)
    def detach(self):
        """Detaches the device from the kernel so it can be used"""
if self.device.is_kernel_driver_active(INTERFACE):
self.device.detach_kernel_driver(INTERFACE)
self.reattach = True
@staticmethod
def find_devices():
"""Returns the raw iterator of all found traffic lights"""
devices = usb.core.find(find_all=True, idVendor=ID_VENDOR, idProduct=ID_PRODUCT)
if devices:
return devices
return []
@staticmethod
def print_devices():
"""Prints a list of all connected traffic lights"""
devices = ClewareTrafficLight.get_devices()
for device in devices:
print(device)
@staticmethod
def get_devices():
"""Returns a list of ClewareTrafficLight instances"""
usb_devices = ClewareTrafficLight.find_devices()
return [ClewareTrafficLight(d.address) for d in usb_devices]
def set_led(self, color, value, timeout=1000):
"""Sets the given state and color of the attached traffic light
        Attributes:
            color -- the color to set, as the enum. E.g. Color.RED
            value -- the state to which it should be set. E.g. State.ON
            timeout -- timeout for the USB control transfer in milliseconds
"""
try:
self.detach()
self.device.ctrl_transfer(BM_REQUEST_TYPE, B_REQUEST, W_VALUE, W_INDEX, [0x00, color, value], timeout=timeout)
except Exception as exc:
raise TrafficLightError(str(exc)) from exc
finally:
self.attach()
    def __getattr__(self, name):
        """Parses attribute calls such as red_on into the matching set_led call"""
args = name.split('_')
try:
color = Color[args[0].upper()]
state = State[args[1].upper()]
except Exception as exc:
raise TrafficLightError("Either the given color or state could not be parsed! Exc: {}"
.format(exc))
return functools.partial(self.set_led, color, state)
    def __str__(self):
        """Converts the instance into a string with important information"""
return ("== Cleware Traffic Light ==\n"
"Address: {} \n"
"IdVendor: {} \n"
"IdProduct: {}".format(self.address, ID_VENDOR, ID_PRODUCT))
| en | 0.79782 | Attaches the device back to the kernel Detaches the device from to kernel so it can be used Returns the raw iterator of all found traffic lights Prints a list of all connected traffic lights Returns a list of ClewareTrafficLight instances Sets the given state and color of the attached traffic light Attribute: color -- the to set color as the enum. E.g. Color.RED state -- the state to which it should be set. E.g. State.ON address -- the usb address of a specific traffic light Parses attribut calls in function Converts instance into string with important imformations | 2.919528 | 3 |
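
A minimal usage sketch for the driver above: attribute names are resolved by __getattr__ as <color>_<state>, so red_off and green_on dispatch to set_led automatically. Running it requires an attached Cleware device and sufficient USB permissions, which is why the calls are wrapped in a try/except here.

from traffic_light.core import ClewareTrafficLight, Color, State
from traffic_light.error import TrafficLightError

try:
    light = ClewareTrafficLight()           # picks the single attached device
    light.red_off()                         # parsed by __getattr__ into a set_led call
    light.green_on()
    light.set_led(Color.YELLOW, State.OFF)  # the explicit equivalent of yellow_off()
except TrafficLightError as exc:
    print("Could not drive the traffic light: {}".format(exc))
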
sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_luis_response_async.py | dubiety/azure-sdk-for-python | 1 | 8726 | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_analyze_orchestration_app_luis_response_async.py
DESCRIPTION:
    This sample demonstrates how to analyze a user query using an orchestration project.
    In this sample, the orchestration project's top intent will map to a LUIS project.
    For more info about how to set up a CLU orchestration project, see the README.
USAGE:
python sample_analyze_orchestration_app_luis_response_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_CONVERSATIONS_ENDPOINT - endpoint for your CLU resource.
2) AZURE_CONVERSATIONS_KEY - API key for your CLU resource.
3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME - project name for your CLU orchestration project.
4) AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME - deployment name for your CLU orchestration project.
"""
import asyncio
async def sample_analyze_orchestration_app_luis_response_async():
# [START analyze_orchestration_app_luis_response]
# import libraries
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.language.conversations.aio import ConversationAnalysisClient
# get secrets
clu_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"]
clu_key = os.environ["AZURE_CONVERSATIONS_KEY"]
project_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME"]
deployment_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME"]
# analyze query
client = ConversationAnalysisClient(clu_endpoint, AzureKeyCredential(clu_key))
async with client:
query = "Reserve a table for 2 at the Italian restaurant"
result = await client.analyze_conversation(
task={
"kind": "Conversation",
"analysisInput": {
"conversationItem": {
"participantId": "1",
"id": "1",
"modality": "text",
"language": "en",
"text": query
},
"isLoggingEnabled": False
},
"parameters": {
"projectName": project_name,
"deploymentName": deployment_name,
"verbose": True
}
}
)
# view result
print("query: {}".format(result["result"]["query"]))
print("project kind: {}\n".format(result["result"]["prediction"]["projectKind"]))
# top intent
top_intent = result["result"]["prediction"]["topIntent"]
print("top intent: {}".format(top_intent))
top_intent_object = result["result"]["prediction"]["intents"][top_intent]
print("confidence score: {}".format(top_intent_object["confidenceScore"]))
print("project kind: {}".format(top_intent_object["targetProjectKind"]))
if top_intent_object["targetProjectKind"] == "Luis":
print("\nluis response:")
luis_response = top_intent_object["result"]["prediction"]
print("top intent: {}".format(luis_response["topIntent"]))
print("\nentities:")
for entity in luis_response["entities"]:
print("\n{}".format(entity))
# [END analyze_orchestration_app_luis_response]
async def main():
await sample_analyze_orchestration_app_luis_response_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main()) | # coding=utf-8
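# --- Illustrative helper sketch (added; an assumption, not part of the original sample) ---
# Pulls the fields printed above out of the response dict in one place; it only
# uses keys that already appear in the sample.
def _top_intent(result):
    prediction = result["result"]["prediction"]
    return prediction["topIntent"], prediction["intents"][prediction["topIntent"]]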
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_analyze_orchestration_app_luis_response_async.py
DESCRIPTION:
    This sample demonstrates how to analyze a user query using an orchestration project.
    In this sample, the orchestration project's top intent will map to a LUIS project.
    For more info about how to set up a CLU orchestration project, see the README.
USAGE:
python sample_analyze_orchestration_app_luis_response_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_CONVERSATIONS_ENDPOINT - endpoint for your CLU resource.
2) AZURE_CONVERSATIONS_KEY - API key for your CLU resource.
3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME - project name for your CLU orchestration project.
4) AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME - deployment name for your CLU orchestration project.
"""
import asyncio
async def sample_analyze_orchestration_app_luis_response_async():
# [START analyze_orchestration_app_luis_response]
# import libraries
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.language.conversations.aio import ConversationAnalysisClient
# get secrets
clu_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"]
clu_key = os.environ["AZURE_CONVERSATIONS_KEY"]
project_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME"]
deployment_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME"]
# analyze query
client = ConversationAnalysisClient(clu_endpoint, AzureKeyCredential(clu_key))
async with client:
query = "Reserve a table for 2 at the Italian restaurant"
result = await client.analyze_conversation(
task={
"kind": "Conversation",
"analysisInput": {
"conversationItem": {
"participantId": "1",
"id": "1",
"modality": "text",
"language": "en",
"text": query
},
"isLoggingEnabled": False
},
"parameters": {
"projectName": project_name,
"deploymentName": deployment_name,
"verbose": True
}
}
)
# view result
print("query: {}".format(result["result"]["query"]))
print("project kind: {}\n".format(result["result"]["prediction"]["projectKind"]))
# top intent
top_intent = result["result"]["prediction"]["topIntent"]
print("top intent: {}".format(top_intent))
top_intent_object = result["result"]["prediction"]["intents"][top_intent]
print("confidence score: {}".format(top_intent_object["confidenceScore"]))
print("project kind: {}".format(top_intent_object["targetProjectKind"]))
if top_intent_object["targetProjectKind"] == "Luis":
print("\nluis response:")
luis_response = top_intent_object["result"]["prediction"]
print("top intent: {}".format(luis_response["topIntent"]))
print("\nentities:")
for entity in luis_response["entities"]:
print("\n{}".format(entity))
# [END analyze_orchestration_app_luis_response]
async def main():
await sample_analyze_orchestration_app_luis_response_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main()) | en | 0.659231 | # coding=utf-8 # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ FILE: sample_analyze_orchestration_app_luis_response_async.py DESCRIPTION: This sample demonstrates how to analyze user query using an orchestration project. In this sample, orchestration project's top intent will map to a LUIS project. For more info about how to setup a CLU orchestration project, see the README. USAGE: python sample_analyze_orchestration_app_luis_response_async.py Set the environment variables with your own values before running the sample: 1) AZURE_CONVERSATIONS_ENDPOINT - endpoint for your CLU resource. 2) AZURE_CONVERSATIONS_KEY - API key for your CLU resource. 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME - project name for your CLU orchestration project. 4) AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME - deployment name for your CLU orchestration project. # [START analyze_orchestration_app_luis_response] # import libraries # get secrets # analyze query # view result # top intent # [END analyze_orchestration_app_luis_response] | 1.953416 | 2 |
src/sunstruck/schemas/__init__.py | la-mar/sunstruck-api | 3 | 8727 | # flake8: noqa
from schemas.client_credentials import *
from schemas.message import *
from schemas.token import *
from schemas.user import *
| # flake8: noqa
from schemas.client_credentials import *
from schemas.message import *
from schemas.token import *
from schemas.user import *
| it | 0.238973 | # flake8: noqa | 1.094032 | 1 |
intro/deploy.py | terziev-viktor/SolidityCourse | 0 | 8728 | <gh_stars>0
import json
from web3 import Web3
from solcx import compile_standard, install_solc
with open("./SimpleStorage.sol", "r") as file:
simple_storage_src = file.read()
# install solcx
install_solc("0.8.0")
# compile the source
compiled_sol = compile_standard(
{
"language": "Solidity",
"sources": {"SimpleStorage.sol": {"content": simple_storage_src}},
"settings":
{
"outputSelection":
{
"*":
{
"*": ["abi", "metadata", "evm.bytecode", "evm.sourceMap"]
}
}
},
},
solc_version = "0.8.0"
)
with open("./out.json", "w") as file:
json.dump(compiled_sol, file)
# getting the bytecode
bytecode = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["evm"]["bytecode"]["object"]
# getting the abi
abi = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["abi"]
# connecting to ganache
w3 = Web3(Web3.HTTPProvider("HTTP://127.0.0.1:7545"))
chain_id = 1337
my_address = "0x02ECDdb09504C4d4B2ba2c7Ec80d77d44f6e631c"
private_key = "0xa9ddbecce894fdad11cd9864d9c58f794d23bd5f0d78d1c2eea204b284edfefc"
# Create the contract in python
SimpleStorage = w3.eth.contract(abi=abi, bytecode=bytecode)
# Get the latest test transaction
nonce = w3.eth.getTransactionCount(my_address)
# 1. Build a transaction
# 2. Sign the transaction
# 3. Send the transaction
transaction = SimpleStorage.constructor().buildTransaction({"gasPrice": w3.eth.gas_price, "chainId": chain_id, "from": my_address, "nonce": nonce})
signed_txn = w3.eth.account.sign_transaction(transaction, private_key)
tx_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)
# confirm transaction is received
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
print("tx_hash=", tx_hash)
print("receipt=", tx_receipt)
# working on-chain
simple_storage = w3.eth.contract(address=tx_receipt.contractAddress, abi=abi)
print(simple_storage.functions.retrieve().call())
store_transaction = simple_storage.functions.store(15).buildTransaction({
"gasPrice": w3.eth.gas_price,
"chainId": chain_id,
"from": my_address,
"nonce": nonce + 1
}
)
singed_store_transaction = w3.eth.account.sign_transaction(store_transaction, private_key)
store_transaction_hash = w3.eth.send_raw_transaction(singed_store_transaction.rawTransaction)
store_transaction_receipt = w3.eth.wait_for_transaction_receipt(store_transaction_hash)
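# --- Optional refactor sketch (added; an assumption, not in the original script) ---
# The build -> sign -> send -> wait steps above are repeated for every call, so a
# small helper can keep them in one place. It only reuses web3 calls that are
# already used in this script.
def send_built_transaction(built_tx, key=private_key):
    signed = w3.eth.account.sign_transaction(built_tx, key)
    tx = w3.eth.send_raw_transaction(signed.rawTransaction)
    return w3.eth.wait_for_transaction_receipt(tx)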
| import json
from web3 import Web3
from solcx import compile_standard, install_solc
with open("./SimpleStorage.sol", "r") as file:
simple_storage_src = file.read()
# install solcx
install_solc("0.8.0")
# compile the source
compiled_sol = compile_standard(
{
"language": "Solidity",
"sources": {"SimpleStorage.sol": {"content": simple_storage_src}},
"settings":
{
"outputSelection":
{
"*":
{
"*": ["abi", "metadata", "evm.bytecode", "evm.sourceMap"]
}
}
},
},
solc_version = "0.8.0"
)
with open("./out.json", "w") as file:
json.dump(compiled_sol, file)
# getting the bytecode
bytecode = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["evm"]["bytecode"]["object"]
# getting the abi
abi = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["abi"]
# connecting to ganache
w3 = Web3(Web3.HTTPProvider("HTTP://127.0.0.1:7545"))
chain_id = 1337
my_address = "0x02ECDdb09504C4d4B2ba2c7Ec80d77d44f6e631c"
private_key = "0xa9ddbecce894fdad11cd9864d9c58f794d23bd5f0d78d1c2eea204b284edfefc"
# Create the contract in python
SimpleStorage = w3.eth.contract(abi=abi, bytecode=bytecode)
# Get the latest test transaction
nonce = w3.eth.getTransactionCount(my_address)
# 1. Build a transaction
# 2. Sign the transaction
# 3. Send the transaction
transaction = SimpleStorage.constructor().buildTransaction({"gasPrice": w3.eth.gas_price, "chainId": chain_id, "from": my_address, "nonce": nonce})
signed_txn = w3.eth.account.sign_transaction(transaction, private_key)
tx_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)
# confirm transaction is received
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
print("tx_hash=", tx_hash)
print("receipt=", tx_receipt)
# working on-chain
simple_storage = w3.eth.contract(address=tx_receipt.contractAddress, abi=abi)
print(simple_storage.functions.retrieve().call())
store_transaction = simple_storage.functions.store(15).buildTransaction({
"gasPrice": w3.eth.gas_price,
"chainId": chain_id,
"from": my_address,
"nonce": nonce + 1
}
)
singed_store_transaction = w3.eth.account.sign_transaction(store_transaction, private_key)
store_transaction_hash = w3.eth.send_raw_transaction(singed_store_transaction.rawTransaction)
store_transaction_receipt = w3.eth.wait_for_transaction_receipt(store_transaction_hash) | en | 0.791989 | # install solcx # compile the source # getting the bytecode # getting the abi # connecting to ganache # Create the contract in python # Get the latest test transaction # 1. Build a transaction # 2. Sing the transaction # 3. Send the transaction # confirm transaction is received # working on-chain | 2.215618 | 2 |
noise/extras/meta/protocol/protocol.py | mgp25/noise | 6 | 8729 | <reponame>mgp25/noise
from noise.dh.dh import DH
from noise.cipher.cipher import Cipher
from noise.hash.hash import Hash
from noise.processing.handshakepatterns.handshakepattern import HandshakePattern
from noise.processing.impl.handshakestate import HandshakeState
from noise.processing.impl.symmetricstate import SymmetricState
from noise.processing.impl.cipherstate import CipherState
class NoiseProtocol(object):
def __init__(self, pattern, dh, cipher, hash):
"""
:param pattern:
:type pattern:
:param dh:
:type dh:
:param cipher:
:type cipher:
:param hash:
:type hash:
"""
self._pattern = pattern # type: HandshakePattern
self._dh = dh # type: DH
self._cipher = cipher # type: Cipher
self._hash = hash # type: Hash
self._oneway = len(HandshakePattern.parse_handshakepattern(pattern.name)[0]) == 1 # type: bool
@property
def oneway(self):
return self._oneway
@property
def pattern(self):
return self._pattern
@property
def dh(self):
return self._dh
@property
def cipher(self):
return self._cipher
@property
def hash(self):
return self._hash
def create_cipherstate(self, cipher=None):
"""
:param cipher:
:type cipher: Cipher
:return:
:rtype: CipherState
"""
return CipherState(cipher or self._cipher)
def create_symmetricstate(self, cipherstate=None, hash=None):
"""
:param cipherstate:
:type cipherstate: CipherState
:param hash:
:type hash: Hash
:return:
:rtype: SymmetricState
"""
return SymmetricState(cipherstate or self.create_cipherstate(), hash or self._hash)
def create_handshakestate(self, symmetricstate=None, dh=None):
"""
:param symmetricstate:
:type symmetricstate: SymmetricState
:param dh:
:type dh: DH
:return:
:rtype: HandshakeState
"""
return HandshakeState(symmetricstate or self.create_symmetricstate(), dh or self._dh)
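# --- Illustrative usage sketch (added; an assumption, not part of the original module) ---
# Shows how the factory methods above are meant to chain: create_handshakestate()
# implicitly builds a SymmetricState (and with it a CipherState) from the
# protocol's own cipher/hash/dh unless explicit instances are passed in.
def build_handshakestate(pattern, dh, cipher, hash_):
    protocol = NoiseProtocol(pattern, dh, cipher, hash_)
    return protocol.create_handshakestate()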
| from noise.dh.dh import DH
from noise.cipher.cipher import Cipher
from noise.hash.hash import Hash
from noise.processing.handshakepatterns.handshakepattern import HandshakePattern
from noise.processing.impl.handshakestate import HandshakeState
from noise.processing.impl.symmetricstate import SymmetricState
from noise.processing.impl.cipherstate import CipherState
class NoiseProtocol(object):
def __init__(self, pattern, dh, cipher, hash):
"""
:param pattern:
:type pattern:
:param dh:
:type dh:
:param cipher:
:type cipher:
:param hash:
:type hash:
"""
self._pattern = pattern # type: HandshakePattern
self._dh = dh # type: DH
self._cipher = cipher # type: Cipher
self._hash = hash # type: Hash
self._oneway = len(HandshakePattern.parse_handshakepattern(pattern.name)[0]) == 1 # type: bool
@property
def oneway(self):
return self._oneway
@property
def pattern(self):
return self._pattern
@property
def dh(self):
return self._dh
@property
def cipher(self):
return self._cipher
@property
def hash(self):
return self._hash
def create_cipherstate(self, cipher=None):
"""
:param cipher:
:type cipher: Cipher
:return:
:rtype: CipherState
"""
return CipherState(cipher or self._cipher)
def create_symmetricstate(self, cipherstate=None, hash=None):
"""
:param cipherstate:
:type cipherstate: CipherState
:param hash:
:type hash: Hash
:return:
:rtype: SymmetricState
"""
return SymmetricState(cipherstate or self.create_cipherstate(), hash or self._hash)
def create_handshakestate(self, symmetricstate=None, dh=None):
"""
:param symmetricstate:
:type symmetricstate: SymmetricState
:param dh:
:type dh: DH
:return:
:rtype: HandshakeState
"""
return HandshakeState(symmetricstate or self.create_symmetricstate(), dh or self._dh) | en | 0.576752 | :param pattern: :type pattern: :param dh: :type dh: :param cipher: :type cipher: :param hash: :type hash: # type: HandshakePattern # type: DH # type: Cipher # type: Hash # type: bool :param cipher: :type cipher: Cipher :return: :rtype: CipherState :param cipherstate: :type cipherstate: CipherState :param hash: :type hash: Hash :return: :rtype: SymmetricState :param symmetricstate: :type symmetricstate: SymmetricState :param dh: :type dh: DH :return: :rtype: HandshakeState | 2.33459 | 2 |
info_popup.py | cartazio/SublimeHaskell | 2 | 8730 | import urllib.parse
import webbrowser
import json
from xml.etree import ElementTree
import sublime
import SublimeHaskell.sublime_haskell_common as Common
import SublimeHaskell.internals.utils as Utils
import SublimeHaskell.internals.unicode_opers as UnicodeOpers
import SublimeHaskell.symbols as symbols
import SublimeHaskell.internals.backend_mgr as BackendManager
import SublimeHaskell.parseoutput as ParseOutput
import SublimeHaskell.types as types
# Unused module variable:
# style_header = "<style>" \
# "a { text-decoration: underline; }" \
# ".type { color: red; }" \
# ".tyvar { color: blue; }" \
# ".operator { color: green; }" \
# ".comment { color: gray; font-style: italic; }" \
# ".docs { color: gray; }" \
# "</style>"
class Styles(object):
"""
    Loads and holds a cache of color scheme styles
    Also generates the style header
"""
def __init__(self):
self.schemes = {}
CSS_CLASSES = {
'comment': 'comment',
'function': 'entity.name.function',
'type': 'entity.name.type',
'operator': 'keyword.operator',
'keyword': 'keyword.declaration',
'tyvar': 'variable.generic',
'error': 'sublimehaskell.mark.error',
'warning': 'sublimehaskell.mark.warning',
'hint': 'sublimehaskell.mark.hint'
}
def load_scheme(self, scheme_path):
if scheme_path not in self.schemes:
scheme_res = sublime.load_resource(scheme_path)
if scheme_res:
# Go through all styles and collect scope/foreground/fontStyle etc.
# Prefer ST3 'sublime-color-scheme' JSON over older TextMate XML.
self.schemes[scheme_path] = self.collect_sublime_scheme(json.loads(scheme_res)) \
if scheme_path.endswith('.sublime-color-scheme') \
else self.collect_textmate_scheme(ElementTree.fromstring(scheme_res))
return self.schemes.get(scheme_path, {})
def collect_textmate_scheme(self, scheme_tree):
scheme = {}
for style in scheme_tree.findall(".//dict[key='scope']"):
try:
cur_style = {}
cur_tag = None
for elem in style.iter():
if elem.tag == 'key':
cur_tag = elem.text # We are going to fill it next time
elif elem.tag == 'string' and cur_tag is not None:
cur_style[cur_tag] = elem.text
cur_tag = None
if 'scope' in cur_style:
scheme[cur_style['scope']] = cur_style
except ValueError:
pass
return scheme
def collect_sublime_scheme(self, scheme_dict):
scheme = {}
for rule in scheme_dict.get('rules', []):
scope = rule.get('scope', '')
if scope:
scheme[scope] = rule
return scheme
def gen_style(self, scheme_path):
scheme = self.load_scheme(scheme_path)
parts = []
parts.append("<style>")
parts.append("a { text-decoration: underline; }")
# generate CSS style for each class
for cls, scope in self.CSS_CLASSES.items():
# find scope or its parent in scheme
scope_parts = scope.split('.')
for css_scope in reversed(['.'.join(scope_parts[0:i+1]) for i in range(0, len(scope_parts))]):
if css_scope in scheme: # Found some scope, fill style class
style_parts = []
if 'foreground' in scheme[css_scope]:
style_parts.append("color: {0}".format(scheme[css_scope]['foreground']))
# Prefer ST3 'sublime-color-scheme' JSON attribute over the older TextMate-ish name
font_style = scheme[css_scope].get('font_style', scheme[css_scope].get('fontStyle', ''))
if font_style:
style_parts.append("font-style: {0}".format(font_style))
parts.append(".{0} {{ {1} }}".format(cls, "; ".join(style_parts)))
break
parts.append("</style>")
return "".join(parts)
class SublimeHaskellHoverPopup(object):
# HTML style formatting
STYLES = Styles()
def __init__(self, view, filename, point, hover_zone):
super().__init__()
self.view = view
self.filename = filename
self.point = point
self.hover_zone = hover_zone
self.line = view.rowcol(point)[0]
self.shown = False
def do_hover(self):
if self.hover_zone == sublime.HOVER_TEXT:
qsymbol = Common.get_qualified_symbol_at_point(self.view, self.point)
## print('hover: qualified symbol {0}'.format(qsymbol))
module_word = qsymbol.module
ident = qsymbol.name
if module_word is not None and ident is None:
# TODO: Any ideas for popup about module?
pass
elif ident is not None:
whois_name = qsymbol.qualified_name()
full_name = qsymbol.full_name()
# Try get type of hovered symbol
typed_expr = None
if types.SourceHaskellTypeCache().has(self.filename):
typed_expr = self.get_type(types.SourceHaskellTypeCache().get(self.filename), whois_name)
else:
project_name = Common.locate_cabal_project_from_view(self.view)[1]
point_rgn = sublime.Region(self.point, self.point)
typed_expr = self.get_type(types.get_type_view(self.view, project_name, point_rgn), whois_name)
# Try whois
suggest_import = False
decl = Utils.head_of(BackendManager.active_backend().whois(whois_name, self.filename))
if not decl:
suggest_import = True
decl = Utils.head_of(BackendManager.active_backend().lookup(full_name, self.filename))
self.create_symbol_popup(typed_expr, decl, suggest_import)
elif self.hover_zone == sublime.HOVER_GUTTER:
errs = [err for err in ParseOutput.MARKER_MANAGER.marks_for_view(self.view) if err.region.start.line == self.line]
if errs:
popup_parts = [self.STYLES.gen_style(self.view.settings().get('color_scheme'))]
for err in errs:
msg = UnicodeOpers.use_unicode_operators(symbols.escape_text(err.message))
# Decorate first word with style
decors = {
'Error': 'error',
'Warning': 'warning',
'Hint': 'hint'
}
for dec, dec_style in decors.items():
msg = msg.replace(dec, u'<span class="{0}">{1}</span>'.format(dec_style, dec))
popup_parts.append(u'<p>{0}</p>'.format(msg))
if err.correction is not None:
popup_parts.append(err.correction.popup())
popup_text = u''.join(popup_parts)
self.shown = True
self.view.show_popup(popup_text, sublime.HIDE_ON_MOUSE_MOVE_AWAY, self.point, 600, 600,
self.on_navigate, self.on_hide)
def create_symbol_popup(self, typed_expr, decl, suggest_import):
if typed_expr or decl:
popup_parts = [self.STYLES.gen_style(self.view.settings().get('color_scheme'))]
if typed_expr:
popup_parts.append(u'<p><span class="function">{0}</span>{1}</p>'.format(
typed_expr.substr(self.view),
symbols.format_type(UnicodeOpers.use_unicode_operators(' :: {0}'.format(typed_expr.typename)))))
if decl:
popup_msg = [u'<a href="import:{0}">Add import</a>'.format(urllib.parse.quote_plus(decl.name))] \
if suggest_import else []
popup_parts.append(decl.popup(popup_msg))
popup_text = u''.join(popup_parts)
if not self.shown:
self.shown = True
self.view.show_popup(popup_text, sublime.HIDE_ON_MOUSE_MOVE_AWAY, self.point, 600, 600,
self.on_navigate, self.on_hide)
else:
self.view.update_popup(popup_text)
def get_type(self, type_list, qual_name):
filt_types = [t for t in type_list
if t.substr(self.view) == qual_name and t.region(self.view).contains(self.point)]
return Utils.head_of(filt_types)
def on_navigate(self, url):
if self.view.is_popup_visible():
self.view.hide_popup()
if url[0:4] == 'http':
webbrowser.open(url)
elif url[0:8] == 'autofix:':
rgn = symbols.Region.from_str(url[8:])
ParseOutput.MARKER_MANAGER.apply_autocorrect(self.view, rgn)
elif url[0:7] == "import:":
decl_name = urllib.parse.unquote(url[7:])
self.view.run_command('sublime_haskell_insert_import_for_symbol',
{'filename': self.view.file_name(),
'decl': decl_name})
else:
self.view.window().open_file(url, sublime.ENCODED_POSITION | sublime.TRANSIENT)
def on_hide(self):
self.shown = False
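# --- Illustrative sketch (added; an assumption, not part of the original plugin) ---
# gen_style() is keyed on the view's color scheme path, so popup bodies are
# typically assembled as: style header first, then the HTML fragments. Only calls
# already used in this module are reused here.
def _build_popup_html(view, body_html):
    style = SublimeHaskellHoverPopup.STYLES.gen_style(view.settings().get('color_scheme'))
    return style + body_html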
| import urllib.parse
import webbrowser
import json
from xml.etree import ElementTree
import sublime
import SublimeHaskell.sublime_haskell_common as Common
import SublimeHaskell.internals.utils as Utils
import SublimeHaskell.internals.unicode_opers as UnicodeOpers
import SublimeHaskell.symbols as symbols
import SublimeHaskell.internals.backend_mgr as BackendManager
import SublimeHaskell.parseoutput as ParseOutput
import SublimeHaskell.types as types
# Unused module variable:
# style_header = "<style>" \
# "a { text-decoration: underline; }" \
# ".type { color: red; }" \
# ".tyvar { color: blue; }" \
# ".operator { color: green; }" \
# ".comment { color: gray; font-style: italic; }" \
# ".docs { color: gray; }" \
# "</style>"
class Styles(object):
"""
    Loads and holds a cache of color scheme styles
    Also generates the style header
"""
def __init__(self):
self.schemes = {}
CSS_CLASSES = {
'comment': 'comment',
'function': 'entity.name.function',
'type': 'entity.name.type',
'operator': 'keyword.operator',
'keyword': 'keyword.declaration',
'tyvar': 'variable.generic',
'error': 'sublimehaskell.mark.error',
'warning': 'sublimehaskell.mark.warning',
'hint': 'sublimehaskell.mark.hint'
}
def load_scheme(self, scheme_path):
if scheme_path not in self.schemes:
scheme_res = sublime.load_resource(scheme_path)
if scheme_res:
# Go through all styles and collect scope/foreground/fontStyle etc.
# Prefer ST3 'sublime-color-scheme' JSON over older TextMate XML.
self.schemes[scheme_path] = self.collect_sublime_scheme(json.loads(scheme_res)) \
if scheme_path.endswith('.sublime-color-scheme') \
else self.collect_textmate_scheme(ElementTree.fromstring(scheme_res))
return self.schemes.get(scheme_path, {})
def collect_textmate_scheme(self, scheme_tree):
scheme = {}
for style in scheme_tree.findall(".//dict[key='scope']"):
try:
cur_style = {}
cur_tag = None
for elem in style.iter():
if elem.tag == 'key':
cur_tag = elem.text # We are going to fill it next time
elif elem.tag == 'string' and cur_tag is not None:
cur_style[cur_tag] = elem.text
cur_tag = None
if 'scope' in cur_style:
scheme[cur_style['scope']] = cur_style
except ValueError:
pass
return scheme
def collect_sublime_scheme(self, scheme_dict):
scheme = {}
for rule in scheme_dict.get('rules', []):
scope = rule.get('scope', '')
if scope:
scheme[scope] = rule
return scheme
def gen_style(self, scheme_path):
scheme = self.load_scheme(scheme_path)
parts = []
parts.append("<style>")
parts.append("a { text-decoration: underline; }")
# generate CSS style for each class
for cls, scope in self.CSS_CLASSES.items():
# find scope or its parent in scheme
scope_parts = scope.split('.')
for css_scope in reversed(['.'.join(scope_parts[0:i+1]) for i in range(0, len(scope_parts))]):
if css_scope in scheme: # Found some scope, fill style class
style_parts = []
if 'foreground' in scheme[css_scope]:
style_parts.append("color: {0}".format(scheme[css_scope]['foreground']))
# Prefer ST3 'sublime-color-scheme' JSON attribute over the older TextMate-ish name
font_style = scheme[css_scope].get('font_style', scheme[css_scope].get('fontStyle', ''))
if font_style:
style_parts.append("font-style: {0}".format(font_style))
parts.append(".{0} {{ {1} }}".format(cls, "; ".join(style_parts)))
break
parts.append("</style>")
return "".join(parts)
class SublimeHaskellHoverPopup(object):
# HTML style formatting
STYLES = Styles()
def __init__(self, view, filename, point, hover_zone):
super().__init__()
self.view = view
self.filename = filename
self.point = point
self.hover_zone = hover_zone
self.line = view.rowcol(point)[0]
self.shown = False
def do_hover(self):
if self.hover_zone == sublime.HOVER_TEXT:
qsymbol = Common.get_qualified_symbol_at_point(self.view, self.point)
## print('hover: qualified symbol {0}'.format(qsymbol))
module_word = qsymbol.module
ident = qsymbol.name
if module_word is not None and ident is None:
# TODO: Any ideas for popup about module?
pass
elif ident is not None:
whois_name = qsymbol.qualified_name()
full_name = qsymbol.full_name()
# Try get type of hovered symbol
typed_expr = None
if types.SourceHaskellTypeCache().has(self.filename):
typed_expr = self.get_type(types.SourceHaskellTypeCache().get(self.filename), whois_name)
else:
project_name = Common.locate_cabal_project_from_view(self.view)[1]
point_rgn = sublime.Region(self.point, self.point)
typed_expr = self.get_type(types.get_type_view(self.view, project_name, point_rgn), whois_name)
# Try whois
suggest_import = False
decl = Utils.head_of(BackendManager.active_backend().whois(whois_name, self.filename))
if not decl:
suggest_import = True
decl = Utils.head_of(BackendManager.active_backend().lookup(full_name, self.filename))
self.create_symbol_popup(typed_expr, decl, suggest_import)
elif self.hover_zone == sublime.HOVER_GUTTER:
errs = [err for err in ParseOutput.MARKER_MANAGER.marks_for_view(self.view) if err.region.start.line == self.line]
if errs:
popup_parts = [self.STYLES.gen_style(self.view.settings().get('color_scheme'))]
for err in errs:
msg = UnicodeOpers.use_unicode_operators(symbols.escape_text(err.message))
# Decorate first word with style
decors = {
'Error': 'error',
'Warning': 'warning',
'Hint': 'hint'
}
for dec, dec_style in decors.items():
msg = msg.replace(dec, u'<span class="{0}">{1}</span>'.format(dec_style, dec))
popup_parts.append(u'<p>{0}</p>'.format(msg))
if err.correction is not None:
popup_parts.append(err.correction.popup())
popup_text = u''.join(popup_parts)
self.shown = True
self.view.show_popup(popup_text, sublime.HIDE_ON_MOUSE_MOVE_AWAY, self.point, 600, 600,
self.on_navigate, self.on_hide)
def create_symbol_popup(self, typed_expr, decl, suggest_import):
if typed_expr or decl:
popup_parts = [self.STYLES.gen_style(self.view.settings().get('color_scheme'))]
if typed_expr:
popup_parts.append(u'<p><span class="function">{0}</span>{1}</p>'.format(
typed_expr.substr(self.view),
symbols.format_type(UnicodeOpers.use_unicode_operators(' :: {0}'.format(typed_expr.typename)))))
if decl:
popup_msg = [u'<a href="import:{0}">Add import</a>'.format(urllib.parse.quote_plus(decl.name))] \
if suggest_import else []
popup_parts.append(decl.popup(popup_msg))
popup_text = u''.join(popup_parts)
if not self.shown:
self.shown = True
self.view.show_popup(popup_text, sublime.HIDE_ON_MOUSE_MOVE_AWAY, self.point, 600, 600,
self.on_navigate, self.on_hide)
else:
self.view.update_popup(popup_text)
def get_type(self, type_list, qual_name):
filt_types = [t for t in type_list
if t.substr(self.view) == qual_name and t.region(self.view).contains(self.point)]
return Utils.head_of(filt_types)
def on_navigate(self, url):
if self.view.is_popup_visible():
self.view.hide_popup()
if url[0:4] == 'http':
webbrowser.open(url)
elif url[0:8] == 'autofix:':
rgn = symbols.Region.from_str(url[8:])
ParseOutput.MARKER_MANAGER.apply_autocorrect(self.view, rgn)
elif url[0:7] == "import:":
decl_name = urllib.parse.unquote(url[7:])
self.view.run_command('sublime_haskell_insert_import_for_symbol',
{'filename': self.view.file_name(),
'decl': decl_name})
else:
self.view.window().open_file(url, sublime.ENCODED_POSITION | sublime.TRANSIENT)
def on_hide(self):
self.shown = False
| en | 0.54856 | # Unused module variable: # style_header = "<style>" \ # "a { text-decoration: underline; }" \ # ".type { color: red; }" \ # ".tyvar { color: blue; }" \ # ".operator { color: green; }" \ # ".comment { color: gray; font-style: italic; }" \ # ".docs { color: gray; }" \ # "</style>" Loads and holds cache of scheme styles Also generates style header # Go through all styles and collect scope/foreground/fontStyle etc. # Prefer ST3 'sublime-color-scheme' JSON over older TextMate XML. # We are going to fill it next time # generate CSS style for each class # find scope or its parent in scheme # Found some scope, fill style class # Prefer ST3 'sublime-color-scheme' JSON attribute over the older TextMate-ish name # HTML style formatting ## print('hover: qualified symbol {0}'.format(qsymbol)) # TODO: Any ideas for popup about module? # Try get type of hovered symbol # Try whois # Decorate first word with style | 2.177699 | 2 |
modules/google_home_lights.py | artizanatweb/ghome-assistant | 0 | 8731 | <reponame>artizanatweb/ghome-assistant
#!/usr/bin/env python
# Copyright (C) 2017 Seeed Technology Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modules.pixel_ring import pixel_ring
import numpy
import time
import threading
try:
import queue as Queue
except ImportError:
import Queue as Queue
class GoogleHomeLights:
def __init__(self):
self.basis = numpy.array([0] * 4 * 12)
self.basis[0 * 4 + 0] = 2
self.basis[3 * 4 + 2] = 2
self.basis[6 * 4 + 1] = 1
self.basis[6 * 4 + 2] = 1
self.basis[9 * 4 + 1] = 2
self.pixels = self.basis * 0
self.write(self.pixels)
pixel_ring.write(0, [6, 0, 0, 0])
self.next = threading.Event()
self.queue = Queue.Queue()
self.thread = threading.Thread(target=self._run)
self.thread.daemon = True
self.thread.start()
def wakeup(self, direction=0):
def f():
self._wakeup(direction)
self.queue.put(f)
def listen(self):
self.next.set()
self.queue.put(self._listen)
def think(self):
self.next.set()
self.queue.put(self._think)
def speak(self):
self.next.set()
self.queue.put(self._speak)
def off(self):
self.next.set()
self.queue.put(self._off)
def _run(self):
while True:
func = self.queue.get()
func()
def _wakeup(self, direction=0):
position = int((direction + 15) / 30) % 12
basis = numpy.roll(self.basis, position * 4)
for i in range(1, 25):
pixels = basis * i
self.write(pixels)
time.sleep(0.005)
pixels = numpy.roll(pixels, 4)
self.write(pixels)
time.sleep(0.1)
for i in range(2):
new_pixels = numpy.roll(pixels, 4)
self.write(new_pixels * 0.5 + pixels)
pixels = new_pixels
time.sleep(0.1)
self.write(pixels)
self.pixels = pixels
def _listen(self):
pixels = self.pixels
for i in range(1, 25):
self.write(pixels * i / 24)
time.sleep(0.01)
def _think(self):
pixels = self.pixels
self.next.clear()
while not self.next.is_set():
pixels = numpy.roll(pixels, 4)
self.write(pixels)
time.sleep(0.2)
t = 0.1
for i in range(0, 5):
pixels = numpy.roll(pixels, 4)
self.write(pixels * (4 - i) / 4)
time.sleep(t)
t /= 2
# time.sleep(0.5)
self.pixels = pixels
def _speak(self):
pixels = self.pixels
self.next.clear()
while not self.next.is_set():
for i in range(5, 25):
self.write(pixels * i / 24)
time.sleep(0.01)
time.sleep(0.3)
for i in range(24, 4, -1):
self.write(pixels * i / 24)
time.sleep(0.01)
time.sleep(0.3)
self._off()
def _off(self):
self.write([0] * 4 * 12)
def write(self, data):
if type(data) is list:
pixel_ring.write(3, data)
else:
pixel_ring.write(3, data.astype('uint8').tostring())
lights = GoogleHomeLights()
if __name__ == '__main__':
while True:
try:
lights.wakeup()
time.sleep(3)
lights.think()
time.sleep(3)
lights.speak()
time.sleep(3)
lights.off()
time.sleep(3)
except KeyboardInterrupt:
break
pixel_ring.off() | #!/usr/bin/env python
# Copyright (C) 2017 Seeed Technology Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modules.pixel_ring import pixel_ring
import numpy
import time
import threading
try:
import queue as Queue
except ImportError:
import Queue as Queue
class GoogleHomeLights:
def __init__(self):
self.basis = numpy.array([0] * 4 * 12)
self.basis[0 * 4 + 0] = 2
self.basis[3 * 4 + 2] = 2
self.basis[6 * 4 + 1] = 1
self.basis[6 * 4 + 2] = 1
self.basis[9 * 4 + 1] = 2
self.pixels = self.basis * 0
self.write(self.pixels)
pixel_ring.write(0, [6, 0, 0, 0])
self.next = threading.Event()
self.queue = Queue.Queue()
self.thread = threading.Thread(target=self._run)
self.thread.daemon = True
self.thread.start()
def wakeup(self, direction=0):
def f():
self._wakeup(direction)
self.queue.put(f)
def listen(self):
self.next.set()
self.queue.put(self._listen)
def think(self):
self.next.set()
self.queue.put(self._think)
def speak(self):
self.next.set()
self.queue.put(self._speak)
def off(self):
self.next.set()
self.queue.put(self._off)
def _run(self):
while True:
func = self.queue.get()
func()
def _wakeup(self, direction=0):
position = int((direction + 15) / 30) % 12
basis = numpy.roll(self.basis, position * 4)
for i in range(1, 25):
pixels = basis * i
self.write(pixels)
time.sleep(0.005)
pixels = numpy.roll(pixels, 4)
self.write(pixels)
time.sleep(0.1)
for i in range(2):
new_pixels = numpy.roll(pixels, 4)
self.write(new_pixels * 0.5 + pixels)
pixels = new_pixels
time.sleep(0.1)
self.write(pixels)
self.pixels = pixels
def _listen(self):
pixels = self.pixels
for i in range(1, 25):
self.write(pixels * i / 24)
time.sleep(0.01)
def _think(self):
pixels = self.pixels
self.next.clear()
while not self.next.is_set():
pixels = numpy.roll(pixels, 4)
self.write(pixels)
time.sleep(0.2)
t = 0.1
for i in range(0, 5):
pixels = numpy.roll(pixels, 4)
self.write(pixels * (4 - i) / 4)
time.sleep(t)
t /= 2
# time.sleep(0.5)
self.pixels = pixels
def _speak(self):
pixels = self.pixels
self.next.clear()
while not self.next.is_set():
for i in range(5, 25):
self.write(pixels * i / 24)
time.sleep(0.01)
time.sleep(0.3)
for i in range(24, 4, -1):
self.write(pixels * i / 24)
time.sleep(0.01)
time.sleep(0.3)
self._off()
def _off(self):
self.write([0] * 4 * 12)
def write(self, data):
if type(data) is list:
pixel_ring.write(3, data)
else:
pixel_ring.write(3, data.astype('uint8').tostring())
lights = GoogleHomeLights()
if __name__ == '__main__':
while True:
try:
lights.wakeup()
time.sleep(3)
lights.think()
time.sleep(3)
lights.speak()
time.sleep(3)
lights.off()
time.sleep(3)
except KeyboardInterrupt:
break
pixel_ring.off() | en | 0.832492 | #!/usr/bin/env python # Copyright (C) 2017 Seeed Technology Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # time.sleep(0.5) | 2.753712 | 3 |
python/modules_packages_libraries/models/animal_kigdom/animals.py | aloa04/practice | 0 | 8732 | class Animal():
edad:int
patas:int
ruido:str
nombre: str
kgComida: float = 0
def __init__(self, edad, patas, ruido, nombre):
self.edad =edad
self.patas = patas
self.ruido = ruido
self.nombre = nombre
def comer(self, alimento):
self.kgComida += alimento
print('Hola,', self.nombre, 'comes', self.kgComida)
def hacerRuido(self):
print('Hola', self.nombre, 'haces' , self.ruido) | class Animal():
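# --- Illustrative usage sketch (added; the values are made up) ---
def _ejemplo():
    perro = Animal(3, 4, 'guau', 'Firulais')
    perro.comer(0.5)       # accumulates kgComida and prints it
    perro.hacerRuido()     # prints the configured noise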
edad:int
patas:int
ruido:str
nombre: str
kgComida: float = 0
def __init__(self, edad, patas, ruido, nombre):
self.edad =edad
self.patas = patas
self.ruido = ruido
self.nombre = nombre
def comer(self, alimento):
self.kgComida += alimento
print('Hola,', self.nombre, 'comes', self.kgComida)
def hacerRuido(self):
print('Hola', self.nombre, 'haces' , self.ruido) | none | 1 | 3.424185 | 3 |
|
tensortools/optimize/mncp_hals.py | klmcguir/tensortools | 0 | 8733 | """
Nonnegative CP decomposition by Hierarchical alternating least squares (HALS).
With support for missing data.
"""
import numpy as np
import scipy as sci
from scipy import linalg
from tensortools.operations import unfold, khatri_rao
from tensortools.tensors import KTensor
from tensortools.optimize import FitResult, optim_utils
from .._hals_update import _hals_update
def mncp_hals(X, rank, mask, random_state=None, init='rand', **options):
"""
    Fits nonnegative CP Decomposition using the Hierarchical Alternating Least
    Squares (HALS) Method. Supports missing data.
Parameters
----------
X : (I_1, ..., I_N) array_like
A real array with nonnegative entries and ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
mask : (I_1, ..., I_N) array_like
A binary tensor with the same shape as ``X``. All entries equal to zero
correspond to held out or missing data in ``X``. All entries equal to
one correspond to observed entries in ``X`` and the decomposition is
fit to these datapoints.
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
init : str, or KTensor, optional (default ``'rand'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
    This implementation uses the Hierarchical Alternating Least Squares Method.
References
----------
Cichocki, Andrzej, and <NAME>. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Examples
--------
"""
# Mask missing elements.
X = np.copy(X)
X[~mask] = np.linalg.norm(X[mask])
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'NCP_HALS', **options)
# Store problem dimensions.
normX = linalg.norm(X[mask].ravel())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Iterate the HALS algorithm until convergence or maxiter is reached
# i) compute the N gram matrices and multiply
# ii) Compute Khatri-Rao product
# iii) Update component U_1, U_2, ... U_N
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
while result.still_optimizing:
# First, HALS update.
for n in range(X.ndim):
# Select all components, but U_n
components = [U[j] for j in range(X.ndim) if j != n]
# i) compute the N-1 gram matrices
grams = sci.multiply.reduce([arr.T.dot(arr) for arr in components])
# ii) Compute Khatri-Rao product
kr = khatri_rao(components)
p = unfold(X, n).dot(kr)
# iii) Update component U_n
_hals_update(U[n], grams, p)
# Then, update masked elements.
pred = U.full()
X[~mask] = pred[~mask]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute objective function
# grams *= U[X.ndim - 1].T.dot(U[X.ndim - 1])
# obj = np.sqrt( (sci.sum(grams) - 2 * sci.sum(U[X.ndim - 1] * p) + normX**2)) / normX
resid = X - pred
result.update(linalg.norm(resid.ravel()) / normX)
# end optimization loop, return result.
return result.finalize() | """
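# --- Illustrative usage sketch (added; an assumption, not part of the original module) ---
# Fits a rank-3 masked decomposition to a small random nonnegative tensor with
# roughly 20% of the entries held out.
def _example_masked_fit():
    X = np.random.rand(20, 20, 20)
    mask = np.random.rand(*X.shape) > 0.2     # True marks observed entries
    result = mncp_hals(X, rank=3, mask=mask)
    return result.factors                     # fitted KTensor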
Nonnegative CP decomposition by Hierarchical alternating least squares (HALS).
With support for missing data.
"""
import numpy as np
import scipy as sci
from scipy import linalg
from tensortools.operations import unfold, khatri_rao
from tensortools.tensors import KTensor
from tensortools.optimize import FitResult, optim_utils
from .._hals_update import _hals_update
def mncp_hals(X, rank, mask, random_state=None, init='rand', **options):
"""
    Fits nonnegative CP Decomposition using the Hierarchical Alternating Least
    Squares (HALS) Method. Supports missing data.
Parameters
----------
X : (I_1, ..., I_N) array_like
A real array with nonnegative entries and ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
mask : (I_1, ..., I_N) array_like
A binary tensor with the same shape as ``X``. All entries equal to zero
correspond to held out or missing data in ``X``. All entries equal to
one correspond to observed entries in ``X`` and the decomposition is
fit to these datapoints.
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
init : str, or KTensor, optional (default ``'rand'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
    This implementation uses the Hierarchical Alternating Least Squares Method.
References
----------
Cichocki, Andrzej, and <NAME>. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Examples
--------
"""
# Mask missing elements.
X = np.copy(X)
X[~mask] = np.linalg.norm(X[mask])
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'NCP_HALS', **options)
# Store problem dimensions.
normX = linalg.norm(X[mask].ravel())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Iterate the HALS algorithm until convergence or maxiter is reached
# i) compute the N gram matrices and multiply
# ii) Compute Khatri-Rao product
# iii) Update component U_1, U_2, ... U_N
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
while result.still_optimizing:
# First, HALS update.
for n in range(X.ndim):
# Select all components, but U_n
components = [U[j] for j in range(X.ndim) if j != n]
# i) compute the N-1 gram matrices
grams = sci.multiply.reduce([arr.T.dot(arr) for arr in components])
# ii) Compute Khatri-Rao product
kr = khatri_rao(components)
p = unfold(X, n).dot(kr)
# iii) Update component U_n
_hals_update(U[n], grams, p)
# Then, update masked elements.
pred = U.full()
X[~mask] = pred[~mask]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute objective function
# grams *= U[X.ndim - 1].T.dot(U[X.ndim - 1])
# obj = np.sqrt( (sci.sum(grams) - 2 * sci.sum(U[X.ndim - 1] * p) + normX**2)) / normX
resid = X - pred
result.update(linalg.norm(resid.ravel()) / normX)
# end optimization loop, return result.
return result.finalize() | en | 0.530111 | Nonnegative CP decomposition by Hierarchical alternating least squares (HALS). With support for missing data. Fits nonnegtaive CP Decomposition using the Hierarcial Alternating Least Squares (HALS) Method. Supports missing data. Parameters ---------- X : (I_1, ..., I_N) array_like A real array with nonnegative entries and ``X.ndim >= 3``. rank : integer The `rank` sets the number of components to be computed. mask : (I_1, ..., I_N) array_like A binary tensor with the same shape as ``X``. All entries equal to zero correspond to held out or missing data in ``X``. All entries equal to one correspond to observed entries in ``X`` and the decomposition is fit to these datapoints. random_state : integer, RandomState instance or None, optional (default ``None``) If integer, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by np.random. init : str, or KTensor, optional (default ``'rand'``). Specifies initial guess for KTensor factor matrices. If ``'randn'``, Gaussian random numbers are used to initialize. If ``'rand'``, uniform random numbers are used to initialize. If KTensor instance, a copy is made to initialize the optimization. options : dict, specifying fitting options. tol : float, optional (default ``tol=1E-5``) Stopping tolerance for reconstruction error. max_iter : integer, optional (default ``max_iter = 500``) Maximum number of iterations to perform before exiting. min_iter : integer, optional (default ``min_iter = 1``) Minimum number of iterations to perform before exiting. max_time : integer, optional (default ``max_time = np.inf``) Maximum computational time before exiting. verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``) Display progress. Returns ------- result : FitResult instance Object which holds the fitted results. It provides the factor matrices in form of a KTensor, ``result.factors``. Notes ----- This implemenation is using the Hierarcial Alternating Least Squares Method. References ---------- Cichocki, Andrzej, and <NAME>. "Fast local algorithms for large scale nonnegative matrix and tensor factorizations." IEICE transactions on fundamentals of electronics, communications and computer sciences 92.3: 708-721, 2009. Examples -------- # Mask missing elements. # Check inputs. # Initialize problem. # Store problem dimensions. # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Iterate the HALS algorithm until convergence or maxiter is reached # i) compute the N gram matrices and multiply # ii) Compute Khatri-Rao product # iii) Update component U_1, U_2, ... U_N # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # First, HALS update. # Select all components, but U_n # i) compute the N-1 gram matrices # ii) Compute Khatri-Rao product # iii) Update component U_n # Then, update masked elements. # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Update the optimization result, checks for convergence. # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Compute objective function # grams *= U[X.ndim - 1].T.dot(U[X.ndim - 1]) # obj = np.sqrt( (sci.sum(grams) - 2 * sci.sum(U[X.ndim - 1] * p) + normX**2)) / normX # end optimization loop, return result. | 2.508288 | 3 |
raredecay/tools/data_tools.py | jonas-eschle/raredecay | 7 | 8734 | """
@author: <NAME> "Mayou36"
DEPRECATED! USE OTHER MODULES LIKE rd.data, rd.ml, rd.reweight, rd.score and rd.stat
DEPRECATED!DEPRECATED!DEPRECATED!DEPRECATED!DEPRECATED!
Contains several tools to convert, load, save and plot data
"""
import warnings
import os
import copy
import pandas as pd
import numpy as np
import uproot
import pickle
from . import dev_tool
# both produce error (27.07.2016) when importing them if run from main.py.
# No problem when run as main...
# from raredecay.tools import dev_tool
from .. import meta_config as meta_cfg
def apply_cuts(signal_data, bkg_data, percent_sig_to_keep=100, bkg_length=None):
"""Search for best cut on value to still keep percent_sig_to_keep of signal
Parameters
----------
signal_data : 1-D numpy array
The signal
bkg_data : 1-D numpy array
The background data
percent_sig_to_keep : 0 < float <= 100
What percentage of the data to keep in order to apply the cuts.
"""
# if percent_sig_to_keep < 100:
# raise NotImplementedError("percentage of < 100 not yet imlemented")
percentile = [0, percent_sig_to_keep] # TODO: modify for percent_sig_to_keep
bkg_length_before = len(bkg_data)
bkg_length = len(bkg_data) if bkg_length in (None, 0) else bkg_length
lower_cut, upper_cut = np.percentile(signal_data, percentile)
cut_bkg = np.count_nonzero(
np.logical_or(bkg_data < lower_cut, bkg_data > upper_cut)
)
rejected_bkg = (bkg_length_before - cut_bkg) / bkg_length
return [lower_cut, upper_cut], rejected_bkg
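# --- Illustrative usage sketch (added; an assumption, not part of the original module) ---
# With synthetic 1-D arrays, the first return value is the [lower, upper] window
# covering percent_sig_to_keep of the signal; the second is the background
# fraction computed for that window.
def _example_apply_cuts():
    signal = np.random.normal(0, 1, 1000)
    background = np.random.normal(0, 3, 1000)
    (lower, upper), bkg_fraction = apply_cuts(signal, background, percent_sig_to_keep=95)
    return lower, upper, bkg_fraction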
def make_root_dict(path_to_rootfile, tree_name, branches):
"""Returns a root_numpy compatible "root-dict" of a root-tree.
Parameters
----------
path_to_rootfile : str
The exact path to the root-tree including the filename. Example:
/home/user1/data/myRootTree1.root
tree_name : str
The name of the tree
branches : str or list[str, str, str,... ]
The branches of the tree to use
"""
output = dict(filenames=path_to_rootfile, treename=tree_name, branches=branches)
output = dev_tool.entries_to_str(output)
return output
def add_to_rootfile(rootfile, new_branch, branch_name=None, overwrite=True):
"""Adds a new branch to a given root file.
.. warning:: Overwrite not working currently!
Parameters
----------
rootfile : root-dict
The ROOT-file where the data should be added
new_branch : numpy.array 1-D, list, root-dict
A one-dimensional numpy array that contains the data.
branch_name : str
        The name of the branch, resp. the name in the dtype of the array.
"""
from root_numpy import array2root
from rootpy.io import root_open
rootfile = dev_tool.entries_to_str(rootfile)
new_branch = dev_tool.entries_to_str(new_branch)
branch_name = dev_tool.entries_to_str(branch_name)
# get the right parameters
# TODO: what does that if there? an assertion maybe?
write_mode = "update"
branch_name = "new_branch1" if branch_name is None else branch_name
if isinstance(rootfile, dict):
filename = rootfile.get("filenames")
treename = rootfile.get("treename")
new_branch = to_ndarray(new_branch)
# new_branch.dtype = [(branch_name, 'f8')]
# write to ROOT-file
write_to_root = False
if os.path.isfile(filename):
with root_open(filename, mode="a") as root_file:
tree = getattr(root_file, treename) # test
if not tree.has_branch(branch_name):
write_to_root = True
# array2tree(new_branch, tree=tree)
# f.write("", TObject.kOverwrite) # overwrite, does not create friends
else:
write_mode = "recreate"
write_to_root = True
if write_to_root:
arr = np.core.records.fromarrays([new_branch], names=branch_name)
array2root(arr=arr, filename=filename, treename=treename, mode=write_mode)
return 0
else:
return 1
# TODO: remove? outdated
def format_data_weights(data_to_shape, weights):
"""Format the data and the weights perfectly. Same length and more.
Change the data to pandas.DataFrame and fill the weights with ones where
nothing or None is specified. Returns both in lists.
Very useful to loop over several data and weights.
Parameters
----------
data_to_shape : (root_dict, numpy.array, pandas.DataFrame)
The data for which we apply the weights. Usual 2-D shape.
weights : (list, numpy.array, pandas.DataFrame, None)
The weights to be reshaped
*Best format* :
[array(weights),array(weights), None, array(weights),...]
*None* can be used if no special weights are specified.
        If weights contains fewer "weight-containing array-like objects" than
        data_to_shape does, the difference will be filled with *1*
Return
------
out : list(pandas.DataFrame(data), pandas.DataFrame(data),...)
Return a list containing data
out : list(numpy.array(weight), numpy.array(weight),...)
Return a list with the weights, converted and filled.
"""
    # convert the data
if not isinstance(data_to_shape, list):
data_to_shape = [data_to_shape]
data_to_shape = list(map(to_pandas, data_to_shape))
# convert the weights
if not isinstance(weights, list):
weights = [weights]
if weights[0] is not None:
if len(weights[0]) == 1:
weights = [weights]
# convert to pandas
assert isinstance(weights, list), "weights could not be converted to list"
for data_id, data in enumerate(data_to_shape):
if data_id >= len(weights):
weights.append(None)
if weights[data_id] is None:
weights[data_id] = np.array([1] * len(data))
weights[data_id] = to_pandas(weights[data_id]).squeeze().values
return data_to_shape, weights
def obj_to_string(objects, separator=None):
"""Return a string containing all objects as strings, separated by the separator.
Useful for automatic conversion for different types. The following objects
will automatically be converted:
- None will be omitted
Parameters
----------
objects : any object or list(obj, obj, ...) with a string representation
The objects will be converted to a string and concatenated, separated
by the separator.
separator : str
The separator between the objects. Default is " - ".
"""
objects = dev_tool.entries_to_str(objects)
if isinstance(objects, str): # no need to change things
return objects
separator = " - " if separator is None else separator
assert isinstance(separator, str), "Separator not a str"
objects = to_list(objects)
objects = [str(obj) for obj in objects if obj not in (None, "")] # remove Nones
string_out = ""
for word in objects:
string_out += word + separator if word != objects[-1] else word
return string_out
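# Usage sketch:
#     obj_to_string(["pt", 25, None, "GeV"])     # -> 'pt - 25 - GeV' (None is dropped)
#     obj_to_string(["a", "b"], separator=", ")  # -> 'a, b'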
def is_root(data_to_check):
"""Check whether a given data is a root file. Needs dicts to be True."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, dict):
path_name = data_to_check.get("filenames")
# assert isinstance(path_name, str), ("'filenames' of the dictionary " +
# str(data_to_check) + "is not a string")
if path_name.endswith(meta_cfg.ROOT_DATATYPE):
flag = True
return flag
def is_list(data_to_check):
"""Check whether the given data is a list."""
flag = False
if isinstance(data_to_check, list):
flag = True
return flag
def is_ndarray(data_to_check):
"""Check whether a given data is an ndarray."""
flag = False
if isinstance(data_to_check, np.ndarray):
flag = True
return flag
def is_pickle(data_to_check):
"""Check if the file is a pickled file (checks the ending)."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, str):
if data_to_check.endswith(meta_cfg.PICKLE_DATATYPE):
flag = True
return flag
def to_list(data_in):
"""Convert the data into a list. Does not pack lists into a new one.
If your input is, for example, a string or a list of strings, or a
tuple filled with strings, you have, in general, a problem:
    - just iterating through the object will fail because it iterates through the
    characters of the string.
    - using list(obj) converts the tuple, leaves the list, but splits a string's
    characters into single elements of a new list.
    - using [obj] creates a list containing a string, but also a list containing
    a list or a tuple, which you did not want.
    Solution: use to_list(obj), which creates a new list in case the object is
    a single object (a string is a single object in this sense) or converts
to a list if the object is already a container for several objects.
Parameters
----------
data_in : any obj
So far, any object can be entered.
Returns
-------
out : list
Return a list containing the object or the object converted to a list.
"""
if isinstance(data_in, (str, int, float)):
data_in = [data_in]
data_in = list(data_in)
return data_in
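# Usage sketch:
#     to_list("B_M")            # -> ['B_M']  (the string is kept whole)
#     to_list(("B_M", "B_PT"))  # -> ['B_M', 'B_PT']
#     to_list([1, 2, 3])        # -> [1, 2, 3]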
def to_ndarray(data_in, float_array=False):
"""Convert data to numpy array (containing only floats).
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
import uproot
if is_root(data_in):
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
branches = to_list(data_in["branches"])
loaded = tree.arrays(branches, library="np")
loaded = np.stack([loaded[branch] for branch in branches])
if len(branches) == 1:
loaded = loaded[0]
data_in = loaded
# change numpy.void to normal floats
if isinstance(data_in, (pd.Series, pd.DataFrame)):
test_sample = data_in.iloc[0]
else:
test_sample = data_in[0]
if isinstance(test_sample, np.void):
data_in = np.array([val[0] for val in data_in])
if isinstance(data_in, (np.recarray, np.ndarray)):
data_in = data_in.tolist()
if is_list(data_in) or isinstance(data_in, pd.Series):
data_in = np.array(data_in)
if not isinstance(data_in[0], (int, float, str, bool)):
if float_array:
iter_data = copy.deepcopy(data_in)
# HACK
data_in = np.ndarray(shape=len(data_in), dtype=data_in.dtype)
# HACK END
for i, element in enumerate(iter_data):
if not isinstance(element, (int, float, str, bool)):
# does that work or should we iterate over copy?
try:
element_len = len(element)
except TypeError:
element_len = 1
if element_len > 1:
data_in[i] = to_ndarray(element)
float_array = False
elif element_len == 1:
data_in[i] = float(element)
warnings.warn("Could not force float array")
if float_array:
data_in = np.asfarray(data_in)
assert is_ndarray(data_in), "Error, could not convert data to numpy array"
return data_in
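# Usage sketch (the root-dict branch additionally needs uproot and a real file on disk):
#     to_ndarray([1, 2, 3])              # -> array([1, 2, 3])
#     to_ndarray(pd.Series([0.1, 0.2]))  # -> array([0.1, 0.2])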
def to_pandas_old(data_in, index=None, columns=None):
"""Convert data from numpy or root to pandas dataframe.
Convert data safely to pandas, whatever the format is.
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
# TODO: generalize
root_index_name = "__index__"
data_in = dev_tool.entries_to_str(data_in)
    root_index = None  # ensure it is defined even for non-ROOT input
    if is_root(data_in):
import root_numpy
if root_index_name in root_numpy.list_branches(
filename=data_in["filenames"], treename=data_in.get("treename")
):
root_index = root_numpy.root2array(
filenames=data_in["filenames"],
treename=data_in.get("treename"),
selection=data_in.get("selection"),
branches=root_index_name,
)
data_in = root_numpy.root2array(**data_in) # why **? it's a root dict
if is_list(data_in):
data_in = np.array(data_in)
if is_ndarray(data_in):
if (isinstance(columns, (list, tuple)) and len(columns) == 1) or isinstance(
columns, str
):
data_in = to_ndarray(data_in)
data_in = pd.DataFrame(data_in, columns=columns, index=root_index)
if index is not None:
data_in = data_in.loc[index]
elif isinstance(data_in, pd.DataFrame):
pass
else:
        raise TypeError("Could not convert data to pandas. Data: " + str(data_in))
return data_in
def to_pandas(data_in, index=None, columns=None):
"""Convert data from numpy or root to pandas dataframe.
Convert data safely to pandas, whatever the format is.
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
data_in = dev_tool.entries_to_str(data_in)
if is_root(data_in):
if columns is None:
columns = data_in["branches"]
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
if "__index__" in tree.keys(): # legacy, we can also convert this
return to_pandas_old(data_in=data_in, index=index, columns=columns)
branches = to_list(columns)
loaded = tree.arrays(branches, library="pd")
if index is not None:
loaded = loaded.loc[index]
return loaded
else:
# HACK START
return to_pandas_old(data_in=data_in, index=index, columns=columns)
# HACK END
# from root_pandas import read_root
#
# root_pandas_numpy_map = dict(filenames='paths', treename='key', branches='columns',
# selection='where')
#
# if is_root(data_in):
# is_root2array = False
# for key, val in copy.deepcopy(list(data_in.items())):
# if key in root_pandas_numpy_map:
# is_root2array = True
# del data_in[key]
# data_in[root_pandas_numpy_map[key]] = val
# data_in['columns'] = to_list(data_in['columns'])
# if is_root2array:
# data_in['columns'] = ['noexpand:'+col for col in data_in['columns'] if not col.startswith('noexpand:')]
# remove the noexpand:
# data_in = read_root(**data_in) # why **? it's a root dict
# if is_list(data_in):
# data_in = np.array(data_in)
# if is_ndarray(data_in):
# if ((isinstance(columns, (list, tuple)) and len(columns) == 1) or
# isinstance(columns, string)):
#
# data_in = to_ndarray(data_in)
# data_in = pd.DataFrame(data_in, columns=columns)
# if index is not None:
# data_in = data_in.loc[index]
# elif isinstance(data_in, pd.DataFrame):
# pass
# else:
# raise TypeError("Could not convert data to pandas. Data: " + data_in)
# return data_in
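# Usage sketch for to_pandas (the ROOT variant needs uproot and an existing file on disk):
#     df = to_pandas(make_root_dict("/home/user1/data/myRootTree1.root", "DecayTree", ["B_M"]))
#     df = to_pandas(np.array([[1.0, 2.0], [3.0, 4.0]]), columns=["B_M", "B_PT"])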
def adv_return(return_value, save_name=None):
"""Save the value if save_name specified, otherwise just return input.
Can be wrapped around the return value. Without any arguments, the return
of your function will be exactly the same. With arguments, the value can
be saved (**pickled**) before it is returned.
Parameters
----------
return_value : any python object
The python object which should be pickled.
save_name : str, None
| The (file-)name for the pickled file. File-extension will be added \
automatically if specified in *raredecay.meta_config*.
| If *None* is passed, the object won't be pickled.
Return
------
out : python object
Return return_value without changes.
**Usage**:
Instead of a simple return statement
>>> return my_variable/my_object
one can use the **completely equivalent** statement
>>> return adv_return(my_variable/my_object)
If the return value should be saved in addition to be returned, use
>>> return adv_return(my_variable/my_object, save_name='my_object.pickle')
(*the .pickle ending is not required but added automatically if omitted*)
which returns the value and saves it.
"""
save_name = dev_tool.entries_to_str(save_name)
if save_name not in (None, False):
if isinstance(save_name, str):
save_name = meta_cfg.PICKLE_PATH + save_name
if not is_pickle(save_name):
save_name += "." + meta_cfg.PICKLE_DATATYPE
with open(str(save_name), "wb") as f:
pickle.dump(return_value, f, meta_cfg.PICKLE_PROTOCOL)
print(str(return_value) + " pickled to " + save_name)
else:
pass
# HACK how to solve logger problem?
# logger.error("Could not pickle data, name for file (" +
# str(save_name) + ") is not a string!" +
# "\n Therefore, the following data was only returned" +
# " but not saved! \n Data:" + str(return_value))
return return_value
def try_unpickle(file_to_unpickle, use_metapath_bkwcomp=False):
"""Try to unpickle a file and return, otherwise just return input."""
file_to_unpickle = dev_tool.entries_to_str(file_to_unpickle)
if is_pickle(file_to_unpickle):
extra_path = meta_cfg.PICKLE_PATH if use_metapath_bkwcomp else ""
with open(extra_path + file_to_unpickle, "rb") as f:
file_to_unpickle = pickle.load(f)
return file_to_unpickle
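# Usage sketch (the pickle file name below is hypothetical):
#     cfg = try_unpickle("run_config.pickle")    # unpickled if the extension matches
#     cfg = try_unpickle({"already": "loaded"})  # anything else is returned untouched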
| """
@author: <NAME> "Mayou36"
DEPRECATED! USE OTHER MODULES LIKE rd.data, rd.ml, rd.reweight, rd.score and rd.stat
DEPRECATED!DEPRECATED!DEPRECATED!DEPRECATED!DEPRECATED!
Contains several tools to convert, load, save and plot data
"""
import warnings
import os
import copy
import pandas as pd
import numpy as np
import uproot
import pickle
from . import dev_tool
# both produce error (27.07.2016) when importing them if run from main.py.
# No problem when run as main...
# from raredecay.tools import dev_tool
from .. import meta_config as meta_cfg
def apply_cuts(signal_data, bkg_data, percent_sig_to_keep=100, bkg_length=None):
"""Search for best cut on value to still keep percent_sig_to_keep of signal
Parameters
----------
signal_data : 1-D numpy array
The signal
bkg_data : 1-D numpy array
The background data
percent_sig_to_keep : 0 < float <= 100
What percentage of the data to keep in order to apply the cuts.
"""
# if percent_sig_to_keep < 100:
    # raise NotImplementedError("percentage of < 100 not yet implemented")
percentile = [0, percent_sig_to_keep] # TODO: modify for percent_sig_to_keep
bkg_length_before = len(bkg_data)
bkg_length = len(bkg_data) if bkg_length in (None, 0) else bkg_length
lower_cut, upper_cut = np.percentile(signal_data, percentile)
cut_bkg = np.count_nonzero(
np.logical_or(bkg_data < lower_cut, bkg_data > upper_cut)
)
rejected_bkg = (bkg_length_before - cut_bkg) / bkg_length
return [lower_cut, upper_cut], rejected_bkg
def make_root_dict(path_to_rootfile, tree_name, branches):
"""Returns a root_numpy compatible "root-dict" of a root-tree.
Parameters
----------
path_to_rootfile : str
The exact path to the root-tree including the filename. Example:
/home/user1/data/myRootTree1.root
tree_name : str
The name of the tree
branches : str or list[str, str, str,... ]
The branches of the tree to use
"""
output = dict(filenames=path_to_rootfile, treename=tree_name, branches=branches)
output = dev_tool.entries_to_str(output)
return output
def add_to_rootfile(rootfile, new_branch, branch_name=None, overwrite=True):
"""Adds a new branch to a given root file.
.. warning:: Overwrite not working currently!
Parameters
----------
rootfile : root-dict
The ROOT-file where the data should be added
new_branch : numpy.array 1-D, list, root-dict
A one-dimensional numpy array that contains the data.
branch_name : str
        The name of the branch, i.e. the name used in the dtype of the array.
"""
from root_numpy import array2root
from rootpy.io import root_open
rootfile = dev_tool.entries_to_str(rootfile)
new_branch = dev_tool.entries_to_str(new_branch)
branch_name = dev_tool.entries_to_str(branch_name)
# get the right parameters
    # TODO: what is that 'if' there for? an assertion, maybe?
write_mode = "update"
branch_name = "new_branch1" if branch_name is None else branch_name
if isinstance(rootfile, dict):
filename = rootfile.get("filenames")
treename = rootfile.get("treename")
new_branch = to_ndarray(new_branch)
# new_branch.dtype = [(branch_name, 'f8')]
# write to ROOT-file
write_to_root = False
if os.path.isfile(filename):
with root_open(filename, mode="a") as root_file:
tree = getattr(root_file, treename) # test
if not tree.has_branch(branch_name):
write_to_root = True
# array2tree(new_branch, tree=tree)
# f.write("", TObject.kOverwrite) # overwrite, does not create friends
else:
write_mode = "recreate"
write_to_root = True
if write_to_root:
arr = np.core.records.fromarrays([new_branch], names=branch_name)
array2root(arr=arr, filename=filename, treename=treename, mode=write_mode)
return 0
else:
return 1
# TODO: remove? outdated
def format_data_weights(data_to_shape, weights):
"""Format the data and the weights perfectly. Same length and more.
Change the data to pandas.DataFrame and fill the weights with ones where
nothing or None is specified. Returns both in lists.
Very useful to loop over several data and weights.
Parameters
----------
data_to_shape : (root_dict, numpy.array, pandas.DataFrame)
The data for which we apply the weights. Usual 2-D shape.
weights : (list, numpy.array, pandas.DataFrame, None)
The weights to be reshaped
*Best format* :
[array(weights),array(weights), None, array(weights),...]
*None* can be used if no special weights are specified.
    If weights contains fewer weight arrays than data_to_shape contains
    datasets, the difference will be filled with *1*
Return
------
out : list(pandas.DataFrame(data), pandas.DataFrame(data),...)
Return a list containing data
out : list(numpy.array(weight), numpy.array(weight),...)
Return a list with the weights, converted and filled.
"""
    # convert the data
if not isinstance(data_to_shape, list):
data_to_shape = [data_to_shape]
data_to_shape = list(map(to_pandas, data_to_shape))
# convert the weights
if not isinstance(weights, list):
weights = [weights]
if weights[0] is not None:
if len(weights[0]) == 1:
weights = [weights]
# convert to pandas
assert isinstance(weights, list), "weights could not be converted to list"
for data_id, data in enumerate(data_to_shape):
if data_id >= len(weights):
weights.append(None)
if weights[data_id] is None:
weights[data_id] = np.array([1] * len(data))
weights[data_id] = to_pandas(weights[data_id]).squeeze().values
return data_to_shape, weights
def obj_to_string(objects, separator=None):
"""Return a string containing all objects as strings, separated by the separator.
Useful for automatic conversion for different types. The following objects
will automatically be converted:
- None will be omitted
Parameters
----------
objects : any object or list(obj, obj, ...) with a string representation
The objects will be converted to a string and concatenated, separated
by the separator.
separator : str
The separator between the objects. Default is " - ".
"""
objects = dev_tool.entries_to_str(objects)
if isinstance(objects, str): # no need to change things
return objects
separator = " - " if separator is None else separator
assert isinstance(separator, str), "Separator not a str"
objects = to_list(objects)
objects = [str(obj) for obj in objects if obj not in (None, "")] # remove Nones
string_out = ""
for word in objects:
string_out += word + separator if word != objects[-1] else word
return string_out
def is_root(data_to_check):
"""Check whether a given data is a root file. Needs dicts to be True."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, dict):
path_name = data_to_check.get("filenames")
# assert isinstance(path_name, str), ("'filenames' of the dictionary " +
# str(data_to_check) + "is not a string")
if path_name.endswith(meta_cfg.ROOT_DATATYPE):
flag = True
return flag
def is_list(data_to_check):
"""Check whether the given data is a list."""
flag = False
if isinstance(data_to_check, list):
flag = True
return flag
def is_ndarray(data_to_check):
"""Check whether a given data is an ndarray."""
flag = False
if isinstance(data_to_check, np.ndarray):
flag = True
return flag
def is_pickle(data_to_check):
"""Check if the file is a pickled file (checks the ending)."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, str):
if data_to_check.endswith(meta_cfg.PICKLE_DATATYPE):
flag = True
return flag
def to_list(data_in):
"""Convert the data into a list. Does not pack lists into a new one.
If your input is, for example, a string or a list of strings, or a
tuple filled with strings, you have, in general, a problem:
    - just iterating through the object will fail because it iterates through the
    characters of the string.
    - using list(obj) converts the tuple, leaves the list, but splits a string's
    characters into single elements of a new list.
    - using [obj] creates a list containing a string, but also a list containing
    a list or a tuple, which you did not want.
    Solution: use to_list(obj), which creates a new list in case the object is
    a single object (a string is a single object in this sense) or converts
to a list if the object is already a container for several objects.
Parameters
----------
data_in : any obj
So far, any object can be entered.
Returns
-------
out : list
Return a list containing the object or the object converted to a list.
"""
if isinstance(data_in, (str, int, float)):
data_in = [data_in]
data_in = list(data_in)
return data_in
def to_ndarray(data_in, float_array=False):
"""Convert data to numpy array (containing only floats).
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
import uproot
if is_root(data_in):
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
branches = to_list(data_in["branches"])
loaded = tree.arrays(branches, library="np")
loaded = np.stack([loaded[branch] for branch in branches])
if len(branches) == 1:
loaded = loaded[0]
data_in = loaded
# change numpy.void to normal floats
if isinstance(data_in, (pd.Series, pd.DataFrame)):
test_sample = data_in.iloc[0]
else:
test_sample = data_in[0]
if isinstance(test_sample, np.void):
data_in = np.array([val[0] for val in data_in])
if isinstance(data_in, (np.recarray, np.ndarray)):
data_in = data_in.tolist()
if is_list(data_in) or isinstance(data_in, pd.Series):
data_in = np.array(data_in)
if not isinstance(data_in[0], (int, float, str, bool)):
if float_array:
iter_data = copy.deepcopy(data_in)
# HACK
data_in = np.ndarray(shape=len(data_in), dtype=data_in.dtype)
# HACK END
for i, element in enumerate(iter_data):
if not isinstance(element, (int, float, str, bool)):
# does that work or should we iterate over copy?
try:
element_len = len(element)
except TypeError:
element_len = 1
if element_len > 1:
data_in[i] = to_ndarray(element)
float_array = False
elif element_len == 1:
data_in[i] = float(element)
warnings.warn("Could not force float array")
if float_array:
data_in = np.asfarray(data_in)
assert is_ndarray(data_in), "Error, could not convert data to numpy array"
return data_in
def to_pandas_old(data_in, index=None, columns=None):
"""Convert data from numpy or root to pandas dataframe.
Convert data safely to pandas, whatever the format is.
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
# TODO: generalize
root_index_name = "__index__"
data_in = dev_tool.entries_to_str(data_in)
    root_index = None  # ensure it is defined even for non-ROOT input
    if is_root(data_in):
import root_numpy
if root_index_name in root_numpy.list_branches(
filename=data_in["filenames"], treename=data_in.get("treename")
):
root_index = root_numpy.root2array(
filenames=data_in["filenames"],
treename=data_in.get("treename"),
selection=data_in.get("selection"),
branches=root_index_name,
)
data_in = root_numpy.root2array(**data_in) # why **? it's a root dict
if is_list(data_in):
data_in = np.array(data_in)
if is_ndarray(data_in):
if (isinstance(columns, (list, tuple)) and len(columns) == 1) or isinstance(
columns, str
):
data_in = to_ndarray(data_in)
data_in = pd.DataFrame(data_in, columns=columns, index=root_index)
if index is not None:
data_in = data_in.loc[index]
elif isinstance(data_in, pd.DataFrame):
pass
else:
        raise TypeError("Could not convert data to pandas. Data: " + str(data_in))
return data_in
def to_pandas(data_in, index=None, columns=None):
"""Convert data from numpy or root to pandas dataframe.
Convert data safely to pandas, whatever the format is.
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
data_in = dev_tool.entries_to_str(data_in)
if is_root(data_in):
if columns is None:
columns = data_in["branches"]
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
if "__index__" in tree.keys(): # legacy, we can also convert this
return to_pandas_old(data_in=data_in, index=index, columns=columns)
branches = to_list(columns)
loaded = tree.arrays(branches, library="pd")
if index is not None:
loaded = loaded.loc[index]
return loaded
else:
# HACK START
return to_pandas_old(data_in=data_in, index=index, columns=columns)
# HACK END
# from root_pandas import read_root
#
# root_pandas_numpy_map = dict(filenames='paths', treename='key', branches='columns',
# selection='where')
#
# if is_root(data_in):
# is_root2array = False
# for key, val in copy.deepcopy(list(data_in.items())):
# if key in root_pandas_numpy_map:
# is_root2array = True
# del data_in[key]
# data_in[root_pandas_numpy_map[key]] = val
# data_in['columns'] = to_list(data_in['columns'])
# if is_root2array:
# data_in['columns'] = ['noexpand:'+col for col in data_in['columns'] if not col.startswith('noexpand:')]
# remove the noexpand:
# data_in = read_root(**data_in) # why **? it's a root dict
# if is_list(data_in):
# data_in = np.array(data_in)
# if is_ndarray(data_in):
# if ((isinstance(columns, (list, tuple)) and len(columns) == 1) or
# isinstance(columns, string)):
#
# data_in = to_ndarray(data_in)
# data_in = pd.DataFrame(data_in, columns=columns)
# if index is not None:
# data_in = data_in.loc[index]
# elif isinstance(data_in, pd.DataFrame):
# pass
# else:
# raise TypeError("Could not convert data to pandas. Data: " + data_in)
# return data_in
def adv_return(return_value, save_name=None):
"""Save the value if save_name specified, otherwise just return input.
Can be wrapped around the return value. Without any arguments, the return
of your function will be exactly the same. With arguments, the value can
be saved (**pickled**) before it is returned.
Parameters
----------
return_value : any python object
The python object which should be pickled.
save_name : str, None
| The (file-)name for the pickled file. File-extension will be added \
automatically if specified in *raredecay.meta_config*.
| If *None* is passed, the object won't be pickled.
Return
------
out : python object
Return return_value without changes.
**Usage**:
Instead of a simple return statement
>>> return my_variable/my_object
one can use the **completely equivalent** statement
>>> return adv_return(my_variable/my_object)
If the return value should be saved in addition to be returned, use
>>> return adv_return(my_variable/my_object, save_name='my_object.pickle')
(*the .pickle ending is not required but added automatically if omitted*)
which returns the value and saves it.
"""
save_name = dev_tool.entries_to_str(save_name)
if save_name not in (None, False):
if isinstance(save_name, str):
save_name = meta_cfg.PICKLE_PATH + save_name
if not is_pickle(save_name):
save_name += "." + meta_cfg.PICKLE_DATATYPE
with open(str(save_name), "wb") as f:
pickle.dump(return_value, f, meta_cfg.PICKLE_PROTOCOL)
print(str(return_value) + " pickled to " + save_name)
else:
pass
# HACK how to solve logger problem?
# logger.error("Could not pickle data, name for file (" +
# str(save_name) + ") is not a string!" +
# "\n Therefore, the following data was only returned" +
# " but not saved! \n Data:" + str(return_value))
return return_value
def try_unpickle(file_to_unpickle, use_metapath_bkwcomp=False):
"""Try to unpickle a file and return, otherwise just return input."""
file_to_unpickle = dev_tool.entries_to_str(file_to_unpickle)
if is_pickle(file_to_unpickle):
extra_path = meta_cfg.PICKLE_PATH if use_metapath_bkwcomp else ""
with open(extra_path + file_to_unpickle, "rb") as f:
file_to_unpickle = pickle.load(f)
return file_to_unpickle
| en | 0.643366 | @author: <NAME> "Mayou36" DEPRECEATED! USE OTHER MODULES LIKE rd.data, rd.ml, rd.reweight, rd.score and rd.stat DEPRECEATED!DEPRECEATED!DEPRECEATED!DEPRECEATED!DEPRECEATED! Contains several tools to convert, load, save and plot data # both produce error (27.07.2016) when importing them if run from main.py. # No problem when run as main... # from raredecay.tools import dev_tool Search for best cut on value to still keep percent_sig_to_keep of signal Parameters ---------- signal_data : 1-D numpy array The signal bkg_data : 1-D numpy array The background data percent_sig_to_keep : 0 < float <= 100 What percentage of the data to keep in order to apply the cuts. # if percent_sig_to_keep < 100: # raise NotImplementedError("percentage of < 100 not yet imlemented") # TODO: modify for percent_sig_to_keep Returns a root_numpy compatible "root-dict" of a root-tree. Parameters ---------- path_to_rootfile : str The exact path to the root-tree including the filename. Example: /home/user1/data/myRootTree1.root tree_name : str The name of the tree branches : str or list[str, str, str,... ] The branches of the tree to use Adds a new branch to a given root file. .. warning:: Overwrite not working currently! Parameters ---------- rootfile : root-dict The ROOT-file where the data should be added new_branch : numpy.array 1-D, list, root-dict A one-dimensional numpy array that contains the data. branch_name : str The name of the branche resp. the name in the dtype of the array. # get the right parameters # TODO: what does that if there? an assertion maybe? # new_branch.dtype = [(branch_name, 'f8')] # write to ROOT-file # test # array2tree(new_branch, tree=tree) # f.write("", TObject.kOverwrite) # overwrite, does not create friends # TODO: remove? outdated Format the data and the weights perfectly. Same length and more. Change the data to pandas.DataFrame and fill the weights with ones where nothing or None is specified. Returns both in lists. Very useful to loop over several data and weights. Parameters ---------- data_to_shape : (root_dict, numpy.array, pandas.DataFrame) The data for which we apply the weights. Usual 2-D shape. weights : (list, numpy.array, pandas.DataFrame, None) The weights to be reshaped *Best format* : [array(weights),array(weights), None, array(weights),...] *None* can be used if no special weights are specified. If weights contains less "weight-containing array-like objects" then data_to_shape does, the difference will be filled with *1* Return ------ out : list(pandas.DataFrame(data), pandas.DataFrame(data),...) Return a list containing data out : list(numpy.array(weight), numpy.array(weight),...) Return a list with the weights, converted and filled. # conver the data # convert the weights # convert to pandas Return a string containing all objects as strings, separated by the separator. Useful for automatic conversion for different types. The following objects will automatically be converted: - None will be omitted Parameters ---------- objects : any object or list(obj, obj, ...) with a string representation The objects will be converted to a string and concatenated, separated by the separator. separator : str The separator between the objects. Default is " - ". # no need to change things # remove Nones Check whether a given data is a root file. Needs dicts to be True. # assert isinstance(path_name, str), ("'filenames' of the dictionary " + # str(data_to_check) + "is not a string") Check whether the given data is a list. Check whether a given data is an ndarray. 
Check if the file is a pickled file (checks the ending). Convert the data into a list. Does not pack lists into a new one. If your input is, for example, a string or a list of strings, or a tuple filled with strings, you have, in general, a problem: - just iterate through the object will fail because it iterates through the characters of the string. - using list(obj) converts the tuple, leaves the list but splits the strings characters into single elements of a new list. - using [obj] creates a list containing a string, but also a list containing a list or a tuple, which you did not want to. Solution: use to_list(obj), which creates a new list in case the object is a single object (a string is a single object in this sence) or converts to a list if the object is already a container for several objects. Parameters ---------- data_in : any obj So far, any object can be entered. Returns ------- out : list Return a list containing the object or the object converted to a list. Convert data to numpy array (containing only floats). Parameters ---------- data_in : any reasonable data The data to be converted # change numpy.void to normal floats # HACK # HACK END # does that work or should we iterate over copy? Convert data from numpy or root to pandas dataframe. Convert data safely to pandas, whatever the format is. Parameters ---------- data_in : any reasonable data The data to be converted # TODO: generalize # why **? it's a root dict Convert data from numpy or root to pandas dataframe. Convert data safely to pandas, whatever the format is. Parameters ---------- data_in : any reasonable data The data to be converted # legacy, we can also convert this # HACK START # HACK END # from root_pandas import read_root # # root_pandas_numpy_map = dict(filenames='paths', treename='key', branches='columns', # selection='where') # # if is_root(data_in): # is_root2array = False # for key, val in copy.deepcopy(list(data_in.items())): # if key in root_pandas_numpy_map: # is_root2array = True # del data_in[key] # data_in[root_pandas_numpy_map[key]] = val # data_in['columns'] = to_list(data_in['columns']) # if is_root2array: # data_in['columns'] = ['noexpand:'+col for col in data_in['columns'] if not col.startswith('noexpand:')] # remove the noexpand: # data_in = read_root(**data_in) # why **? it's a root dict # if is_list(data_in): # data_in = np.array(data_in) # if is_ndarray(data_in): # if ((isinstance(columns, (list, tuple)) and len(columns) == 1) or # isinstance(columns, string)): # # data_in = to_ndarray(data_in) # data_in = pd.DataFrame(data_in, columns=columns) # if index is not None: # data_in = data_in.loc[index] # elif isinstance(data_in, pd.DataFrame): # pass # else: # raise TypeError("Could not convert data to pandas. Data: " + data_in) # return data_in Save the value if save_name specified, otherwise just return input. Can be wrapped around the return value. Without any arguments, the return of your function will be exactly the same. With arguments, the value can be saved (**pickled**) before it is returned. Parameters ---------- return_value : any python object The python object which should be pickled. save_name : str, None | The (file-)name for the pickled file. File-extension will be added \ automatically if specified in *raredecay.meta_config*. | If *None* is passed, the object won't be pickled. Return ------ out : python object Return return_value without changes. 
**Usage**: Instead of a simple return statement >>> return my_variable/my_object one can use the **completely equivalent** statement >>> return adv_return(my_variable/my_object) If the return value should be saved in addition to be returned, use >>> return adv_return(my_variable/my_object, save_name='my_object.pickle') (*the .pickle ending is not required but added automatically if omitted*) which returns the value and saves it. # HACK how to solve logger problem? # logger.error("Could not pickle data, name for file (" + # str(save_name) + ") is not a string!" + # "\n Therefore, the following data was only returned" + # " but not saved! \n Data:" + str(return_value)) Try to unpickle a file and return, otherwise just return input. | 1.916833 | 2 |
toontown/coghq/boardbothq/BoardOfficeManagerAI.py | LittleNed/toontown-stride | 1 | 8735 | <reponame>LittleNed/toontown-stride<filename>toontown/coghq/boardbothq/BoardOfficeManagerAI.py
from direct.directnotify import DirectNotifyGlobal
import DistributedBoardOfficeAI
from toontown.toonbase import ToontownGlobals
from toontown.coghq.boardbothq import BoardOfficeLayout
from direct.showbase import DirectObject
import random
class BoardOfficeManagerAI(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('BoardOfficeManagerAI')
boardofficeId = None
def __init__(self, air):
DirectObject.DirectObject.__init__(self)
self.air = air
def getDoId(self):
return 0
def createBoardOffice(self, boardofficeId, players):
for avId in players:
if bboard.has('boardofficeId-%s' % avId):
boardofficeId = bboard.get('boardofficeId-%s' % avId)
break
numFloors = ToontownGlobals.BoardOfficeNumFloors[boardofficeId]
floor = random.randrange(numFloors)
for avId in players:
if bboard.has('mintFloor-%s' % avId):
floor = bboard.get('mintFloor-%s' % avId)
floor = max(0, floor)
floor = min(floor, numFloors - 1)
break
for avId in players:
if bboard.has('mintRoom-%s' % avId):
roomId = bboard.get('mintRoom-%s' % avId)
for i in xrange(numFloors):
layout = BoardOfficeLayout.BoardOfficeLayout(boardofficeId, i)
if roomId in layout.getRoomIds():
floor = i
else:
from toontown.coghq.boardbothq import BoardOfficeRoomSpecs
roomName = BoardOfficeRoomSpecs.BoardOfficeRoomId2RoomName[roomId]
BoardOfficeManagerAI.notify.warning('room %s (%s) not found in any floor of mint %s' % (roomId, roomName, boardofficeId))
mintZone = self.air.allocateZone()
mint = DistributedBoardOfficeAI.DistributedBoardOfficeAI(self.air, boardofficeId, mintZone, floor, players)
mint.generateWithRequired(mintZone)
return mintZone
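# Usage sketch (air is the AI repository instance; the boardoffice id and avatar ids below are made up):
#     mgr = BoardOfficeManagerAI(air)
#     zoneId = mgr.createBoardOffice(12500, [100000001, 100000002])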
| from direct.directnotify import DirectNotifyGlobal
import DistributedBoardOfficeAI
from toontown.toonbase import ToontownGlobals
from toontown.coghq.boardbothq import BoardOfficeLayout
from direct.showbase import DirectObject
import random
class BoardOfficeManagerAI(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('BoardOfficeManagerAI')
boardofficeId = None
def __init__(self, air):
DirectObject.DirectObject.__init__(self)
self.air = air
def getDoId(self):
return 0
def createBoardOffice(self, boardofficeId, players):
for avId in players:
if bboard.has('boardofficeId-%s' % avId):
boardofficeId = bboard.get('boardofficeId-%s' % avId)
break
numFloors = ToontownGlobals.BoardOfficeNumFloors[boardofficeId]
floor = random.randrange(numFloors)
for avId in players:
if bboard.has('mintFloor-%s' % avId):
floor = bboard.get('mintFloor-%s' % avId)
floor = max(0, floor)
floor = min(floor, numFloors - 1)
break
for avId in players:
if bboard.has('mintRoom-%s' % avId):
roomId = bboard.get('mintRoom-%s' % avId)
for i in xrange(numFloors):
layout = BoardOfficeLayout.BoardOfficeLayout(boardofficeId, i)
if roomId in layout.getRoomIds():
floor = i
else:
from toontown.coghq.boardbothq import BoardOfficeRoomSpecs
roomName = BoardOfficeRoomSpecs.BoardOfficeRoomId2RoomName[roomId]
BoardOfficeManagerAI.notify.warning('room %s (%s) not found in any floor of mint %s' % (roomId, roomName, boardofficeId))
mintZone = self.air.allocateZone()
mint = DistributedBoardOfficeAI.DistributedBoardOfficeAI(self.air, boardofficeId, mintZone, floor, players)
mint.generateWithRequired(mintZone)
return mintZone | none | 1 | 2.272752 | 2 |
|
ansiblemetrics/utils.py | radon-h2020/AnsibleMetrics | 1 | 8736 | <reponame>radon-h2020/AnsibleMetrics
from typing import Union
def key_value_list(d: Union[dict, list], key=None) -> list:
"""
    This function iterates over all the key-value pairs of a dictionary and returns a list of tuples (key, value) in which the value is a primitive (i.e., no list or dict), e.g., a string or a number.
d -- a dictionary to iterate through
"""
if not d:
return []
if not isinstance(d, dict) and not isinstance(d, list):
return []
key_values = []
if isinstance(d, list):
for entry in d:
if isinstance(entry, dict):
key_values.extend(key_value_list(entry))
else:
key_values.append((key, entry))
else:
for k, v in d.items():
if k is None or v is None:
continue
if not isinstance(v, dict) and type(v) != list:
key_values.append((k, v))
elif isinstance(v, list):
key_values.extend(key_value_list(v, k))
else:
key_values.extend(key_value_list(v))
return key_values
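# Usage sketch (a made-up Ansible-like task dictionary):
#     task = {'name': 'install nginx', 'apt': {'name': 'nginx', 'state': 'present'}}
#     key_value_list(task)
#     # -> [('name', 'install nginx'), ('name', 'nginx'), ('state', 'present')]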
def all_keys(d: Union[dict, list]) -> list:
"""
Returns a list of all the keys of a dictionary (duplicates included)
d -- a dictionary to iterate through
"""
if not d:
return []
if d is None or not isinstance(d, dict) and not isinstance(d, list):
return []
keys = []
if isinstance(d, list):
for entry in d:
keys.extend(all_keys(entry))
else:
for k, v in d.items():
keys.append(k)
keys.extend(all_keys(v))
return keys
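# Usage sketch:
#     all_keys({'name': 'install nginx', 'apt': {'name': 'nginx'}})
#     # -> ['name', 'apt', 'name']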
def all_values(d: Union[dict, list]) -> list:
"""
Returns a list of all the primitive values of a dictionary (duplicates included)
d -- a dictionary to iterate through
"""
if not d:
return []
if not isinstance(d, dict) and not isinstance(d, list):
return [d]
values = []
if isinstance(d, list):
for entry in d:
values.extend(all_values(entry))
else:
for k, v in d.items():
values.extend(all_values(v))
return values
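# Usage sketch:
#     all_values({'apt': {'name': 'nginx', 'state': 'present'}, 'become': True})
#     # -> ['nginx', 'present', True]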
| from typing import Union
def key_value_list(d: Union[dict, list], key=None) -> list:
"""
    This function iterates over all the key-value pairs of a dictionary and returns a list of tuples (key, value) in which the value is a primitive (i.e., no list or dict), e.g., a string or a number.
d -- a dictionary to iterate through
"""
if not d:
return []
if not isinstance(d, dict) and not isinstance(d, list):
return []
key_values = []
if isinstance(d, list):
for entry in d:
if isinstance(entry, dict):
key_values.extend(key_value_list(entry))
else:
key_values.append((key, entry))
else:
for k, v in d.items():
if k is None or v is None:
continue
if not isinstance(v, dict) and type(v) != list:
key_values.append((k, v))
elif isinstance(v, list):
key_values.extend(key_value_list(v, k))
else:
key_values.extend(key_value_list(v))
return key_values
def all_keys(d: Union[dict, list]) -> list:
"""
Returns a list of all the keys of a dictionary (duplicates included)
d -- a dictionary to iterate through
"""
if not d:
return []
if d is None or not isinstance(d, dict) and not isinstance(d, list):
return []
keys = []
if isinstance(d, list):
for entry in d:
keys.extend(all_keys(entry))
else:
for k, v in d.items():
keys.append(k)
keys.extend(all_keys(v))
return keys
def all_values(d: Union[dict, list]) -> list:
"""
Returns a list of all the primitive values of a dictionary (duplicates included)
d -- a dictionary to iterate through
"""
if not d:
return []
if not isinstance(d, dict) and not isinstance(d, list):
return [d]
values = []
if isinstance(d, list):
for entry in d:
values.extend(all_values(entry))
else:
for k, v in d.items():
values.extend(all_values(v))
return values | en | 0.635071 | This function iterates over all the key-value pairs of a dictionary and returns a list of tuple (key, value) where the key contain only primitive value (i.e., no list or dict), e.g., string, number etc. d -- a dictionary to iterate through Returns a list of all the keys of a dictionary (duplicates included) d -- a dictionary to iterate through Returns a list of all the primitive values of a dictionary (duplicates included) d -- a dictionary to iterate through | 4.115702 | 4 |
yampy/apis/groups.py | Kunal-Shah-Bose/yam-python | 0 | 8737 | <reponame>Kunal-Shah-Bose/yam-python
from yampy.apis.utils import ArgumentConverter, none_filter, stringify_booleans
from yampy.models import extract_id
class GroupsAPI(object):
"""
Provides an interface for accessing the groups related endpoints of the
Yammer API. You should not instantiate this class directly; use the
:meth:`yampy.Yammer.groups` method instead.
"""
def __init__(self, client):
"""
Initializes a new GroupsAPI that will use the given client object
to make HTTP requests.
"""
self._client = client
self._argument_converter = ArgumentConverter(
none_filter, stringify_booleans,
)
def all(self, mine=None, reverse=None):
"""
Returns all the groups in the current user's network.
Customize the response using the keyword arguments:
        * mine -- Only return groups of the current user.
        * reverse -- Return groups in descending order by name.
"""
return self._client.get("/groups", **self._argument_converter(
mine=mine,
reverse=reverse,
))
def find(self, group_id):
"""
Returns the group identified by the given group_id.
"""
return self._client.get(self._group_path(group_id))
def members(self, group_id, page=None, reverse=None):
"""
        Returns the members of the group identified by the given group_id.
Customize the response using the keyword arguments:
* page -- Enable pagination, and return the nth page of 50 users.
"""
path = "/group_memberships"
return self._client.get(path, **self._argument_converter(
page=page,
reverse=reverse,
))
def join(self, group_id):
"""
Join the group identified by the given group_id.
Return True
"""
path = "/group_memberships"
group_id = extract_id(group_id)
return self._client.post(path, **self._argument_converter(
group_id=group_id,
))
def leave(self, group_id):
"""
Leave the group identified by the given group_id.
Return True
"""
path = "/group_memberships"
group_id = extract_id(group_id)
return self._client.delete(path, **self._argument_converter(
group_id=group_id,
))
def create(self, name, private=False):
"""
Create a group.
Return Group info
"""
path = "/groups"
return self._client.post(path, **self._argument_converter(
name=name,
private=private,
))
def delete(self, group_id):
"""
Delete a group.
Return True if success
"""
return self._client.delete(self._group_path(group_id), delete="true")
def _group_path(self, group_id):
return "/groups/%d" % extract_id(group_id)
| from yampy.apis.utils import ArgumentConverter, none_filter, stringify_booleans
from yampy.models import extract_id
class GroupsAPI(object):
"""
Provides an interface for accessing the groups related endpoints of the
Yammer API. You should not instantiate this class directly; use the
:meth:`yampy.Yammer.groups` method instead.
"""
def __init__(self, client):
"""
Initializes a new GroupsAPI that will use the given client object
to make HTTP requests.
"""
self._client = client
self._argument_converter = ArgumentConverter(
none_filter, stringify_booleans,
)
def all(self, mine=None, reverse=None):
"""
Returns all the groups in the current user's network.
Customize the response using the keyword arguments:
        * mine -- Only return groups of the current user.
        * reverse -- Return groups in descending order by name.
"""
return self._client.get("/groups", **self._argument_converter(
mine=mine,
reverse=reverse,
))
def find(self, group_id):
"""
Returns the group identified by the given group_id.
"""
return self._client.get(self._group_path(group_id))
def members(self, group_id, page=None, reverse=None):
"""
        Returns the members of the group identified by the given group_id.
Customize the response using the keyword arguments:
* page -- Enable pagination, and return the nth page of 50 users.
"""
path = "/group_memberships"
return self._client.get(path, **self._argument_converter(
page=page,
reverse=reverse,
))
def join(self, group_id):
"""
Join the group identified by the given group_id.
Return True
"""
path = "/group_memberships"
group_id = extract_id(group_id)
return self._client.post(path, **self._argument_converter(
group_id=group_id,
))
def leave(self, group_id):
"""
Leave the group identified by the given group_id.
Return True
"""
path = "/group_memberships"
group_id = extract_id(group_id)
return self._client.delete(path, **self._argument_converter(
group_id=group_id,
))
def create(self, name, private=False):
"""
Create a group.
Return Group info
"""
path = "/groups"
return self._client.post(path, **self._argument_converter(
name=name,
private=private,
))
def delete(self, group_id):
"""
Delete a group.
Return True if success
"""
return self._client.delete(self._group_path(group_id), delete="true")
def _group_path(self, group_id):
return "/groups/%d" % extract_id(group_id) | en | 0.728803 | Provides an interface for accessing the groups related endpoints of the Yammer API. You should not instantiate this class directly; use the :meth:`yampy.Yammer.groups` method instead. Initializes a new GroupsAPI that will use the given client object to make HTTP requests. Returns all the groups in the current user's network. Customize the response using the keyword arguments: * mine -- Only return group of current user. * reverse -- return group in descending order by name. Returns the group identified by the given group_id. Returns the group identified by the given group_id. Customize the response using the keyword arguments: * page -- Enable pagination, and return the nth page of 50 users. Join the group identified by the given group_id. Return True Leave the group identified by the given group_id. Return True Create a group. Return Group info Delete a group. Return True if success | 2.824139 | 3 |
phy/gui/actions.py | ycanerol/phy | 118 | 8738 | <filename>phy/gui/actions.py
# -*- coding: utf-8 -*-
"""Actions and snippets."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import inspect
from functools import partial, wraps
import logging
import re
import sys
import traceback
from .qt import QKeySequence, QAction, require_qt, input_dialog, busy_cursor, _get_icon
from phylib.utils import Bunch
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Snippet parsing utilities
# -----------------------------------------------------------------------------
def _parse_arg(s):
"""Parse a number or string."""
try:
return int(s)
except ValueError:
pass
try:
return float(s)
except ValueError:
pass
return s
def _parse_list(s):
"""Parse a comma-separated list of values (strings or numbers)."""
# Range: 'x-y'
if '-' in s:
m, M = map(_parse_arg, s.split('-'))
return list(range(m, M + 1))
# List of ids: 'x,y,z'
elif ',' in s:
return list(map(_parse_arg, s.split(',')))
else:
return _parse_arg(s)
def _parse_snippet(s):
"""Parse an entire snippet command."""
return tuple(map(_parse_list, s.split(' ')))
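# Illustrative example of the snippet grammar handled above:
#     _parse_snippet("select 2-5 7.5 all")
#     # -> ('select', [2, 3, 4, 5], 7.5, 'all')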
def _prompt_args(title, docstring, default=None):
"""Display a prompt dialog requesting function arguments.
'default' is a function returning the default value for the proposed input dialog.
"""
# There are args, need to display the dialog.
# Extract Example: `...` in the docstring to put a predefined text
# in the input dialog.
logger.debug("Prompting arguments for %s", title)
r = re.search('Example: `([^`]+)`', docstring)
docstring_ = docstring[:r.start()].strip() if r else docstring
try:
text = str(default()) if default else (r.group(1) if r else None)
except Exception as e: # pragma: no cover
logger.error("Error while handling user input: %s", str(e))
return
s, ok = input_dialog(title, docstring_, text)
if not ok or not s:
return
# Parse user-supplied arguments and call the function.
args = _parse_snippet(s)
return args
# -----------------------------------------------------------------------------
# Show shortcut utility functions
# -----------------------------------------------------------------------------
def _get_shortcut_string(shortcut):
"""Return a string representation of a shortcut."""
if not shortcut:
return ''
if isinstance(shortcut, (tuple, list)):
return ', '.join([_get_shortcut_string(s) for s in shortcut])
if isinstance(shortcut, str):
if hasattr(QKeySequence, shortcut):
shortcut = QKeySequence(getattr(QKeySequence, shortcut))
else:
return shortcut.lower()
assert isinstance(shortcut, QKeySequence)
s = shortcut.toString() or ''
return str(s).lower()
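# e.g. (illustrative): _get_shortcut_string('Ctrl+Shift+A') -> 'ctrl+shift+a'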
def _get_qkeysequence(shortcut):
"""Return a QKeySequence or list of QKeySequence from a shortcut string."""
if shortcut is None:
return []
if isinstance(shortcut, (tuple, list)):
return [_get_qkeysequence(s) for s in shortcut]
assert isinstance(shortcut, str)
if hasattr(QKeySequence, shortcut):
return QKeySequence(getattr(QKeySequence, shortcut))
sequence = QKeySequence.fromString(shortcut)
assert not sequence.isEmpty()
return sequence
def _show_shortcuts(shortcuts):
"""Display shortcuts."""
out = []
for n in sorted(shortcuts):
shortcut = _get_shortcut_string(shortcuts[n])
if not n.startswith('_') and not shortcut.startswith('-'):
out.append('- {0:<40} {1:s}'.format(n, shortcut))
if out:
print('Keyboard shortcuts')
print('\n'.join(out))
print('')
def _show_snippets(snippets):
"""Display snippets."""
out = []
for n in sorted(snippets):
snippet = snippets[n]
if not n.startswith('_'):
out.append('- {0:<40} :{1:s}'.format(n, snippet))
if out:
print('Snippets')
print('\n'.join(out))
print('')
def show_shortcuts_snippets(actions):
"""Show the shortcuts and snippets of an Actions instance."""
print(actions.name)
print('-' * len(actions.name))
print()
_show_shortcuts(actions.shortcuts)
_show_snippets(actions._default_snippets)
# -----------------------------------------------------------------------------
# Actions
# -----------------------------------------------------------------------------
def _alias(name):
# Get the alias from the character after & if it exists.
alias = name[name.index('&') + 1] if '&' in name else name
alias = alias.replace(' ', '_').lower()
return alias
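# e.g. (illustrative): _alias('&Save as') -> 's', _alias('Toggle waveforms') -> 'toggle_waveforms'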
def _expected_args(f):
if isinstance(f, partial):
argspec = inspect.getfullargspec(f.func)
else:
argspec = inspect.getfullargspec(f)
f_args = argspec.args
if 'self' in f_args:
f_args.remove('self')
# Remove arguments with defaults from the list.
if len(argspec.defaults or ()):
f_args = f_args[:-len(argspec.defaults)]
# Remove arguments supplied in a partial.
if isinstance(f, partial):
f_args = f_args[len(f.args):]
f_args = [arg for arg in f_args if arg not in f.keywords]
return tuple(f_args)
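# Illustrative behaviour:
#     def f(a, b, c=0): ...
#     _expected_args(f)              # -> ('a', 'b')
#     _expected_args(partial(f, 1))  # -> ('b',)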
@require_qt
def _create_qaction(gui, **kwargs):
# Create the QAction instance.
name = kwargs.get('name', '')
name = name[0].upper() + name[1:].replace('_', ' ')
action = QAction(name, gui)
# Show an input dialog if there are args.
callback = kwargs.get('callback', None)
title = getattr(callback, '__name__', 'action')
# Number of expected arguments.
n_args = kwargs.get('n_args', None) or len(_expected_args(callback))
@wraps(callback)
def wrapped(is_checked, *args):
if kwargs.get('checkable', None):
args = (is_checked,) + args
if kwargs.get('prompt', None):
args += _prompt_args(
title, docstring, default=kwargs.get('prompt_default', None)) or ()
if not args: # pragma: no cover
logger.debug("User cancelled input prompt, aborting.")
return
if len(args) < n_args:
logger.warning(
"Invalid function arguments: expecting %d but got %d", n_args, len(args))
return
try:
# Set a busy cursor if set_busy is True.
with busy_cursor(kwargs.get('set_busy', None)):
return callback(*args)
except Exception: # pragma: no cover
logger.warning("Error when executing action %s.", name)
logger.debug(''.join(traceback.format_exception(*sys.exc_info())))
action.triggered.connect(wrapped)
sequence = _get_qkeysequence(kwargs.get('shortcut', None))
if not isinstance(sequence, (tuple, list)):
sequence = [sequence]
action.setShortcuts(sequence)
assert kwargs.get('docstring', None)
docstring = re.sub(r'\s+', ' ', kwargs.get('docstring', None))
docstring += ' (alias: {})'.format(kwargs.get('alias', None))
action.setStatusTip(docstring)
action.setWhatsThis(docstring)
action.setCheckable(kwargs.get('checkable', None))
action.setChecked(kwargs.get('checked', None))
if kwargs.get('icon', None):
action.setIcon(_get_icon(kwargs['icon']))
return action
class Actions(object):
"""Group of actions bound to a GUI.
This class attaches to a GUI and implements the following features:
* Add and remove actions
* Keyboard shortcuts for the actions
* Display all shortcuts
Constructor
-----------
gui : GUI instance
name : str
Name of this group of actions.
menu : str
Name of the GUI menu that will contain the actions.
submenu : str
Name of the GUI submenu that will contain the actions.
default_shortcuts : dict
Map action names to keyboard shortcuts (regular strings).
default_snippets : dict
Map action names to snippets (regular strings).
"""
def __init__(
self, gui, name=None, menu=None, submenu=None, view=None,
insert_menu_before=None, default_shortcuts=None, default_snippets=None):
self._actions_dict = {}
self._aliases = {}
self._default_shortcuts = default_shortcuts or {}
self._default_snippets = default_snippets or {}
assert name
self.name = name
self.menu = menu
self.submenu = submenu
self.view = view
self.view_submenu = None
self.insert_menu_before = insert_menu_before
self._view_submenus = {}
self.gui = gui
gui.actions.append(self)
# Create the menu when creating the Actions instance.
if menu:
gui.get_menu(menu, insert_menu_before)
def _get_menu(self, menu=None, submenu=None, view=None, view_submenu=None):
"""Return the QMenu depending on a combination of keyword arguments."""
# Defaults.
menu = menu or self.menu
submenu = submenu or self.submenu
view = view or self.view
view_submenu = view_submenu or self.view_submenu
# If the action is a view action, it should be added to the view's menu in the dock widget.
if view:
if view_submenu and view_submenu not in self._view_submenus:
self._view_submenus[view_submenu] = view.dock._menu.addMenu(view_submenu)
if view_submenu:
return self._view_submenus[view_submenu]
else:
return view.dock._menu
# Create the submenu if there is one.
if submenu:
# Create the submenu.
self.gui.get_submenu(menu, submenu)
# Make sure the action gets added to the submenu.
menu = submenu
if menu:
return self.gui.get_menu(menu)
def add(self, callback=None, name=None, shortcut=None, alias=None, prompt=False, n_args=None,
docstring=None, menu=None, submenu=None, view=None, view_submenu=None, verbose=True,
checkable=False, checked=False, set_busy=False, prompt_default=None,
show_shortcut=True, icon=None, toolbar=False):
"""Add an action with a keyboard shortcut.
Parameters
----------
callback : function
Take no argument if checkable is False, or a boolean (checked) if it is True
name : str
Action name, the callback's name by default.
shortcut : str
The keyboard shortcut for this action.
alias : str
Snippet, the name by default.
prompt : boolean
Whether this action should display a dialog with an input box where the user can
write arguments to the callback function.
n_args : int
If prompt is True, specify the number of expected arguments.
set_busy : boolean
Whether to use a busy cursor while performing the action.
prompt_default : str
The default text in the input text box, if prompt is True.
docstring : str
The action docstring, to be displayed in the status bar when hovering over the action
item in the menu. By default, the function's docstring.
menu : str
The name of the menu where the action should be added. It is automatically created
if it doesn't exist.
submenu : str
The name of the submenu where the action should be added. It is automatically created
if it doesn't exist.
view : QWidget
A view that belongs to the GUI, if the actions are to be added to the view's menu bar.
view_submenu : str
The name of a submenu in the view menu.
checkable : boolean
Whether the action is checkable (toggle on/off).
checked : boolean
Whether the checkable action is initially checked or not.
show_shortcut : boolean
Whether to show the shortcut in the Help action that displays all GUI shortcuts.
icon : str
Hexadecimal code of the font-awesome icon.
toolbar : boolean
Whether to add the action to the toolbar.
"""
param_names = sorted(inspect.signature(Actions.add).parameters)
l = locals()
kwargs = {param_name: l[param_name] for param_name in param_names if param_name != 'self'}
if callback is None:
# Allow to use either add(func) or @add or @add(...).
kwargs.pop('callback', None)
return partial(self.add, **kwargs)
assert callback
# Get the name from the callback function if needed.
name = name or callback.__name__
alias = alias or self._default_snippets.get(name, _alias(name)).split(' ')[0]
name = name.replace('&', '')
shortcut = shortcut or self._default_shortcuts.get(name, None)
# Skip existing action.
if name in self._actions_dict:
return
# Set the status tip from the function's docstring.
docstring = docstring or callback.__doc__ or name
docstring = re.sub(r'[ \t\r\f\v]{2,}', ' ', docstring.strip())
# Create and register the action.
kwargs.update(name=name, alias=alias, shortcut=shortcut, docstring=docstring)
action = _create_qaction(self.gui, **kwargs)
action_obj = Bunch(qaction=action, **kwargs)
if verbose and not name.startswith('_'):
logger.log(5, "Add action `%s` (%s).", name, _get_shortcut_string(action.shortcut()))
self.gui.addAction(action)
# Do not show private actions in the menu.
if not name.startswith('_'):
# Find the menu in which the action should be added.
qmenu = self._get_menu(
menu=menu, submenu=submenu, view=view, view_submenu=view_submenu)
if qmenu:
qmenu.addAction(action)
# Add the action to the toolbar.
if toolbar:
self.gui._toolbar.show()
self.gui._toolbar.addAction(action)
self._actions_dict[name] = action_obj
# Register the alias -> name mapping.
self._aliases[alias] = name
# Set the callback method.
if callback:
setattr(self, name.lower().replace(' ', '_').replace(':', ''), callback)
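    # A minimal usage sketch (names are hypothetical). Assuming `gui` is a GUI instance
    # and `actions = Actions(gui, name='Edit', menu='&Edit')`, `add` can be used either
    # as a decorator or called directly:
    #
    #     @actions.add(shortcut='ctrl+z')
    #     def undo():
    #         """Undo the last operation."""
    #
    #     actions.add(lambda: print('hello'), name='say_hello', alias='sh')
    #
    # Both callbacks become attributes (`actions.undo`, `actions.say_hello`) thanks to
    # the `setattr` call above, and can be triggered from code with `actions.run(...)`,
    # from the menu, or with their keyboard shortcuts.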
def separator(self, **kwargs):
"""Add a separator.
Parameters
----------
menu : str
The name of the menu where the separator should be added. It is automatically created
if it doesn't exist.
submenu : str
The name of the submenu where the separator should be added. It is automatically
created if it doesn't exist.
view : QWidget
A view that belongs to the GUI, if the separator is to be added to the view's menu bar.
view_submenu : str
The name of a submenu in the view menu.
"""
self._get_menu(**kwargs).addSeparator()
def disable(self, name=None):
"""Disable all actions, or only one if a name is passed."""
if name is None:
for name in self._actions_dict:
self.disable(name)
return
self._actions_dict[name].qaction.setEnabled(False)
def enable(self, name=None):
"""Enable all actions, or only one if a name is passed.."""
if name is None:
for name in self._actions_dict:
self.enable(name)
return
self._actions_dict[name].qaction.setEnabled(True)
def get(self, name):
"""Get a QAction instance from its name."""
return self._actions_dict[name].qaction if name in self._actions_dict else None
def run(self, name, *args):
"""Run an action as specified by its name."""
assert isinstance(name, str)
# Resolve the alias if it is an alias.
name = self._aliases.get(name, name)
# Get the action.
action = self._actions_dict.get(name, None)
if not action:
raise ValueError("Action `{}` doesn't exist.".format(name))
if not name.startswith('_'):
logger.debug("Execute action `%s`.", name)
try:
return action.callback(*args)
except TypeError as e:
logger.warning("Invalid action arguments: " + str(e))
return
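    # For example (hypothetical action): if `my_action` was added with alias 'ma', then
    # `actions.run('my_action', 3)` and `actions.run('ma', 3)` resolve to the same
    # callback and invoke it as `my_action(3)`.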
def remove(self, name):
"""Remove an action."""
self.gui.removeAction(self._actions_dict[name].qaction)
del self._actions_dict[name]
delattr(self, name)
def remove_all(self):
"""Remove all actions."""
names = sorted(self._actions_dict.keys())
for name in names:
self.remove(name)
@property
def shortcuts(self):
"""A dictionary mapping action names to keyboard shortcuts."""
out = {}
for name in sorted(self._actions_dict):
action = self._actions_dict[name]
if not action.show_shortcut:
continue
# Discard actions without shortcut and without an alias.
if not action.shortcut and not action.alias:
continue
            # Only show the alias when it differs from the action name.
alias_str = ' (:%s)' % action.alias if action.alias != name else ''
shortcut = action.shortcut or '-'
shortcut = shortcut if isinstance(action.shortcut, str) else ', '.join(shortcut)
out[name] = '%s%s' % (shortcut, alias_str)
return out
def show_shortcuts(self):
"""Display all shortcuts in the console."""
show_shortcuts_snippets(self)
def __contains__(self, name):
"""Whether the Actions group contains a specified action."""
return name in self._actions_dict
def __repr__(self):
return '<Actions {}>'.format(sorted(self._actions_dict))
# -----------------------------------------------------------------------------
# Snippets
# -----------------------------------------------------------------------------
class Snippets(object):
"""Provide keyboard snippets to quickly execute actions from a GUI.
    This class attaches to a GUI and an `Actions` instance. Every command
    is associated with a snippet of the same name, or with an alias as indicated
    in the action. The arguments of the action's callback functions can be
provided in the snippet's command with a simple syntax. For example, the
following command:
```
:my_action string 3-6
```
corresponds to:
```python
my_action('string', (3, 4, 5, 6))
```
The snippet mode is activated with the `:` keyboard shortcut. A snippet
command is activated with `Enter`, and one can leave the snippet mode
with `Escape`.
When the snippet mode is enabled (with `:`), this object adds a hidden Qt action
for every keystroke. These actions are removed when the snippet mode is disabled.
Constructor
-----------
gui : GUI instance
"""
# HACK: Unicode characters do not seem to work on Python 2
cursor = '\u200A\u258C'
# Allowed characters in snippet mode.
# A Qt shortcut will be created for every character.
_snippet_chars = r"abcdefghijklmnopqrstuvwxyz0123456789 ,.;?!_-+~=*/\(){}[]<>&|"
def __init__(self, gui):
self.gui = gui
self._status_message = gui.status_message
self.actions = Actions(gui, name='Snippets', menu='&File')
# Register snippet mode shortcut.
@self.actions.add(shortcut=':')
def enable_snippet_mode():
"""Enable the snippet mode (type action alias in the status
bar)."""
self.mode_on()
self._create_snippet_actions()
self.mode_off()
@property
def command(self):
"""This is used to write a snippet message in the status bar. A cursor is appended at
the end."""
msg = self.gui.status_message
n = len(msg)
n_cur = len(self.cursor)
return msg[:n - n_cur]
@command.setter
def command(self, value):
value += self.cursor
self.gui.unlock_status()
self.gui.status_message = value
self.gui.lock_status()
def _backspace(self):
"""Erase the last character in the snippet command."""
if self.command == ':':
return
logger.log(5, "Snippet keystroke `Backspace`.")
self.command = self.command[:-1]
def _enter(self):
"""Disable the snippet mode and execute the command."""
command = self.command
logger.log(5, "Snippet keystroke `Enter`.")
# NOTE: we need to set back the actions (mode_off) before running
# the command.
self.mode_off()
self.run(command)
def _create_snippet_actions(self):
"""Add mock Qt actions for snippet keystrokes.
Used to enable snippet mode.
"""
# One action per allowed character.
for i, char in enumerate(self._snippet_chars):
def _make_func(char):
def callback():
logger.log(5, "Snippet keystroke `%s`.", char)
self.command += char
return callback
# Lowercase letters.
self.actions.add(
name='_snippet_{}'.format(i),
shortcut=char,
callback=_make_func(char))
# Uppercase letters.
if char in self._snippet_chars[:26]:
self.actions.add(
name='_snippet_{}_upper'.format(i),
shortcut='shift+' + char,
callback=_make_func(char.upper()))
self.actions.add(
name='_snippet_backspace', shortcut='backspace', callback=self._backspace)
self.actions.add(
name='_snippet_activate', shortcut=('enter', 'return'), callback=self._enter)
self.actions.add(
name='_snippet_disable', shortcut='escape', callback=self.mode_off)
def run(self, snippet):
"""Execute a snippet command.
May be overridden.
"""
assert snippet[0] == ':'
snippet = snippet[1:]
snippet_args = _parse_snippet(snippet)
name = snippet_args[0]
logger.debug("Processing snippet `%s`.", snippet)
try:
# Try to run the snippet on all attached Actions instances.
for actions in self.gui.actions:
try:
actions.run(name, *snippet_args[1:])
return
except ValueError:
# This Actions instance doesn't contain the requested
# snippet, trying the next attached Actions instance.
pass
logger.warning("Couldn't find action `%s`.", name)
except Exception as e:
logger.warning("Error when executing snippet: \"%s\".", str(e))
logger.debug(''.join(traceback.format_exception(*sys.exc_info())))
def is_mode_on(self):
"""Whether the snippet mode is enabled."""
return self.command.startswith(':')
def mode_on(self):
"""Enable the snippet mode."""
logger.debug("Snippet mode enabled, press `escape` to leave this mode.")
# Save the current status message.
self._status_message = self.gui.status_message
self.gui.lock_status()
        # Silence all actions except the Snippets actions.
for actions in self.gui.actions:
if actions != self.actions:
actions.disable()
self.actions.enable()
self.command = ':'
def mode_off(self):
"""Disable the snippet mode."""
self.gui.unlock_status()
# Reset the GUI status message that was set before the mode was
# activated.
self.gui.status_message = self._status_message
# Re-enable all actions except the Snippets actions.
self.actions.disable()
for actions in self.gui.actions:
if actions != self.actions:
actions.enable()
# The `:` shortcut should always be enabled.
self.actions.enable('enable_snippet_mode')
| <filename>phy/gui/actions.py
# -*- coding: utf-8 -*-
"""Actions and snippets."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import inspect
from functools import partial, wraps
import logging
import re
import sys
import traceback
from .qt import QKeySequence, QAction, require_qt, input_dialog, busy_cursor, _get_icon
from phylib.utils import Bunch
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Snippet parsing utilities
# -----------------------------------------------------------------------------
def _parse_arg(s):
"""Parse a number or string."""
try:
return int(s)
except ValueError:
pass
try:
return float(s)
except ValueError:
pass
return s
def _parse_list(s):
"""Parse a comma-separated list of values (strings or numbers)."""
# Range: 'x-y'
if '-' in s:
m, M = map(_parse_arg, s.split('-'))
return list(range(m, M + 1))
# List of ids: 'x,y,z'
elif ',' in s:
return list(map(_parse_arg, s.split(',')))
else:
return _parse_arg(s)
def _parse_snippet(s):
"""Parse an entire snippet command."""
return tuple(map(_parse_list, s.split(' ')))
def _prompt_args(title, docstring, default=None):
"""Display a prompt dialog requesting function arguments.
'default' is a function returning the default value for the proposed input dialog.
"""
# There are args, need to display the dialog.
# Extract Example: `...` in the docstring to put a predefined text
# in the input dialog.
logger.debug("Prompting arguments for %s", title)
r = re.search('Example: `([^`]+)`', docstring)
docstring_ = docstring[:r.start()].strip() if r else docstring
try:
text = str(default()) if default else (r.group(1) if r else None)
except Exception as e: # pragma: no cover
logger.error("Error while handling user input: %s", str(e))
return
s, ok = input_dialog(title, docstring_, text)
if not ok or not s:
return
# Parse user-supplied arguments and call the function.
args = _parse_snippet(s)
return args
# -----------------------------------------------------------------------------
# Show shortcut utility functions
# -----------------------------------------------------------------------------
def _get_shortcut_string(shortcut):
"""Return a string representation of a shortcut."""
if not shortcut:
return ''
if isinstance(shortcut, (tuple, list)):
return ', '.join([_get_shortcut_string(s) for s in shortcut])
if isinstance(shortcut, str):
if hasattr(QKeySequence, shortcut):
shortcut = QKeySequence(getattr(QKeySequence, shortcut))
else:
return shortcut.lower()
assert isinstance(shortcut, QKeySequence)
s = shortcut.toString() or ''
return str(s).lower()
def _get_qkeysequence(shortcut):
"""Return a QKeySequence or list of QKeySequence from a shortcut string."""
if shortcut is None:
return []
if isinstance(shortcut, (tuple, list)):
return [_get_qkeysequence(s) for s in shortcut]
assert isinstance(shortcut, str)
if hasattr(QKeySequence, shortcut):
return QKeySequence(getattr(QKeySequence, shortcut))
sequence = QKeySequence.fromString(shortcut)
assert not sequence.isEmpty()
return sequence
def _show_shortcuts(shortcuts):
"""Display shortcuts."""
out = []
for n in sorted(shortcuts):
shortcut = _get_shortcut_string(shortcuts[n])
if not n.startswith('_') and not shortcut.startswith('-'):
out.append('- {0:<40} {1:s}'.format(n, shortcut))
if out:
print('Keyboard shortcuts')
print('\n'.join(out))
print('')
def _show_snippets(snippets):
"""Display snippets."""
out = []
for n in sorted(snippets):
snippet = snippets[n]
if not n.startswith('_'):
out.append('- {0:<40} :{1:s}'.format(n, snippet))
if out:
print('Snippets')
print('\n'.join(out))
print('')
def show_shortcuts_snippets(actions):
"""Show the shortcuts and snippets of an Actions instance."""
print(actions.name)
print('-' * len(actions.name))
print()
_show_shortcuts(actions.shortcuts)
_show_snippets(actions._default_snippets)
# -----------------------------------------------------------------------------
# Actions
# -----------------------------------------------------------------------------
def _alias(name):
# Get the alias from the character after & if it exists.
alias = name[name.index('&') + 1] if '&' in name else name
alias = alias.replace(' ', '_').lower()
return alias
def _expected_args(f):
if isinstance(f, partial):
argspec = inspect.getfullargspec(f.func)
else:
argspec = inspect.getfullargspec(f)
f_args = argspec.args
if 'self' in f_args:
f_args.remove('self')
# Remove arguments with defaults from the list.
if len(argspec.defaults or ()):
f_args = f_args[:-len(argspec.defaults)]
# Remove arguments supplied in a partial.
if isinstance(f, partial):
f_args = f_args[len(f.args):]
f_args = [arg for arg in f_args if arg not in f.keywords]
return tuple(f_args)
@require_qt
def _create_qaction(gui, **kwargs):
# Create the QAction instance.
name = kwargs.get('name', '')
name = name[0].upper() + name[1:].replace('_', ' ')
action = QAction(name, gui)
# Show an input dialog if there are args.
callback = kwargs.get('callback', None)
title = getattr(callback, '__name__', 'action')
# Number of expected arguments.
n_args = kwargs.get('n_args', None) or len(_expected_args(callback))
@wraps(callback)
def wrapped(is_checked, *args):
if kwargs.get('checkable', None):
args = (is_checked,) + args
if kwargs.get('prompt', None):
args += _prompt_args(
title, docstring, default=kwargs.get('prompt_default', None)) or ()
if not args: # pragma: no cover
logger.debug("User cancelled input prompt, aborting.")
return
if len(args) < n_args:
logger.warning(
"Invalid function arguments: expecting %d but got %d", n_args, len(args))
return
try:
# Set a busy cursor if set_busy is True.
with busy_cursor(kwargs.get('set_busy', None)):
return callback(*args)
except Exception: # pragma: no cover
logger.warning("Error when executing action %s.", name)
logger.debug(''.join(traceback.format_exception(*sys.exc_info())))
action.triggered.connect(wrapped)
sequence = _get_qkeysequence(kwargs.get('shortcut', None))
if not isinstance(sequence, (tuple, list)):
sequence = [sequence]
action.setShortcuts(sequence)
assert kwargs.get('docstring', None)
docstring = re.sub(r'\s+', ' ', kwargs.get('docstring', None))
docstring += ' (alias: {})'.format(kwargs.get('alias', None))
action.setStatusTip(docstring)
action.setWhatsThis(docstring)
action.setCheckable(kwargs.get('checkable', None))
action.setChecked(kwargs.get('checked', None))
if kwargs.get('icon', None):
action.setIcon(_get_icon(kwargs['icon']))
return action
class Actions(object):
"""Group of actions bound to a GUI.
This class attaches to a GUI and implements the following features:
* Add and remove actions
* Keyboard shortcuts for the actions
* Display all shortcuts
Constructor
-----------
gui : GUI instance
name : str
Name of this group of actions.
menu : str
Name of the GUI menu that will contain the actions.
submenu : str
Name of the GUI submenu that will contain the actions.
default_shortcuts : dict
Map action names to keyboard shortcuts (regular strings).
default_snippets : dict
Map action names to snippets (regular strings).
"""
def __init__(
self, gui, name=None, menu=None, submenu=None, view=None,
insert_menu_before=None, default_shortcuts=None, default_snippets=None):
self._actions_dict = {}
self._aliases = {}
self._default_shortcuts = default_shortcuts or {}
self._default_snippets = default_snippets or {}
assert name
self.name = name
self.menu = menu
self.submenu = submenu
self.view = view
self.view_submenu = None
self.insert_menu_before = insert_menu_before
self._view_submenus = {}
self.gui = gui
gui.actions.append(self)
# Create the menu when creating the Actions instance.
if menu:
gui.get_menu(menu, insert_menu_before)
def _get_menu(self, menu=None, submenu=None, view=None, view_submenu=None):
"""Return the QMenu depending on a combination of keyword arguments."""
# Defaults.
menu = menu or self.menu
submenu = submenu or self.submenu
view = view or self.view
view_submenu = view_submenu or self.view_submenu
# If the action is a view action, it should be added to the view's menu in the dock widget.
if view:
if view_submenu and view_submenu not in self._view_submenus:
self._view_submenus[view_submenu] = view.dock._menu.addMenu(view_submenu)
if view_submenu:
return self._view_submenus[view_submenu]
else:
return view.dock._menu
# Create the submenu if there is one.
if submenu:
# Create the submenu.
self.gui.get_submenu(menu, submenu)
# Make sure the action gets added to the submenu.
menu = submenu
if menu:
return self.gui.get_menu(menu)
def add(self, callback=None, name=None, shortcut=None, alias=None, prompt=False, n_args=None,
docstring=None, menu=None, submenu=None, view=None, view_submenu=None, verbose=True,
checkable=False, checked=False, set_busy=False, prompt_default=None,
show_shortcut=True, icon=None, toolbar=False):
"""Add an action with a keyboard shortcut.
Parameters
----------
callback : function
Take no argument if checkable is False, or a boolean (checked) if it is True
name : str
Action name, the callback's name by default.
shortcut : str
The keyboard shortcut for this action.
alias : str
Snippet, the name by default.
prompt : boolean
Whether this action should display a dialog with an input box where the user can
write arguments to the callback function.
n_args : int
If prompt is True, specify the number of expected arguments.
set_busy : boolean
Whether to use a busy cursor while performing the action.
prompt_default : str
The default text in the input text box, if prompt is True.
docstring : str
The action docstring, to be displayed in the status bar when hovering over the action
item in the menu. By default, the function's docstring.
menu : str
The name of the menu where the action should be added. It is automatically created
if it doesn't exist.
submenu : str
The name of the submenu where the action should be added. It is automatically created
if it doesn't exist.
view : QWidget
A view that belongs to the GUI, if the actions are to be added to the view's menu bar.
view_submenu : str
The name of a submenu in the view menu.
checkable : boolean
Whether the action is checkable (toggle on/off).
checked : boolean
Whether the checkable action is initially checked or not.
show_shortcut : boolean
Whether to show the shortcut in the Help action that displays all GUI shortcuts.
icon : str
Hexadecimal code of the font-awesome icon.
toolbar : boolean
Whether to add the action to the toolbar.
"""
param_names = sorted(inspect.signature(Actions.add).parameters)
l = locals()
kwargs = {param_name: l[param_name] for param_name in param_names if param_name != 'self'}
if callback is None:
# Allow to use either add(func) or @add or @add(...).
kwargs.pop('callback', None)
return partial(self.add, **kwargs)
assert callback
# Get the name from the callback function if needed.
name = name or callback.__name__
alias = alias or self._default_snippets.get(name, _alias(name)).split(' ')[0]
name = name.replace('&', '')
shortcut = shortcut or self._default_shortcuts.get(name, None)
# Skip existing action.
if name in self._actions_dict:
return
# Set the status tip from the function's docstring.
docstring = docstring or callback.__doc__ or name
docstring = re.sub(r'[ \t\r\f\v]{2,}', ' ', docstring.strip())
# Create and register the action.
kwargs.update(name=name, alias=alias, shortcut=shortcut, docstring=docstring)
action = _create_qaction(self.gui, **kwargs)
action_obj = Bunch(qaction=action, **kwargs)
if verbose and not name.startswith('_'):
logger.log(5, "Add action `%s` (%s).", name, _get_shortcut_string(action.shortcut()))
self.gui.addAction(action)
# Do not show private actions in the menu.
if not name.startswith('_'):
# Find the menu in which the action should be added.
qmenu = self._get_menu(
menu=menu, submenu=submenu, view=view, view_submenu=view_submenu)
if qmenu:
qmenu.addAction(action)
# Add the action to the toolbar.
if toolbar:
self.gui._toolbar.show()
self.gui._toolbar.addAction(action)
self._actions_dict[name] = action_obj
# Register the alias -> name mapping.
self._aliases[alias] = name
# Set the callback method.
if callback:
setattr(self, name.lower().replace(' ', '_').replace(':', ''), callback)
def separator(self, **kwargs):
"""Add a separator.
Parameters
----------
menu : str
The name of the menu where the separator should be added. It is automatically created
if it doesn't exist.
submenu : str
The name of the submenu where the separator should be added. It is automatically
created if it doesn't exist.
view : QWidget
A view that belongs to the GUI, if the separator is to be added to the view's menu bar.
view_submenu : str
The name of a submenu in the view menu.
"""
self._get_menu(**kwargs).addSeparator()
def disable(self, name=None):
"""Disable all actions, or only one if a name is passed."""
if name is None:
for name in self._actions_dict:
self.disable(name)
return
self._actions_dict[name].qaction.setEnabled(False)
def enable(self, name=None):
"""Enable all actions, or only one if a name is passed.."""
if name is None:
for name in self._actions_dict:
self.enable(name)
return
self._actions_dict[name].qaction.setEnabled(True)
def get(self, name):
"""Get a QAction instance from its name."""
return self._actions_dict[name].qaction if name in self._actions_dict else None
def run(self, name, *args):
"""Run an action as specified by its name."""
assert isinstance(name, str)
# Resolve the alias if it is an alias.
name = self._aliases.get(name, name)
# Get the action.
action = self._actions_dict.get(name, None)
if not action:
raise ValueError("Action `{}` doesn't exist.".format(name))
if not name.startswith('_'):
logger.debug("Execute action `%s`.", name)
try:
return action.callback(*args)
except TypeError as e:
logger.warning("Invalid action arguments: " + str(e))
return
def remove(self, name):
"""Remove an action."""
self.gui.removeAction(self._actions_dict[name].qaction)
del self._actions_dict[name]
delattr(self, name)
def remove_all(self):
"""Remove all actions."""
names = sorted(self._actions_dict.keys())
for name in names:
self.remove(name)
@property
def shortcuts(self):
"""A dictionary mapping action names to keyboard shortcuts."""
out = {}
for name in sorted(self._actions_dict):
action = self._actions_dict[name]
if not action.show_shortcut:
continue
# Discard actions without shortcut and without an alias.
if not action.shortcut and not action.alias:
continue
# Only show alias for actions with no shortcut.
alias_str = ' (:%s)' % action.alias if action.alias != name else ''
shortcut = action.shortcut or '-'
shortcut = shortcut if isinstance(action.shortcut, str) else ', '.join(shortcut)
out[name] = '%s%s' % (shortcut, alias_str)
return out
def show_shortcuts(self):
"""Display all shortcuts in the console."""
show_shortcuts_snippets(self)
def __contains__(self, name):
"""Whether the Actions group contains a specified action."""
return name in self._actions_dict
def __repr__(self):
return '<Actions {}>'.format(sorted(self._actions_dict))
# -----------------------------------------------------------------------------
# Snippets
# -----------------------------------------------------------------------------
class Snippets(object):
"""Provide keyboard snippets to quickly execute actions from a GUI.
This class attaches to a GUI and an `Actions` instance. To every command
is associated a snippet with the same name, or with an alias as indicated
in the action. The arguments of the action's callback functions can be
provided in the snippet's command with a simple syntax. For example, the
following command:
```
:my_action string 3-6
```
corresponds to:
```python
my_action('string', (3, 4, 5, 6))
```
The snippet mode is activated with the `:` keyboard shortcut. A snippet
command is activated with `Enter`, and one can leave the snippet mode
with `Escape`.
When the snippet mode is enabled (with `:`), this object adds a hidden Qt action
for every keystroke. These actions are removed when the snippet mode is disabled.
Constructor
-----------
gui : GUI instance
"""
# HACK: Unicode characters do not seem to work on Python 2
cursor = '\u200A\u258C'
# Allowed characters in snippet mode.
# A Qt shortcut will be created for every character.
_snippet_chars = r"abcdefghijklmnopqrstuvwxyz0123456789 ,.;?!_-+~=*/\(){}[]<>&|"
def __init__(self, gui):
self.gui = gui
self._status_message = gui.status_message
self.actions = Actions(gui, name='Snippets', menu='&File')
# Register snippet mode shortcut.
@self.actions.add(shortcut=':')
def enable_snippet_mode():
"""Enable the snippet mode (type action alias in the status
bar)."""
self.mode_on()
self._create_snippet_actions()
self.mode_off()
@property
def command(self):
"""This is used to write a snippet message in the status bar. A cursor is appended at
the end."""
msg = self.gui.status_message
n = len(msg)
n_cur = len(self.cursor)
return msg[:n - n_cur]
@command.setter
def command(self, value):
value += self.cursor
self.gui.unlock_status()
self.gui.status_message = value
self.gui.lock_status()
def _backspace(self):
"""Erase the last character in the snippet command."""
if self.command == ':':
return
logger.log(5, "Snippet keystroke `Backspace`.")
self.command = self.command[:-1]
def _enter(self):
"""Disable the snippet mode and execute the command."""
command = self.command
logger.log(5, "Snippet keystroke `Enter`.")
# NOTE: we need to set back the actions (mode_off) before running
# the command.
self.mode_off()
self.run(command)
def _create_snippet_actions(self):
"""Add mock Qt actions for snippet keystrokes.
Used to enable snippet mode.
"""
# One action per allowed character.
for i, char in enumerate(self._snippet_chars):
def _make_func(char):
def callback():
logger.log(5, "Snippet keystroke `%s`.", char)
self.command += char
return callback
# Lowercase letters.
self.actions.add(
name='_snippet_{}'.format(i),
shortcut=char,
callback=_make_func(char))
# Uppercase letters.
if char in self._snippet_chars[:26]:
self.actions.add(
name='_snippet_{}_upper'.format(i),
shortcut='shift+' + char,
callback=_make_func(char.upper()))
self.actions.add(
name='_snippet_backspace', shortcut='backspace', callback=self._backspace)
self.actions.add(
name='_snippet_activate', shortcut=('enter', 'return'), callback=self._enter)
self.actions.add(
name='_snippet_disable', shortcut='escape', callback=self.mode_off)
def run(self, snippet):
"""Execute a snippet command.
May be overridden.
"""
assert snippet[0] == ':'
snippet = snippet[1:]
snippet_args = _parse_snippet(snippet)
name = snippet_args[0]
logger.debug("Processing snippet `%s`.", snippet)
try:
# Try to run the snippet on all attached Actions instances.
for actions in self.gui.actions:
try:
actions.run(name, *snippet_args[1:])
return
except ValueError:
# This Actions instance doesn't contain the requested
# snippet, trying the next attached Actions instance.
pass
logger.warning("Couldn't find action `%s`.", name)
except Exception as e:
logger.warning("Error when executing snippet: \"%s\".", str(e))
logger.debug(''.join(traceback.format_exception(*sys.exc_info())))
def is_mode_on(self):
"""Whether the snippet mode is enabled."""
return self.command.startswith(':')
def mode_on(self):
"""Enable the snippet mode."""
logger.debug("Snippet mode enabled, press `escape` to leave this mode.")
# Save the current status message.
self._status_message = self.gui.status_message
self.gui.lock_status()
# Silent all actions except the Snippets actions.
for actions in self.gui.actions:
if actions != self.actions:
actions.disable()
self.actions.enable()
self.command = ':'
def mode_off(self):
"""Disable the snippet mode."""
self.gui.unlock_status()
# Reset the GUI status message that was set before the mode was
# activated.
self.gui.status_message = self._status_message
# Re-enable all actions except the Snippets actions.
self.actions.disable()
for actions in self.gui.actions:
if actions != self.actions:
actions.enable()
# The `:` shortcut should always be enabled.
self.actions.enable('enable_snippet_mode')
| en | 0.681662 | # -*- coding: utf-8 -*- Actions and snippets. # ----------------------------------------------------------------------------- # Imports # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Snippet parsing utilities # ----------------------------------------------------------------------------- Parse a number or string. Parse a comma-separated list of values (strings or numbers). # Range: 'x-y' # List of ids: 'x,y,z' Parse an entire snippet command. Display a prompt dialog requesting function arguments. 'default' is a function returning the default value for the proposed input dialog. # There are args, need to display the dialog. # Extract Example: `...` in the docstring to put a predefined text # in the input dialog. # pragma: no cover # Parse user-supplied arguments and call the function. # ----------------------------------------------------------------------------- # Show shortcut utility functions # ----------------------------------------------------------------------------- Return a string representation of a shortcut. Return a QKeySequence or list of QKeySequence from a shortcut string. Display shortcuts. Display snippets. Show the shortcuts and snippets of an Actions instance. # ----------------------------------------------------------------------------- # Actions # ----------------------------------------------------------------------------- # Get the alias from the character after & if it exists. # Remove arguments with defaults from the list. # Remove arguments supplied in a partial. # Create the QAction instance. # Show an input dialog if there are args. # Number of expected arguments. # pragma: no cover # Set a busy cursor if set_busy is True. # pragma: no cover Group of actions bound to a GUI. This class attaches to a GUI and implements the following features: * Add and remove actions * Keyboard shortcuts for the actions * Display all shortcuts Constructor ----------- gui : GUI instance name : str Name of this group of actions. menu : str Name of the GUI menu that will contain the actions. submenu : str Name of the GUI submenu that will contain the actions. default_shortcuts : dict Map action names to keyboard shortcuts (regular strings). default_snippets : dict Map action names to snippets (regular strings). # Create the menu when creating the Actions instance. Return the QMenu depending on a combination of keyword arguments. # Defaults. # If the action is a view action, it should be added to the view's menu in the dock widget. # Create the submenu if there is one. # Create the submenu. # Make sure the action gets added to the submenu. Add an action with a keyboard shortcut. Parameters ---------- callback : function Take no argument if checkable is False, or a boolean (checked) if it is True name : str Action name, the callback's name by default. shortcut : str The keyboard shortcut for this action. alias : str Snippet, the name by default. prompt : boolean Whether this action should display a dialog with an input box where the user can write arguments to the callback function. n_args : int If prompt is True, specify the number of expected arguments. set_busy : boolean Whether to use a busy cursor while performing the action. prompt_default : str The default text in the input text box, if prompt is True. docstring : str The action docstring, to be displayed in the status bar when hovering over the action item in the menu. 
By default, the function's docstring. menu : str The name of the menu where the action should be added. It is automatically created if it doesn't exist. submenu : str The name of the submenu where the action should be added. It is automatically created if it doesn't exist. view : QWidget A view that belongs to the GUI, if the actions are to be added to the view's menu bar. view_submenu : str The name of a submenu in the view menu. checkable : boolean Whether the action is checkable (toggle on/off). checked : boolean Whether the checkable action is initially checked or not. show_shortcut : boolean Whether to show the shortcut in the Help action that displays all GUI shortcuts. icon : str Hexadecimal code of the font-awesome icon. toolbar : boolean Whether to add the action to the toolbar. # Allow to use either add(func) or @add or @add(...). # Get the name from the callback function if needed. # Skip existing action. # Set the status tip from the function's docstring. # Create and register the action. # Do not show private actions in the menu. # Find the menu in which the action should be added. # Add the action to the toolbar. # Register the alias -> name mapping. # Set the callback method. Add a separator. Parameters ---------- menu : str The name of the menu where the separator should be added. It is automatically created if it doesn't exist. submenu : str The name of the submenu where the separator should be added. It is automatically created if it doesn't exist. view : QWidget A view that belongs to the GUI, if the separator is to be added to the view's menu bar. view_submenu : str The name of a submenu in the view menu. Disable all actions, or only one if a name is passed. Enable all actions, or only one if a name is passed.. Get a QAction instance from its name. Run an action as specified by its name. # Resolve the alias if it is an alias. # Get the action. Remove an action. Remove all actions. A dictionary mapping action names to keyboard shortcuts. # Discard actions without shortcut and without an alias. # Only show alias for actions with no shortcut. Display all shortcuts in the console. Whether the Actions group contains a specified action. # ----------------------------------------------------------------------------- # Snippets # ----------------------------------------------------------------------------- Provide keyboard snippets to quickly execute actions from a GUI. This class attaches to a GUI and an `Actions` instance. To every command is associated a snippet with the same name, or with an alias as indicated in the action. The arguments of the action's callback functions can be provided in the snippet's command with a simple syntax. For example, the following command: ``` :my_action string 3-6 ``` corresponds to: ```python my_action('string', (3, 4, 5, 6)) ``` The snippet mode is activated with the `:` keyboard shortcut. A snippet command is activated with `Enter`, and one can leave the snippet mode with `Escape`. When the snippet mode is enabled (with `:`), this object adds a hidden Qt action for every keystroke. These actions are removed when the snippet mode is disabled. Constructor ----------- gui : GUI instance # HACK: Unicode characters do not seem to work on Python 2 # Allowed characters in snippet mode. # A Qt shortcut will be created for every character. # Register snippet mode shortcut. Enable the snippet mode (type action alias in the status bar). This is used to write a snippet message in the status bar. A cursor is appended at the end. 
Erase the last character in the snippet command. Disable the snippet mode and execute the command. # NOTE: we need to set back the actions (mode_off) before running # the command. Add mock Qt actions for snippet keystrokes. Used to enable snippet mode. # One action per allowed character. # Lowercase letters. # Uppercase letters. Execute a snippet command. May be overridden. # Try to run the snippet on all attached Actions instances. # This Actions instance doesn't contain the requested # snippet, trying the next attached Actions instance. Whether the snippet mode is enabled. Enable the snippet mode. # Save the current status message. # Silent all actions except the Snippets actions. Disable the snippet mode. # Reset the GUI status message that was set before the mode was # activated. # Re-enable all actions except the Snippets actions. # The `:` shortcut should always be enabled. | 2.36637 | 2 |
PP4E-Examples-1.4/Examples/PP4E/Tools/cleanpyc.py | AngelLiang/PP4E | 0 | 8739 | """
delete all .pyc bytecode files in a directory tree: use the
command line arg as root if given, else current working dir
"""
import os, sys
findonly = False
rootdir = os.getcwd() if len(sys.argv) == 1 else sys.argv[1]
found = removed = 0
for (thisDirLevel, subsHere, filesHere) in os.walk(rootdir):
for filename in filesHere:
if filename.endswith('.pyc'):
fullname = os.path.join(thisDirLevel, filename)
print('=>', fullname)
if not findonly:
try:
os.remove(fullname)
removed += 1
except:
type, inst = sys.exc_info()[:2]
print('*'*4, 'Failed:', filename, type, inst)
found += 1
print('Found', found, 'files, removed', removed)
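# Example invocations (paths are hypothetical):
# python cleanpyc.py # clean .pyc files under the current directory
# python cleanpyc.py C:\code\PP4E # clean .pyc files under the given root
# Setting findonly = True above reports matching files without deleting them.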
| """
delete all .pyc bytecode files in a directory tree: use the
command line arg as root if given, else current working dir
"""
import os, sys
findonly = False
rootdir = os.getcwd() if len(sys.argv) == 1 else sys.argv[1]
found = removed = 0
for (thisDirLevel, subsHere, filesHere) in os.walk(rootdir):
for filename in filesHere:
if filename.endswith('.pyc'):
fullname = os.path.join(thisDirLevel, filename)
print('=>', fullname)
if not findonly:
try:
os.remove(fullname)
removed += 1
except:
type, inst = sys.exc_info()[:2]
print('*'*4, 'Failed:', filename, type, inst)
found += 1
print('Found', found, 'files, removed', removed)
| en | 0.866688 | delete all .pyc bytecode files in a directory tree: use the command line arg as root if given, else current working dir | 2.971683 | 3 |
apps.py | louxfaure/sudoc_recouv | 1 | 8740 | <reponame>louxfaure/sudoc_recouv
from django.apps import AppConfig
class SudocRecouvConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'sudoc_recouv'
verbose_name = 'Analyses de recouvrement SUDOC'
| from django.apps import AppConfig
class SudocRecouvConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'sudoc_recouv'
verbose_name = 'Analyses de recouvrement SUDOC' | none | 1 | 1.236604 | 1 |
|
src/states.py | amancevice/terraform-aws-slack-interactive-components | 24 | 8741 | import boto3
from logger import logger
class States:
def __init__(self, boto3_session=None):
self.boto3_session = boto3_session or boto3.Session()
self.client = self.boto3_session.client('stepfunctions')
def fail(self, task_token, error, cause):
params = dict(taskToken=task_token, error=error, cause=cause)
logger.info('SEND TASK FAILURE %s', logger.json(params))
return self.client.send_task_failure(**params)
def heartbeat(self, task_token):
params = dict(taskToken=task_token)
logger.info('SEND TASK HEARTBEAT %s', logger.json(params))
return self.client.send_task_heartbeat(**params)
def succeed(self, task_token, output):
params = dict(taskToken=task_token, output=output)
logger.info('SEND TASK SUCCESS %s', logger.json(params))
return self.client.send_task_success(**params)
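# A minimal usage sketch (the task token value is hypothetical); for instance, inside a
# Lambda handler that completes a Step Functions callback task:
#
# states = States()
# states.heartbeat(task_token)
# states.succeed(task_token, output='{"ok": true}')
# # ...or, on failure:
# states.fail(task_token, error='HandlerError', cause='something went wrong')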
| import boto3
from logger import logger
class States:
def __init__(self, boto3_session=None):
self.boto3_session = boto3_session or boto3.Session()
self.client = self.boto3_session.client('stepfunctions')
def fail(self, task_token, error, cause):
params = dict(taskToken=task_token, error=error, cause=cause)
logger.info('SEND TASK FAILURE %s', logger.json(params))
return self.client.send_task_failure(**params)
def heartbeat(self, task_token):
params = dict(taskToken=task_token)
logger.info('SEND TASK HEARTBEAT %s', logger.json(params))
return self.client.send_task_heartbeat(**params)
def succeed(self, task_token, output):
params = dict(taskToken=task_token, output=output)
logger.info('SEND TASK SUCCESS %s', logger.json(params))
return self.client.send_task_success(**params)
| none | 1 | 2.467343 | 2 |
|
apps/controllerx/cx_core/type/light_controller.py | clach04/controllerx | 0 | 8742 | from typing import Any, Dict, Optional, Type, Union
from cx_const import Light, PredefinedActionsMapping
from cx_core.color_helper import get_color_wheel
from cx_core.controller import action
from cx_core.feature_support.light import LightSupport
from cx_core.integration import EventData
from cx_core.integration.deconz import DeCONZIntegration
from cx_core.integration.z2m import Z2MIntegration
from cx_core.release_hold_controller import ReleaseHoldController
from cx_core.stepper import Stepper
from cx_core.stepper.circular_stepper import CircularStepper
from cx_core.stepper.minmax_stepper import MinMaxStepper
from cx_core.type_controller import Entity, TypeController
DEFAULT_MANUAL_STEPS = 10
DEFAULT_AUTOMATIC_STEPS = 10
DEFAULT_MIN_BRIGHTNESS = 1
DEFAULT_MAX_BRIGHTNESS = 255
DEFAULT_MIN_WHITE_VALUE = 1
DEFAULT_MAX_WHITE_VALUE = 255
DEFAULT_MIN_COLOR_TEMP = 153
DEFAULT_MAX_COLOR_TEMP = 500
DEFAULT_TRANSITION = 300
DEFAULT_ADD_TRANSITION = True
DEFAULT_TRANSITION_TURN_TOGGLE = False
ColorMode = str
# Once the minimum supported version of Python is 3.8,
# we can declare the ColorMode as a Literal
# ColorMode = Literal["auto", "xy_color", "color_temp"]
class LightEntity(Entity):
color_mode: ColorMode
def __init__(self, name: str, color_mode: ColorMode = "auto") -> None:
super().__init__(name)
self.color_mode = color_mode
class LightController(TypeController[LightEntity], ReleaseHoldController):
"""
This is the main class that controls the lights for different devices.
Type of actions:
- On/Off/Toggle
- Brightness click and hold
- Color temperature click and hold
- xy color click and hold
If a light supports xy_color and color_temperature, then xy_color will be the
default functionality. Parameters taken:
- controller (required): Inherited from Controller
- light (required): This is either the light entity name or a dictionary as
{name: string, color_mode: auto | xy_color | color_temp}
- delay (optional): Inherited from ReleaseHoldController
- manual_steps (optional): Number of steps to go from min to max when clicking.
- automatic_steps (optional): Number of steps to go from min to max when smoothing.
"""
ATTRIBUTE_BRIGHTNESS = "brightness"
ATTRIBUTE_WHITE_VALUE = "white_value"
# With the following attribute, it will select color_temp or xy_color, depending on the light.
ATTRIBUTE_COLOR = "color"
ATTRIBUTE_COLOR_TEMP = "color_temp"
ATTRIBUTE_XY_COLOR = "xy_color"
index_color = 0
value_attribute = None
# These are intermediate variables to store the checked value
smooth_power_on_check: bool
remove_transition_check: bool
domains = ["light"]
entity_arg = "light"
async def init(self) -> None:
manual_steps = self.args.get("manual_steps", DEFAULT_MANUAL_STEPS)
automatic_steps = self.args.get("automatic_steps", DEFAULT_AUTOMATIC_STEPS)
self.min_brightness = self.args.get("min_brightness", DEFAULT_MIN_BRIGHTNESS)
self.max_brightness = self.args.get("max_brightness", DEFAULT_MAX_BRIGHTNESS)
self.min_white_value = self.args.get("min_white_value", DEFAULT_MIN_WHITE_VALUE)
self.max_white_value = self.args.get("max_white_value", DEFAULT_MAX_WHITE_VALUE)
self.min_color_temp = self.args.get("min_color_temp", DEFAULT_MIN_COLOR_TEMP)
self.max_color_temp = self.args.get("max_color_temp", DEFAULT_MAX_COLOR_TEMP)
self.transition = self.args.get("transition", DEFAULT_TRANSITION)
self.color_wheel = get_color_wheel(
self.args.get("color_wheel", "default_color_wheel")
)
color_stepper = CircularStepper(
0, len(self.color_wheel) - 1, len(self.color_wheel)
)
self.manual_steppers: Dict[str, Stepper] = {
LightController.ATTRIBUTE_BRIGHTNESS: MinMaxStepper(
self.min_brightness, self.max_brightness, manual_steps
),
LightController.ATTRIBUTE_WHITE_VALUE: MinMaxStepper(
self.min_white_value, self.max_white_value, manual_steps
),
LightController.ATTRIBUTE_COLOR_TEMP: MinMaxStepper(
self.min_color_temp, self.max_color_temp, manual_steps
),
LightController.ATTRIBUTE_XY_COLOR: color_stepper,
}
self.automatic_steppers: Dict[str, Stepper] = {
LightController.ATTRIBUTE_BRIGHTNESS: MinMaxStepper(
self.min_brightness, self.max_brightness, automatic_steps
),
LightController.ATTRIBUTE_WHITE_VALUE: MinMaxStepper(
self.min_white_value, self.max_white_value, automatic_steps
),
LightController.ATTRIBUTE_COLOR_TEMP: MinMaxStepper(
self.min_color_temp, self.max_color_temp, automatic_steps
),
LightController.ATTRIBUTE_XY_COLOR: color_stepper,
}
self.smooth_power_on = self.args.get(
"smooth_power_on", self.supports_smooth_power_on()
)
self.add_transition = self.args.get("add_transition", DEFAULT_ADD_TRANSITION)
self.add_transition_turn_toggle = self.args.get(
"add_transition_turn_toggle", DEFAULT_TRANSITION_TURN_TOGGLE
)
await super().init()
def _get_entity_type(self) -> Type[LightEntity]:
return LightEntity
def get_predefined_actions_mapping(self) -> PredefinedActionsMapping:
return {
Light.ON: self.on,
Light.OFF: self.off,
Light.TOGGLE: self.toggle,
Light.TOGGLE_FULL_BRIGHTNESS: (
self.toggle_full,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.TOGGLE_FULL_WHITE_VALUE: (
self.toggle_full,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.TOGGLE_FULL_COLOR_TEMP: (
self.toggle_full,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.TOGGLE_MIN_BRIGHTNESS: (
self.toggle_min,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.TOGGLE_MIN_WHITE_VALUE: (
self.toggle_min,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.TOGGLE_MIN_COLOR_TEMP: (
self.toggle_min,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.RELEASE: self.release,
Light.ON_FULL_BRIGHTNESS: (
self.on_full,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.ON_FULL_WHITE_VALUE: (
self.on_full,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.ON_FULL_COLOR_TEMP: (
self.on_full,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.ON_MIN_BRIGHTNESS: (
self.on_min,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.ON_MIN_WHITE_VALUE: (
self.on_min,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.ON_MIN_COLOR_TEMP: (
self.on_min,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.SET_HALF_BRIGHTNESS: (
self.set_value,
(
LightController.ATTRIBUTE_BRIGHTNESS,
0.5,
),
),
Light.SET_HALF_WHITE_VALUE: (
self.set_value,
(
LightController.ATTRIBUTE_WHITE_VALUE,
0.5,
),
),
Light.SET_HALF_COLOR_TEMP: (
self.set_value,
(
LightController.ATTRIBUTE_COLOR_TEMP,
0.5,
),
),
Light.SYNC: self.sync,
Light.CLICK_BRIGHTNESS_UP: (
self.click,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.UP,
),
),
Light.CLICK_BRIGHTNESS_DOWN: (
self.click,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.DOWN,
),
),
Light.CLICK_WHITE_VALUE_UP: (
self.click,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.UP,
),
),
Light.CLICK_WHITE_VALUE_DOWN: (
self.click,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.DOWN,
),
),
Light.CLICK_COLOR_UP: (
self.click,
(
LightController.ATTRIBUTE_COLOR,
Stepper.UP,
),
),
Light.CLICK_COLOR_DOWN: (
self.click,
(
LightController.ATTRIBUTE_COLOR,
Stepper.DOWN,
),
),
Light.CLICK_COLOR_TEMP_UP: (
self.click,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.UP,
),
),
Light.CLICK_COLOR_TEMP_DOWN: (
self.click,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.DOWN,
),
),
Light.CLICK_XY_COLOR_UP: (
self.click,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.UP,
),
),
Light.CLICK_XY_COLOR_DOWN: (
self.click,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.DOWN,
),
),
Light.HOLD_BRIGHTNESS_UP: (
self.hold,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.UP,
),
),
Light.HOLD_BRIGHTNESS_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.DOWN,
),
),
Light.HOLD_BRIGHTNESS_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.TOGGLE,
),
),
Light.HOLD_WHITE_VALUE_UP: (
self.hold,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.UP,
),
),
Light.HOLD_WHITE_VALUE_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.DOWN,
),
),
Light.HOLD_WHITE_VALUE_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.TOGGLE,
),
),
Light.HOLD_COLOR_UP: (
self.hold,
(
LightController.ATTRIBUTE_COLOR,
Stepper.UP,
),
),
Light.HOLD_COLOR_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_COLOR,
Stepper.DOWN,
),
),
Light.HOLD_COLOR_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_COLOR,
Stepper.TOGGLE,
),
),
Light.HOLD_COLOR_TEMP_UP: (
self.hold,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.UP,
),
),
Light.HOLD_COLOR_TEMP_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.DOWN,
),
),
Light.HOLD_COLOR_TEMP_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.TOGGLE,
),
),
Light.HOLD_XY_COLOR_UP: (
self.hold,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.UP,
),
),
Light.HOLD_XY_COLOR_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.DOWN,
),
),
Light.HOLD_XY_COLOR_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.TOGGLE,
),
),
Light.XYCOLOR_FROM_CONTROLLER: self.xycolor_from_controller,
Light.COLORTEMP_FROM_CONTROLLER: self.colortemp_from_controller,
}
async def check_remove_transition(self, on_from_user: bool) -> bool:
return (
not self.add_transition
or (on_from_user and not self.add_transition_turn_toggle)
or await self.feature_support.not_supported(LightSupport.TRANSITION)
)
async def call_light_service(self, service: str, **attributes) -> None:
if "transition" not in attributes:
attributes["transition"] = self.transition / 1000
if self.remove_transition_check:
del attributes["transition"]
await self.call_service(service, entity_id=self.entity.name, **attributes)
async def _on(self, **attributes) -> None:
await self.call_light_service("light/turn_on", **attributes)
@action
async def on(self, **attributes) -> None:
await self._on(**attributes)
async def _off(self, **attributes) -> None:
await self.call_light_service("light/turn_off", **attributes)
@action
async def off(self, **attributes) -> None:
await self._off(**attributes)
async def _toggle(self, **attributes) -> None:
await self.call_light_service("light/toggle", **attributes)
@action
async def toggle(self, **attributes) -> None:
await self._toggle(**attributes)
async def _set_value(self, attribute: str, fraction: float) -> None:
fraction = max(0, min(fraction, 1))
stepper = self.automatic_steppers[attribute]
if isinstance(stepper, MinMaxStepper):
min_ = stepper.minmax.min
max_ = stepper.minmax.max
value = (max_ - min_) * fraction + min_
await self._on(**{attribute: value})
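    # Worked example with the default brightness range (1-255): `fraction=0.5` maps to
    # (255 - 1) * 0.5 + 1 = 128, so `set_value(ATTRIBUTE_BRIGHTNESS, 0.5)` turns the
    # light on at brightness 128 (this is what the SET_HALF_* predefined actions use).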
@action
async def set_value(self, attribute: str, fraction: float) -> None:
await self._set_value(attribute, fraction)
@action
async def toggle_full(self, attribute: str) -> None:
stepper = self.automatic_steppers[attribute]
if isinstance(stepper, MinMaxStepper):
await self._toggle(**{attribute: stepper.minmax.max})
@action
async def toggle_min(self, attribute: str) -> None:
stepper = self.automatic_steppers[attribute]
if isinstance(stepper, MinMaxStepper):
await self._toggle(**{attribute: stepper.minmax.min})
async def _on_full(self, attribute: str) -> None:
await self._set_value(attribute, 1)
@action
async def on_full(self, attribute: str) -> None:
await self._on_full(attribute)
async def _on_min(self, attribute: str) -> None:
await self._set_value(attribute, 0)
@action
async def on_min(self, attribute: str) -> None:
await self._on_min(attribute)
@action
async def sync(self) -> None:
attributes: Dict[Any, Any] = {}
try:
color_attribute = await self.get_attribute(LightController.ATTRIBUTE_COLOR)
if color_attribute == LightController.ATTRIBUTE_COLOR_TEMP:
attributes[color_attribute] = 370 # 2700K light
else:
attributes[color_attribute] = (0.323, 0.329) # white colour
except ValueError:
self.log(
"⚠️ `sync` action will only change brightness",
level="WARNING",
ascii_encode=False,
)
await self._on(**attributes, brightness=self.max_brightness)
@action
async def xycolor_from_controller(self, extra: Optional[EventData]) -> None:
if extra is None:
self.log("No event data present", level="WARNING")
return
if isinstance(self.integration, Z2MIntegration):
if "action_color" not in extra:
self.log(
"`action_color` is not present in the MQTT payload", level="WARNING"
)
return
xy_color = extra["action_color"]
await self._on(xy_color=(xy_color["x"], xy_color["y"]))
elif isinstance(self.integration, DeCONZIntegration):
if "xy" not in extra:
self.log("`xy` is not present in the deCONZ event", level="WARNING")
return
await self._on(xy_color=extra["xy"])
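    # For illustration (payload shapes are assumptions based on the keys read above):
    # with Z2M the event data is expected to look like
    # {"action_color": {"x": 0.41, "y": 0.39}, ...}, and with deCONZ like
    # {"xy": [0.41, 0.39], ...}; in both cases the light is turned on with that xy colour.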
@action
async def colortemp_from_controller(self, extra: Optional[EventData]) -> None:
if extra is None:
self.log("No event data present", level="WARNING")
return
if isinstance(self.integration, Z2MIntegration):
if "action_color_temperature" not in extra:
self.log(
"`action_color_temperature` is not present in the MQTT payload",
level="WARNING",
)
return
await self._on(color_temp=extra["action_color_temperature"])
async def get_attribute(self, attribute: str) -> str:
if attribute == LightController.ATTRIBUTE_COLOR:
if self.entity.color_mode == "auto":
if await self.feature_support.is_supported(LightSupport.COLOR):
return LightController.ATTRIBUTE_XY_COLOR
elif await self.feature_support.is_supported(LightSupport.COLOR_TEMP):
return LightController.ATTRIBUTE_COLOR_TEMP
else:
raise ValueError(
"This light does not support xy_color or color_temp"
)
else:
return self.entity.color_mode
else:
return attribute
async def get_value_attribute(self, attribute: str) -> Union[float, int]:
if self.smooth_power_on_check:
return 0
if attribute == LightController.ATTRIBUTE_XY_COLOR:
return 0
elif (
attribute == LightController.ATTRIBUTE_BRIGHTNESS
or attribute == LightController.ATTRIBUTE_WHITE_VALUE
or attribute == LightController.ATTRIBUTE_COLOR_TEMP
):
value = await self.get_entity_state(self.entity.name, attribute)
if value is None:
raise ValueError(
f"Value for `{attribute}` attribute could not be retrieved "
f"from `{self.entity.name}`. "
"Check the FAQ to know more about this error: "
"https://xaviml.github.io/controllerx/faq"
)
else:
try:
return float(value)
except ValueError:
raise ValueError(
f"Attribute `{attribute}` with `{value}` as a value "
"could not be converted to float"
)
else:
raise ValueError(f"Attribute `{attribute}` not expected")
def check_smooth_power_on(
self, attribute: str, direction: str, light_state: str
) -> bool:
return (
direction != Stepper.DOWN
and attribute == self.ATTRIBUTE_BRIGHTNESS
and self.smooth_power_on
and light_state == "off"
)
async def before_action(self, action: str, *args, **kwargs) -> bool:
to_return = True
if action in ("click", "hold"):
attribute, direction = args
light_state: str = await self.get_entity_state(self.entity.name)
self.smooth_power_on_check = self.check_smooth_power_on(
attribute, direction, light_state
)
self.remove_transition_check = await self.check_remove_transition(
on_from_user=False
)
to_return = (light_state == "on") or self.smooth_power_on_check
else:
self.remove_transition_check = await self.check_remove_transition(
on_from_user=True
)
self.smooth_power_on_check = False
return await super().before_action(action, *args, **kwargs) and to_return
@action
async def click(self, attribute: str, direction: str) -> None:
attribute = await self.get_attribute(attribute)
self.value_attribute = await self.get_value_attribute(attribute)
await self.change_light_state(
self.value_attribute,
attribute,
direction,
self.manual_steppers[attribute],
"click",
)
@action
async def hold(self, attribute: str, direction: str) -> None: # type: ignore
attribute = await self.get_attribute(attribute)
self.value_attribute = await self.get_value_attribute(attribute)
self.log(
f"Attribute value before running the hold action: {self.value_attribute}",
level="DEBUG",
)
if direction == Stepper.TOGGLE:
self.log(
f"Previous direction: {self.automatic_steppers[attribute].previous_direction}",
level="DEBUG",
)
direction = self.automatic_steppers[attribute].get_direction(
self.value_attribute, direction
)
self.log(f"Going direction: {direction}", level="DEBUG")
await super().hold(attribute, direction)
async def hold_loop(self, attribute: str, direction: str) -> bool: # type: ignore
if self.value_attribute is None:
return True
return await self.change_light_state(
self.value_attribute,
attribute,
direction,
self.automatic_steppers[attribute],
"hold",
)
async def change_light_state(
self,
old: float,
attribute: str,
direction: str,
stepper: Stepper,
action_type: str,
) -> bool:
"""
        This function changes the state of the light depending on the previous
        value and attribute. It returns True when no more changes need to be done;
        otherwise, it returns False.
"""
attributes: Dict[str, Any]
if attribute == LightController.ATTRIBUTE_XY_COLOR:
index_color, _ = stepper.step(self.index_color, direction)
self.index_color = int(index_color)
xy_color = self.color_wheel[self.index_color]
attributes = {attribute: xy_color}
if action_type == "hold":
attributes["transition"] = self.delay / 1000
await self._on(**attributes)
            # In xy_color mode the loop never finishes on its own; the hold loop only
            # stops when the hold action is called on button release.
            # I haven't experienced any problems with this, but a future implementation
            # could force the loop to stop after 4 or 5 iterations as a safety measure.
return False
if self.smooth_power_on_check:
await self._on_min(attribute)
            # After smooth power on, the light should not brighten up.
return True
new_state_attribute, exceeded = stepper.step(old, direction)
new_state_attribute = round(new_state_attribute, 3)
attributes = {attribute: new_state_attribute}
if action_type == "hold":
attributes["transition"] = self.delay / 1000
await self._on(**attributes)
self.value_attribute = new_state_attribute
return exceeded
def supports_smooth_power_on(self) -> bool:
"""
        This function can be overridden for each device to indicate the default behaviour
        of the controller when the associated light is off and a brightness-increase
        event is received.
        It returns True if the associated light should be turned on with minimum
        brightness when such an event is received while the lamp is off.
        The behaviour can be overridden by the user with the 'smooth_power_on' option
        in the app configuration.
"""
return False
| from typing import Any, Dict, Optional, Type, Union
from cx_const import Light, PredefinedActionsMapping
from cx_core.color_helper import get_color_wheel
from cx_core.controller import action
from cx_core.feature_support.light import LightSupport
from cx_core.integration import EventData
from cx_core.integration.deconz import DeCONZIntegration
from cx_core.integration.z2m import Z2MIntegration
from cx_core.release_hold_controller import ReleaseHoldController
from cx_core.stepper import Stepper
from cx_core.stepper.circular_stepper import CircularStepper
from cx_core.stepper.minmax_stepper import MinMaxStepper
from cx_core.type_controller import Entity, TypeController
DEFAULT_MANUAL_STEPS = 10
DEFAULT_AUTOMATIC_STEPS = 10
DEFAULT_MIN_BRIGHTNESS = 1
DEFAULT_MAX_BRIGHTNESS = 255
DEFAULT_MIN_WHITE_VALUE = 1
DEFAULT_MAX_WHITE_VALUE = 255
DEFAULT_MIN_COLOR_TEMP = 153
DEFAULT_MAX_COLOR_TEMP = 500
DEFAULT_TRANSITION = 300
DEFAULT_ADD_TRANSITION = True
DEFAULT_TRANSITION_TURN_TOGGLE = False
ColorMode = str
# Once the minimum supported version of Python is 3.8,
# we can declare the ColorMode as a Literal
# ColorMode = Literal["auto", "xy_color", "color_temp"]
class LightEntity(Entity):
color_mode: ColorMode
def __init__(self, name: str, color_mode: ColorMode = "auto") -> None:
super().__init__(name)
self.color_mode = color_mode
class LightController(TypeController[LightEntity], ReleaseHoldController):
"""
This is the main class that controls the lights for different devices.
Type of actions:
- On/Off/Toggle
- Brightness click and hold
- Color temperature click and hold
- xy color click and hold
If a light supports xy_color and color_temperature, then xy_color will be the
default functionality. Parameters taken:
- controller (required): Inherited from Controller
- light (required): This is either the light entity name or a dictionary as
{name: string, color_mode: auto | xy_color | color_temp}
- delay (optional): Inherited from ReleaseHoldController
- manual_steps (optional): Number of steps to go from min to max when clicking.
- automatic_steps (optional): Number of steps to go from min to max when smoothing.
"""
ATTRIBUTE_BRIGHTNESS = "brightness"
ATTRIBUTE_WHITE_VALUE = "white_value"
# With the following attribute, it will select color_temp or xy_color, depending on the light.
ATTRIBUTE_COLOR = "color"
ATTRIBUTE_COLOR_TEMP = "color_temp"
ATTRIBUTE_XY_COLOR = "xy_color"
index_color = 0
value_attribute = None
# These are intermediate variables to store the checked value
smooth_power_on_check: bool
remove_transition_check: bool
domains = ["light"]
entity_arg = "light"
async def init(self) -> None:
manual_steps = self.args.get("manual_steps", DEFAULT_MANUAL_STEPS)
automatic_steps = self.args.get("automatic_steps", DEFAULT_AUTOMATIC_STEPS)
self.min_brightness = self.args.get("min_brightness", DEFAULT_MIN_BRIGHTNESS)
self.max_brightness = self.args.get("max_brightness", DEFAULT_MAX_BRIGHTNESS)
self.min_white_value = self.args.get("min_white_value", DEFAULT_MIN_WHITE_VALUE)
self.max_white_value = self.args.get("max_white_value", DEFAULT_MAX_WHITE_VALUE)
self.min_color_temp = self.args.get("min_color_temp", DEFAULT_MIN_COLOR_TEMP)
self.max_color_temp = self.args.get("max_color_temp", DEFAULT_MAX_COLOR_TEMP)
self.transition = self.args.get("transition", DEFAULT_TRANSITION)
self.color_wheel = get_color_wheel(
self.args.get("color_wheel", "default_color_wheel")
)
color_stepper = CircularStepper(
0, len(self.color_wheel) - 1, len(self.color_wheel)
)
self.manual_steppers: Dict[str, Stepper] = {
LightController.ATTRIBUTE_BRIGHTNESS: MinMaxStepper(
self.min_brightness, self.max_brightness, manual_steps
),
LightController.ATTRIBUTE_WHITE_VALUE: MinMaxStepper(
self.min_white_value, self.max_white_value, manual_steps
),
LightController.ATTRIBUTE_COLOR_TEMP: MinMaxStepper(
self.min_color_temp, self.max_color_temp, manual_steps
),
LightController.ATTRIBUTE_XY_COLOR: color_stepper,
}
self.automatic_steppers: Dict[str, Stepper] = {
LightController.ATTRIBUTE_BRIGHTNESS: MinMaxStepper(
self.min_brightness, self.max_brightness, automatic_steps
),
LightController.ATTRIBUTE_WHITE_VALUE: MinMaxStepper(
self.min_white_value, self.max_white_value, automatic_steps
),
LightController.ATTRIBUTE_COLOR_TEMP: MinMaxStepper(
self.min_color_temp, self.max_color_temp, automatic_steps
),
LightController.ATTRIBUTE_XY_COLOR: color_stepper,
}
self.smooth_power_on = self.args.get(
"smooth_power_on", self.supports_smooth_power_on()
)
self.add_transition = self.args.get("add_transition", DEFAULT_ADD_TRANSITION)
self.add_transition_turn_toggle = self.args.get(
"add_transition_turn_toggle", DEFAULT_TRANSITION_TURN_TOGGLE
)
await super().init()
def _get_entity_type(self) -> Type[LightEntity]:
return LightEntity
def get_predefined_actions_mapping(self) -> PredefinedActionsMapping:
return {
Light.ON: self.on,
Light.OFF: self.off,
Light.TOGGLE: self.toggle,
Light.TOGGLE_FULL_BRIGHTNESS: (
self.toggle_full,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.TOGGLE_FULL_WHITE_VALUE: (
self.toggle_full,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.TOGGLE_FULL_COLOR_TEMP: (
self.toggle_full,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.TOGGLE_MIN_BRIGHTNESS: (
self.toggle_min,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.TOGGLE_MIN_WHITE_VALUE: (
self.toggle_min,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.TOGGLE_MIN_COLOR_TEMP: (
self.toggle_min,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.RELEASE: self.release,
Light.ON_FULL_BRIGHTNESS: (
self.on_full,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.ON_FULL_WHITE_VALUE: (
self.on_full,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.ON_FULL_COLOR_TEMP: (
self.on_full,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.ON_MIN_BRIGHTNESS: (
self.on_min,
(LightController.ATTRIBUTE_BRIGHTNESS,),
),
Light.ON_MIN_WHITE_VALUE: (
self.on_min,
(LightController.ATTRIBUTE_WHITE_VALUE,),
),
Light.ON_MIN_COLOR_TEMP: (
self.on_min,
(LightController.ATTRIBUTE_COLOR_TEMP,),
),
Light.SET_HALF_BRIGHTNESS: (
self.set_value,
(
LightController.ATTRIBUTE_BRIGHTNESS,
0.5,
),
),
Light.SET_HALF_WHITE_VALUE: (
self.set_value,
(
LightController.ATTRIBUTE_WHITE_VALUE,
0.5,
),
),
Light.SET_HALF_COLOR_TEMP: (
self.set_value,
(
LightController.ATTRIBUTE_COLOR_TEMP,
0.5,
),
),
Light.SYNC: self.sync,
Light.CLICK_BRIGHTNESS_UP: (
self.click,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.UP,
),
),
Light.CLICK_BRIGHTNESS_DOWN: (
self.click,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.DOWN,
),
),
Light.CLICK_WHITE_VALUE_UP: (
self.click,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.UP,
),
),
Light.CLICK_WHITE_VALUE_DOWN: (
self.click,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.DOWN,
),
),
Light.CLICK_COLOR_UP: (
self.click,
(
LightController.ATTRIBUTE_COLOR,
Stepper.UP,
),
),
Light.CLICK_COLOR_DOWN: (
self.click,
(
LightController.ATTRIBUTE_COLOR,
Stepper.DOWN,
),
),
Light.CLICK_COLOR_TEMP_UP: (
self.click,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.UP,
),
),
Light.CLICK_COLOR_TEMP_DOWN: (
self.click,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.DOWN,
),
),
Light.CLICK_XY_COLOR_UP: (
self.click,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.UP,
),
),
Light.CLICK_XY_COLOR_DOWN: (
self.click,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.DOWN,
),
),
Light.HOLD_BRIGHTNESS_UP: (
self.hold,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.UP,
),
),
Light.HOLD_BRIGHTNESS_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.DOWN,
),
),
Light.HOLD_BRIGHTNESS_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_BRIGHTNESS,
Stepper.TOGGLE,
),
),
Light.HOLD_WHITE_VALUE_UP: (
self.hold,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.UP,
),
),
Light.HOLD_WHITE_VALUE_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.DOWN,
),
),
Light.HOLD_WHITE_VALUE_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_WHITE_VALUE,
Stepper.TOGGLE,
),
),
Light.HOLD_COLOR_UP: (
self.hold,
(
LightController.ATTRIBUTE_COLOR,
Stepper.UP,
),
),
Light.HOLD_COLOR_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_COLOR,
Stepper.DOWN,
),
),
Light.HOLD_COLOR_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_COLOR,
Stepper.TOGGLE,
),
),
Light.HOLD_COLOR_TEMP_UP: (
self.hold,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.UP,
),
),
Light.HOLD_COLOR_TEMP_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.DOWN,
),
),
Light.HOLD_COLOR_TEMP_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_COLOR_TEMP,
Stepper.TOGGLE,
),
),
Light.HOLD_XY_COLOR_UP: (
self.hold,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.UP,
),
),
Light.HOLD_XY_COLOR_DOWN: (
self.hold,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.DOWN,
),
),
Light.HOLD_XY_COLOR_TOGGLE: (
self.hold,
(
LightController.ATTRIBUTE_XY_COLOR,
Stepper.TOGGLE,
),
),
Light.XYCOLOR_FROM_CONTROLLER: self.xycolor_from_controller,
Light.COLORTEMP_FROM_CONTROLLER: self.colortemp_from_controller,
}
async def check_remove_transition(self, on_from_user: bool) -> bool:
return (
not self.add_transition
or (on_from_user and not self.add_transition_turn_toggle)
or await self.feature_support.not_supported(LightSupport.TRANSITION)
)
async def call_light_service(self, service: str, **attributes) -> None:
if "transition" not in attributes:
attributes["transition"] = self.transition / 1000
if self.remove_transition_check:
del attributes["transition"]
await self.call_service(service, entity_id=self.entity.name, **attributes)
async def _on(self, **attributes) -> None:
await self.call_light_service("light/turn_on", **attributes)
@action
async def on(self, **attributes) -> None:
await self._on(**attributes)
async def _off(self, **attributes) -> None:
await self.call_light_service("light/turn_off", **attributes)
@action
async def off(self, **attributes) -> None:
await self._off(**attributes)
async def _toggle(self, **attributes) -> None:
await self.call_light_service("light/toggle", **attributes)
@action
async def toggle(self, **attributes) -> None:
await self._toggle(**attributes)
async def _set_value(self, attribute: str, fraction: float) -> None:
fraction = max(0, min(fraction, 1))
stepper = self.automatic_steppers[attribute]
if isinstance(stepper, MinMaxStepper):
min_ = stepper.minmax.min
max_ = stepper.minmax.max
value = (max_ - min_) * fraction + min_
await self._on(**{attribute: value})
@action
async def set_value(self, attribute: str, fraction: float) -> None:
await self._set_value(attribute, fraction)
@action
async def toggle_full(self, attribute: str) -> None:
stepper = self.automatic_steppers[attribute]
if isinstance(stepper, MinMaxStepper):
await self._toggle(**{attribute: stepper.minmax.max})
@action
async def toggle_min(self, attribute: str) -> None:
stepper = self.automatic_steppers[attribute]
if isinstance(stepper, MinMaxStepper):
await self._toggle(**{attribute: stepper.minmax.min})
async def _on_full(self, attribute: str) -> None:
await self._set_value(attribute, 1)
@action
async def on_full(self, attribute: str) -> None:
await self._on_full(attribute)
async def _on_min(self, attribute: str) -> None:
await self._set_value(attribute, 0)
@action
async def on_min(self, attribute: str) -> None:
await self._on_min(attribute)
@action
async def sync(self) -> None:
attributes: Dict[Any, Any] = {}
try:
color_attribute = await self.get_attribute(LightController.ATTRIBUTE_COLOR)
if color_attribute == LightController.ATTRIBUTE_COLOR_TEMP:
attributes[color_attribute] = 370 # 2700K light
else:
attributes[color_attribute] = (0.323, 0.329) # white colour
except ValueError:
self.log(
"⚠️ `sync` action will only change brightness",
level="WARNING",
ascii_encode=False,
)
await self._on(**attributes, brightness=self.max_brightness)
@action
async def xycolor_from_controller(self, extra: Optional[EventData]) -> None:
if extra is None:
self.log("No event data present", level="WARNING")
return
if isinstance(self.integration, Z2MIntegration):
if "action_color" not in extra:
self.log(
"`action_color` is not present in the MQTT payload", level="WARNING"
)
return
xy_color = extra["action_color"]
await self._on(xy_color=(xy_color["x"], xy_color["y"]))
elif isinstance(self.integration, DeCONZIntegration):
if "xy" not in extra:
self.log("`xy` is not present in the deCONZ event", level="WARNING")
return
await self._on(xy_color=extra["xy"])
@action
async def colortemp_from_controller(self, extra: Optional[EventData]) -> None:
if extra is None:
self.log("No event data present", level="WARNING")
return
if isinstance(self.integration, Z2MIntegration):
if "action_color_temperature" not in extra:
self.log(
"`action_color_temperature` is not present in the MQTT payload",
level="WARNING",
)
return
await self._on(color_temp=extra["action_color_temperature"])
async def get_attribute(self, attribute: str) -> str:
if attribute == LightController.ATTRIBUTE_COLOR:
if self.entity.color_mode == "auto":
if await self.feature_support.is_supported(LightSupport.COLOR):
return LightController.ATTRIBUTE_XY_COLOR
elif await self.feature_support.is_supported(LightSupport.COLOR_TEMP):
return LightController.ATTRIBUTE_COLOR_TEMP
else:
raise ValueError(
"This light does not support xy_color or color_temp"
)
else:
return self.entity.color_mode
else:
return attribute
async def get_value_attribute(self, attribute: str) -> Union[float, int]:
if self.smooth_power_on_check:
return 0
if attribute == LightController.ATTRIBUTE_XY_COLOR:
return 0
elif (
attribute == LightController.ATTRIBUTE_BRIGHTNESS
or attribute == LightController.ATTRIBUTE_WHITE_VALUE
or attribute == LightController.ATTRIBUTE_COLOR_TEMP
):
value = await self.get_entity_state(self.entity.name, attribute)
if value is None:
raise ValueError(
f"Value for `{attribute}` attribute could not be retrieved "
f"from `{self.entity.name}`. "
"Check the FAQ to know more about this error: "
"https://xaviml.github.io/controllerx/faq"
)
else:
try:
return float(value)
except ValueError:
raise ValueError(
f"Attribute `{attribute}` with `{value}` as a value "
"could not be converted to float"
)
else:
raise ValueError(f"Attribute `{attribute}` not expected")
def check_smooth_power_on(
self, attribute: str, direction: str, light_state: str
) -> bool:
return (
direction != Stepper.DOWN
and attribute == self.ATTRIBUTE_BRIGHTNESS
and self.smooth_power_on
and light_state == "off"
)
async def before_action(self, action: str, *args, **kwargs) -> bool:
to_return = True
if action in ("click", "hold"):
attribute, direction = args
light_state: str = await self.get_entity_state(self.entity.name)
self.smooth_power_on_check = self.check_smooth_power_on(
attribute, direction, light_state
)
self.remove_transition_check = await self.check_remove_transition(
on_from_user=False
)
to_return = (light_state == "on") or self.smooth_power_on_check
else:
self.remove_transition_check = await self.check_remove_transition(
on_from_user=True
)
self.smooth_power_on_check = False
return await super().before_action(action, *args, **kwargs) and to_return
@action
async def click(self, attribute: str, direction: str) -> None:
attribute = await self.get_attribute(attribute)
self.value_attribute = await self.get_value_attribute(attribute)
await self.change_light_state(
self.value_attribute,
attribute,
direction,
self.manual_steppers[attribute],
"click",
)
@action
async def hold(self, attribute: str, direction: str) -> None: # type: ignore
attribute = await self.get_attribute(attribute)
self.value_attribute = await self.get_value_attribute(attribute)
self.log(
f"Attribute value before running the hold action: {self.value_attribute}",
level="DEBUG",
)
if direction == Stepper.TOGGLE:
self.log(
f"Previous direction: {self.automatic_steppers[attribute].previous_direction}",
level="DEBUG",
)
direction = self.automatic_steppers[attribute].get_direction(
self.value_attribute, direction
)
self.log(f"Going direction: {direction}", level="DEBUG")
await super().hold(attribute, direction)
async def hold_loop(self, attribute: str, direction: str) -> bool: # type: ignore
if self.value_attribute is None:
return True
return await self.change_light_state(
self.value_attribute,
attribute,
direction,
self.automatic_steppers[attribute],
"hold",
)
async def change_light_state(
self,
old: float,
attribute: str,
direction: str,
stepper: Stepper,
action_type: str,
) -> bool:
"""
        This function changes the state of the light depending on the previous
        value and attribute. It returns True when no more changes need to be done;
        otherwise, it returns False.
"""
attributes: Dict[str, Any]
if attribute == LightController.ATTRIBUTE_XY_COLOR:
index_color, _ = stepper.step(self.index_color, direction)
self.index_color = int(index_color)
xy_color = self.color_wheel[self.index_color]
attributes = {attribute: xy_color}
if action_type == "hold":
attributes["transition"] = self.delay / 1000
await self._on(**attributes)
            # In xy_color mode the loop never finishes on its own; the hold loop only
            # stops when the hold action is called on button release.
            # I haven't experienced any problems with this, but a future implementation
            # could force the loop to stop after 4 or 5 iterations as a safety measure.
return False
if self.smooth_power_on_check:
await self._on_min(attribute)
            # After smooth power on, the light should not brighten up.
return True
new_state_attribute, exceeded = stepper.step(old, direction)
new_state_attribute = round(new_state_attribute, 3)
attributes = {attribute: new_state_attribute}
if action_type == "hold":
attributes["transition"] = self.delay / 1000
await self._on(**attributes)
self.value_attribute = new_state_attribute
return exceeded
def supports_smooth_power_on(self) -> bool:
"""
        This function can be overridden for each device to indicate the default behaviour
        of the controller when the associated light is off and a brightness-increase
        event is received.
        It returns True if the associated light should be turned on with minimum
        brightness when such an event is received while the lamp is off.
        The behaviour can be overridden by the user with the 'smooth_power_on' option
        in the app configuration.
"""
return False
| en | 0.846925 | # Once the minimum supported version of Python is 3.8, # we can declare the ColorMode as a Literal # ColorMode = Literal["auto", "xy_color", "color_temp"] This is the main class that controls the lights for different devices. Type of actions: - On/Off/Toggle - Brightness click and hold - Color temperature click and hold - xy color click and hold If a light supports xy_color and color_temperature, then xy_color will be the default functionality. Parameters taken: - controller (required): Inherited from Controller - light (required): This is either the light entity name or a dictionary as {name: string, color_mode: auto | xy_color | color_temp} - delay (optional): Inherited from ReleaseHoldController - manual_steps (optional): Number of steps to go from min to max when clicking. - automatic_steps (optional): Number of steps to go from min to max when smoothing. # With the following attribute, it will select color_temp or xy_color, depending on the light. # These are intermediate variables to store the checked value # 2700K light # white colour # type: ignore # type: ignore This functions changes the state of the light depending on the previous value and attribute. It returns True when no more changes will need to be done. Otherwise, it returns False. # In case of xy_color mode it never finishes the loop, the hold loop # will only stop if the hold action is called when releasing the button. # I haven't experimented any problems with it, but a future implementation # would be to force the loop to stop after 4 or 5 loops as a safety measure. # # After smooth power on, the light should not brighten up. This function can be overrided for each device to indicate the default behaviour of the controller when the associated light is off and an event for incrementing brightness is received. Returns True if the associated light should be turned on with minimum brightness if an event for incrementing brightness is received, while the lamp is off. The behaviour can be overridden by the user with the 'smooth_power_on' option in app configuration. | 2.642417 | 3 |
kts/core/types.py | konodyuk/kts | 18 | 8743 | from typing import Union
import pandas as pd
from kts.core.frame import KTSFrame
AnyFrame = Union[pd.DataFrame, KTSFrame]
| from typing import Union
import pandas as pd
from kts.core.frame import KTSFrame
AnyFrame = Union[pd.DataFrame, KTSFrame]
| none | 1 | 1.69837 | 2 |
|
krispy/mod_user/models.py | jlaura/krispy | 2 | 8744 | <filename>krispy/mod_user/models.py
from app import db
from flask.ext.login import UserMixin
class User(UserMixin, db.Model):
__tablename__ = 'oauth2users'
id = db.Column(db.Integer, primary_key=True)
social_id = db.Column(db.String(64), nullable=False, unique=True)
nickname = db.Column(db.String(64), nullable=False)
email = db.Column(db.String(64), nullable=True)
| <filename>krispy/mod_user/models.py
from app import db
from flask.ext.login import UserMixin
class User(UserMixin, db.Model):
__tablename__ = 'oauth2users'
id = db.Column(db.Integer, primary_key=True)
social_id = db.Column(db.String(64), nullable=False, unique=True)
nickname = db.Column(db.String(64), nullable=False)
email = db.Column(db.String(64), nullable=True)
| none | 1 | 2.216953 | 2 |
|
blog_app/blog/views.py | flxj/Django_blog | 1 | 8745 | <gh_stars>1-10
import markdown
from comments.forms import CommentForm,BookCommentForm,MovieCommentForm
from django.shortcuts import render, get_object_or_404
from.models import Post,Category,Tag, Book,Movie
#from django.http import HttpResponse
from django.views.generic import ListView, DetailView
from django.utils.text import slugify
from markdown.extensions.toc import TocExtension
from django.db.models import Q
"""
def index(request):
#post_list = Post.objects.all().order_by('-created_time')
post_list = Post.objects.all()
return render(request, 'blog/index.html', context={'post_list': post_list})
"""
class IndexView(ListView):
model = Post
template_name = 'blog/index.html'
context_object_name = 'post_list'
paginate_by = 10
def get_context_data(self, **kwargs):
"""
        In a view function, template variables are passed to the template by handing a
        dict to the context argument of render, e.g.
        render(request, 'blog/index.html', context={'post_list': post_list}),
        which passes the {'post_list': post_list} dict to the template.
        In a class-based view, the dict of template variables is obtained through
        get_context_data, so we override that method to insert some extra template
        variables of our own.
"""
        # First get the context dict generated by the parent class for the template.
        context = super().get_context_data(**kwargs)
        # The dict generated by the parent class already contains three template
        # variables: paginator, page_obj and is_paginated.
        # paginator is a Paginator instance,
        # page_obj is a Page instance,
        # is_paginated is a boolean indicating whether the results are paginated.
        # For example, with 10 items per page but only 5 items in total, no pagination
        # is needed and is_paginated=False.
        # Paginator and Page are explained in detail in the Django Pagination post:
        # http://zmrenwu.com/post/34/
        # Since context is a dict, call its get method to read the value of a key.
        paginator = context.get('paginator')
        page = context.get('page_obj')
        is_paginated = context.get('is_paginated')
        # Call our own pagination_data method (see below) to get the data needed to
        # render the pagination navigation bar.
        pagination_data = self.pagination_data(paginator, page, is_paginated)
        # Merge the pagination template variables into context; note that
        # pagination_data also returns a dict.
        context.update(pagination_data)
        # Return the updated context so that ListView uses these template variables to
        # render the template; context now holds the data needed for the pagination bar.
return context
def pagination_data(self, paginator, page, is_paginated):
if not is_paginated:
            # Without pagination there is no navigation bar to show, so no pagination
            # data is needed and an empty dict is returned.
return {}
        # Consecutive page numbers to the left of the current page; initially empty.
        left = []
        # Consecutive page numbers to the right of the current page; initially empty.
        right = []
        # Whether an ellipsis needs to be shown after the page-1 number.
        left_has_more = False
        # Whether an ellipsis needs to be shown before the last page number.
        right_has_more = False
        # Whether the page-1 number needs to be shown.
        # If the consecutive page numbers to the left of the current page already
        # include page 1, it does not need to be shown again; in all other cases the
        # first page number is always shown.
        # Initial value is False.
        first = False
        # Whether the last page number needs to be shown.
        # The reason for this flag is the same as above.
        last = False
        # The page number currently requested by the user.
        page_number = page.number
        # The total number of pages after pagination.
        total_pages = paginator.num_pages
        # The full list of page numbers, e.g. [1, 2, 3, 4] if there are four pages.
page_range = paginator.page_range
if page_number == 1:
            # The user requested the first page, so no page numbers are needed on the
            # left and left=[] (already empty by default). Only the consecutive page
            # numbers to the right of the current page are needed: for the page list
            # [1, 2, 3, 4] this gives right = [2, 3]. Only the two pages after the
            # current one are taken here; change this number to get more pages.
            right = page_range[page_number:page_number + 2]
            # If the rightmost page number is smaller than the last page number minus 1,
            # there are other pages between them, so an ellipsis is needed, indicated
            # by right_has_more.
            if right[-1] < total_pages - 1:
                right_has_more = True
            # If the rightmost page number is smaller than the last page number, the
            # consecutive page numbers on the right do not include the last page,
            # so the last page number has to be shown, indicated by last.
if right[-1] < total_pages:
last = True
elif page_number == total_pages:
            # The user requested the last page, so no page numbers are needed on the
            # right and right=[] (already empty by default). Only the consecutive page
            # numbers to the left of the current page are needed: for the page list
            # [1, 2, 3, 4] this gives left = [2, 3]. Only the two pages before the
            # current one are taken here; change this number to get more pages.
            left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
            # If the leftmost page number is greater than the page-2 number, there are
            # other pages between it and page 1, so an ellipsis is needed, indicated
            # by left_has_more.
            if left[0] > 2:
                left_has_more = True
            # If the leftmost page number is greater than the page-1 number, the
            # consecutive page numbers on the left do not include the first page,
            # so the first page number has to be shown, indicated by first.
if left[0] > 1:
first = True
else:
            # The user requested neither the last page nor the first page, so the
            # consecutive page numbers on both sides of the current page are needed.
            # Only the two pages before and after the current one are taken here;
            # change this number to get more pages.
left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
right = page_range[page_number:page_number + 2]
            # Whether to show the last page and the ellipsis before it.
if right[-1] < total_pages - 1:
right_has_more = True
if right[-1] < total_pages:
last = True
            # Whether to show page 1 and the ellipsis after it.
if left[0] > 2:
left_has_more = True
if left[0] > 1:
first = True
data = {
'left': left,
'right': right,
'left_has_more': left_has_more,
'right_has_more': right_has_more,
'first': first,
'last': last,
}
return data
# Show the full post
"""
def detail(request, pk):
post = get_object_or_404(Post, pk=pk)
    # Increase the view count by 1
post.increase_views()
post.body = markdown.markdown(post.body,
extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
'markdown.extensions.toc',
'markdown.extensions.tables',
])
form = CommentForm()
    # Get all the comments under this post
comment_list = post.comment_set.all()
    # Pass the post, the form and the post's comment list to the detail.html template as template variables so the corresponding data can be rendered.
context = {'post': post,
'form': form,
'comment_list': comment_list
}
return render(request, 'blog/detail.html', context=context)
"""
class PostDetailView(DetailView):
model = Post
template_name = 'blog/detail.html'
context_object_name = 'post'
def get(self, request, *args, **kwargs):
        # The get method is overridden because the post's view count has to be
        # increased by 1 every time the post is visited.
        # get returns an HttpResponse instance.
        # The parent class's get has to be called first, because only after it runs
        # does the self.object attribute exist, holding the Post instance being visited.
        response = super(PostDetailView, self).get(request, *args, **kwargs)
        # Increase the post's view count by 1.
        # Note that self.object is exactly the post being visited.
        self.object.increase_views()
        # A view must return an HttpResponse object.
return response
def get_object(self, queryset=None):
        # get_object is overridden because the post's body needs to be rendered.
post = super(PostDetailView, self).get_object(queryset=None)
        # Markdown is disabled here for now, because text that has been rendered by
        # markdown and then by MathJax becomes unreadable; but without markdown
        # rendering, code blocks do not display properly. So for future posts with
        # formulas, follow the sample format that ships with MathJax to keep them from
        # breaking after markdown rendering.
md = markdown.Markdown(extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
'markdown.extensions.toc',
TocExtension(slugify=slugify),
])
post.body = md.convert(post.body)
post.toc = md.toc
return post
def get_context_data(self, **kwargs):
        # get_context_data is overridden because, besides passing the post to the
        # template (DetailView already does that), the comment form and the post's
        # comment list also have to be passed to the template.
context = super(PostDetailView, self).get_context_data(**kwargs)
form = CommentForm()
comment_list = self.object.comment_set.all()
context.update({
'form': form,
'comment_list': comment_list
})
return context
# View archives
"""
def archives(request, year, month):
post_list = Post.objects.filter(created_time__year=year,
created_time__month=month
).order_by('-created_time')
return render(request, 'blog/index.html', context={'post_list': post_list})
"""
class ArchivesView(ListView):
model = Post
template_name = 'blog/index.html'
context_object_name = 'post_list'
def get_queryset(self):
year = self.kwargs.get('year')
month = self.kwargs.get('month')
return super(ArchivesView, self).get_queryset().filter(created_time__year=year,
created_time__month=month
)
# View posts by category
"""
def category(request, pk):
cate = get_object_or_404(Category, pk=pk)
post_list = Post.objects.filter(category=cate).order_by('-created_time')
return render(request, 'blog/index.html', context={'post_list': post_list})
"""
class CategoryView(ListView):
model = Post
template_name = 'blog/index.html'
context_object_name = 'post_list'
def get_queryset(self):
cate = get_object_or_404(Category, pk=self.kwargs.get('pk'))
return super(CategoryView, self).get_queryset().filter(category=cate)
# View posts by tag
class TagView(ListView):
model = Post
template_name = 'blog/index.html'
context_object_name = 'post_list'
def get_queryset(self):
tag = get_object_or_404(Tag, pk=self.kwargs.get('pk'))
return super(TagView, self).get_queryset().filter(tags=tag)
# Post search
def search(request):
q = request.GET.get('q')
error_msg = ''
if not q:
error_msg = "请输入关键词"
return render(request, 'blog/index.html', {'error_msg': error_msg})
post_list = Post.objects.filter(Q(title__icontains=q) | Q(body__icontains=q))
return render(request, 'blog/index.html', {'error_msg': error_msg,
'post_list': post_list})
# View book reviews
class BookView(ListView):
model = Book
template_name = 'blog/book.html'
context_object_name = 'book_list'
paginate_by = 20
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
paginator = context.get('paginator')
page = context.get('page_obj')
is_paginated = context.get('is_paginated')
pagination_data = self.pagination_data(paginator, page, is_paginated)
context.update(pagination_data)
return context
def pagination_data(self, paginator, page, is_paginated):
if not is_paginated:
return {}
left = []
right = []
left_has_more = False
right_has_more = False
first = False
last = False
page_number = page.number
total_pages = paginator.num_pages
page_range = paginator.page_range
if page_number == 1:
right = page_range[page_number:page_number + 2]
if right[-1] < total_pages - 1:
right_has_more = True
if right[-1] < total_pages:
last = True
elif page_number == total_pages:
left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
if left[0] > 2:
left_has_more = True
if left[0] > 1:
first = True
else:
left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
right = page_range[page_number:page_number + 2]
if right[-1] < total_pages - 1:
right_has_more = True
if right[-1] < total_pages:
last = True
if left[0] > 2:
left_has_more = True
if left[0] > 1:
first = True
data = {
'left': left,
'right': right,
'left_has_more': left_has_more,
'right_has_more': right_has_more,
'first': first,
'last': last,
}
return data
class BookDetailView(DetailView):
model = Book
template_name = 'blog/bookdetail.html'
context_object_name = 'book'
def get_object(self, queryset=None):
        # get_object is overridden because the book's review needs to be rendered.
book = super(BookDetailView, self).get_object(queryset=None)
md = markdown.Markdown(extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
#'markdown.extensions.toc',
#TocExtension(slugify=slugify),
])
book.review = md.convert(book.review)
#book.toc = md.toc
return book
def get_context_data(self, **kwargs):
context = super(BookDetailView, self).get_context_data(**kwargs)
form = BookCommentForm()
comment_list = self.object.bookcomment_set.all()
context.update({
'form': form,
'comment_list': comment_list
})
return context
# Book review archives
class BookArchivesView(ListView):
model = Book
template_name = 'blog/book.html'
context_object_name = 'book_list'
def get_queryset(self):
year = self.kwargs.get('year')
month = self.kwargs.get('month')
return super(BookArchivesView, self).get_queryset().filter(created_time__year=year,
created_time__month=month
)
### Film reviews
class FilmView(ListView):
model = Movie
template_name = 'blog/film.html'
context_object_name = 'film_list'
paginate_by = 36
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
paginator = context.get('paginator')
page = context.get('page_obj')
is_paginated = context.get('is_paginated')
pagination_data = self.pagination_data(paginator, page, is_paginated)
context.update(pagination_data)
return context
def pagination_data(self, paginator, page, is_paginated):
if not is_paginated:
return {}
left = []
right = []
left_has_more = False
right_has_more = False
first = False
last = False
page_number = page.number
total_pages = paginator.num_pages
page_range = paginator.page_range
if page_number == 1:
right = page_range[page_number:page_number + 2]
if right[-1] < total_pages - 1:
right_has_more = True
if right[-1] < total_pages:
last = True
elif page_number == total_pages:
left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
if left[0] > 2:
left_has_more = True
if left[0] > 1:
first = True
else:
left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
right = page_range[page_number:page_number + 2]
if right[-1] < total_pages - 1:
right_has_more = True
if right[-1] < total_pages:
last = True
if left[0] > 2:
left_has_more = True
if left[0] > 1:
first = True
data = {
'left': left,
'right': right,
'left_has_more': left_has_more,
'right_has_more': right_has_more,
'first': first,
'last': last,
}
return data
class FilmDetailView(DetailView):
model = Movie
template_name = 'blog/filmdetail.html'
context_object_name = 'film'
def get_object(self, queryset=None):
film = super(FilmDetailView, self).get_object(queryset=None)
md = markdown.Markdown(extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
#'markdown.extensions.toc',
#TocExtension(slugify=slugify),
])
film.review = md.convert(film.review)
#film.toc = md.toc
return film
def get_context_data(self, **kwargs):
context = super(FilmDetailView, self).get_context_data(**kwargs)
form = MovieCommentForm()
comment_list = self.object.moviecomment_set.all()
context.update({
'form': form,
'comment_list': comment_list
})
return context
# Film review archives
class FilmArchivesView(ListView):
model = Movie
template_name = 'blog/film.html'
context_object_name = 'film_list'
def get_queryset(self):
year = self.kwargs.get('year')
month = self.kwargs.get('month')
return super(FilmArchivesView, self).get_queryset().filter(created_time__year=year,
created_time__month=month
)
def about(request):
return render(request, 'blog/about.html') | import markdown
from comments.forms import CommentForm,BookCommentForm,MovieCommentForm
from django.shortcuts import render, get_object_or_404
from.models import Post,Category,Tag, Book,Movie
#from django.http import HttpResponse
from django.views.generic import ListView, DetailView
from django.utils.text import slugify
from markdown.extensions.toc import TocExtension
from django.db.models import Q
"""
def index(request):
#post_list = Post.objects.all().order_by('-created_time')
post_list = Post.objects.all()
return render(request, 'blog/index.html', context={'post_list': post_list})
"""
class IndexView(ListView):
model = Post
template_name = 'blog/index.html'
context_object_name = 'post_list'
paginate_by = 10
def get_context_data(self, **kwargs):
"""
        In a view function, template variables are passed to the template by handing a
        dict to the context argument of render, e.g.
        render(request, 'blog/index.html', context={'post_list': post_list}),
        which passes the {'post_list': post_list} dict to the template.
        In a class-based view, the dict of template variables is obtained through
        get_context_data, so we override that method to insert some extra template
        variables of our own.
"""
        # First get the context dict generated by the parent class for the template.
        context = super().get_context_data(**kwargs)
        # The dict generated by the parent class already contains three template
        # variables: paginator, page_obj and is_paginated.
        # paginator is a Paginator instance,
        # page_obj is a Page instance,
        # is_paginated is a boolean indicating whether the results are paginated.
        # For example, with 10 items per page but only 5 items in total, no pagination
        # is needed and is_paginated=False.
        # Paginator and Page are explained in detail in the Django Pagination post:
        # http://zmrenwu.com/post/34/
        # Since context is a dict, call its get method to read the value of a key.
        paginator = context.get('paginator')
        page = context.get('page_obj')
        is_paginated = context.get('is_paginated')
        # Call our own pagination_data method (see below) to get the data needed to
        # render the pagination navigation bar.
        pagination_data = self.pagination_data(paginator, page, is_paginated)
        # Merge the pagination template variables into context; note that
        # pagination_data also returns a dict.
        context.update(pagination_data)
        # Return the updated context so that ListView uses these template variables to
        # render the template; context now holds the data needed for the pagination bar.
return context
def pagination_data(self, paginator, page, is_paginated):
if not is_paginated:
            # Without pagination there is no navigation bar to show, so no pagination
            # data is needed and an empty dict is returned.
return {}
        # Consecutive page numbers to the left of the current page; initially empty.
        left = []
        # Consecutive page numbers to the right of the current page; initially empty.
        right = []
        # Whether an ellipsis needs to be shown after the page-1 number.
        left_has_more = False
        # Whether an ellipsis needs to be shown before the last page number.
        right_has_more = False
        # Whether the page-1 number needs to be shown.
        # If the consecutive page numbers to the left of the current page already
        # include page 1, it does not need to be shown again; in all other cases the
        # first page number is always shown.
        # Initial value is False.
        first = False
        # Whether the last page number needs to be shown.
        # The reason for this flag is the same as above.
        last = False
        # The page number currently requested by the user.
        page_number = page.number
        # The total number of pages after pagination.
        total_pages = paginator.num_pages
        # The full list of page numbers, e.g. [1, 2, 3, 4] if there are four pages.
page_range = paginator.page_range
if page_number == 1:
            # The user requested the first page, so no page numbers are needed on the
            # left and left=[] (already empty by default). Only the consecutive page
            # numbers to the right of the current page are needed: for the page list
            # [1, 2, 3, 4] this gives right = [2, 3]. Only the two pages after the
            # current one are taken here; change this number to get more pages.
            right = page_range[page_number:page_number + 2]
            # If the rightmost page number is smaller than the last page number minus 1,
            # there are other pages between them, so an ellipsis is needed, indicated
            # by right_has_more.
            if right[-1] < total_pages - 1:
                right_has_more = True
            # If the rightmost page number is smaller than the last page number, the
            # consecutive page numbers on the right do not include the last page,
            # so the last page number has to be shown, indicated by last.
if right[-1] < total_pages:
last = True
elif page_number == total_pages:
            # The user requested the last page, so no page numbers are needed on the
            # right and right=[] (already empty by default). Only the consecutive page
            # numbers to the left of the current page are needed: for the page list
            # [1, 2, 3, 4] this gives left = [2, 3]. Only the two pages before the
            # current one are taken here; change this number to get more pages.
            left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
            # If the leftmost page number is greater than the page-2 number, there are
            # other pages between it and page 1, so an ellipsis is needed, indicated
            # by left_has_more.
            if left[0] > 2:
                left_has_more = True
            # If the leftmost page number is greater than the page-1 number, the
            # consecutive page numbers on the left do not include the first page,
            # so the first page number has to be shown, indicated by first.
if left[0] > 1:
first = True
else:
            # The user requested neither the last page nor the first page, so the
            # consecutive page numbers on both sides of the current page are needed.
            # Only the two pages before and after the current one are taken here;
            # change this number to get more pages.
left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
right = page_range[page_number:page_number + 2]
            # Whether to show the last page and the ellipsis before it.
if right[-1] < total_pages - 1:
right_has_more = True
if right[-1] < total_pages:
last = True
            # Whether to show page 1 and the ellipsis after it.
if left[0] > 2:
left_has_more = True
if left[0] > 1:
first = True
data = {
'left': left,
'right': right,
'left_has_more': left_has_more,
'right_has_more': right_has_more,
'first': first,
'last': last,
}
return data
# Show the full post
"""
def detail(request, pk):
post = get_object_or_404(Post, pk=pk)
    # Increase the view count by 1
post.increase_views()
post.body = markdown.markdown(post.body,
extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
'markdown.extensions.toc',
'markdown.extensions.tables',
])
form = CommentForm()
    # Get all the comments under this post
comment_list = post.comment_set.all()
    # Pass the post, the form and the post's comment list to the detail.html template as template variables so the corresponding data can be rendered.
context = {'post': post,
'form': form,
'comment_list': comment_list
}
return render(request, 'blog/detail.html', context=context)
"""
class PostDetailView(DetailView):
model = Post
template_name = 'blog/detail.html'
context_object_name = 'post'
def get(self, request, *args, **kwargs):
        # The get method is overridden because the post's view count has to be
        # increased by 1 every time the post is visited.
        # get returns an HttpResponse instance.
        # The parent class's get has to be called first, because only after it runs
        # does the self.object attribute exist, holding the Post instance being visited.
        response = super(PostDetailView, self).get(request, *args, **kwargs)
        # Increase the post's view count by 1.
        # Note that self.object is exactly the post being visited.
        self.object.increase_views()
        # A view must return an HttpResponse object.
return response
def get_object(self, queryset=None):
        # get_object is overridden because the post's body needs to be rendered.
post = super(PostDetailView, self).get_object(queryset=None)
        # Markdown is disabled here for now, because text that has been rendered by
        # markdown and then by MathJax becomes unreadable; but without markdown
        # rendering, code blocks do not display properly. So for future posts with
        # formulas, follow the sample format that ships with MathJax to keep them from
        # breaking after markdown rendering.
md = markdown.Markdown(extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
'markdown.extensions.toc',
TocExtension(slugify=slugify),
])
post.body = md.convert(post.body)
post.toc = md.toc
return post
def get_context_data(self, **kwargs):
        # get_context_data is overridden because, besides passing the post to the
        # template (DetailView already does that), the comment form and the post's
        # comment list also have to be passed to the template.
context = super(PostDetailView, self).get_context_data(**kwargs)
form = CommentForm()
comment_list = self.object.comment_set.all()
context.update({
'form': form,
'comment_list': comment_list
})
return context
# View archives
"""
def archives(request, year, month):
post_list = Post.objects.filter(created_time__year=year,
created_time__month=month
).order_by('-created_time')
return render(request, 'blog/index.html', context={'post_list': post_list})
"""
class ArchivesView(ListView):
model = Post
template_name = 'blog/index.html'
context_object_name = 'post_list'
def get_queryset(self):
year = self.kwargs.get('year')
month = self.kwargs.get('month')
return super(ArchivesView, self).get_queryset().filter(created_time__year=year,
created_time__month=month
)
# View posts by category
"""
def category(request, pk):
cate = get_object_or_404(Category, pk=pk)
post_list = Post.objects.filter(category=cate).order_by('-created_time')
return render(request, 'blog/index.html', context={'post_list': post_list})
"""
class CategoryView(ListView):
model = Post
template_name = 'blog/index.html'
context_object_name = 'post_list'
def get_queryset(self):
cate = get_object_or_404(Category, pk=self.kwargs.get('pk'))
return super(CategoryView, self).get_queryset().filter(category=cate)
# View posts by tag
class TagView(ListView):
model = Post
template_name = 'blog/index.html'
context_object_name = 'post_list'
def get_queryset(self):
tag = get_object_or_404(Tag, pk=self.kwargs.get('pk'))
return super(TagView, self).get_queryset().filter(tags=tag)
# Post search
def search(request):
q = request.GET.get('q')
error_msg = ''
if not q:
error_msg = "请输入关键词"
return render(request, 'blog/index.html', {'error_msg': error_msg})
post_list = Post.objects.filter(Q(title__icontains=q) | Q(body__icontains=q))
return render(request, 'blog/index.html', {'error_msg': error_msg,
'post_list': post_list})
# View book reviews
class BookView(ListView):
model = Book
template_name = 'blog/book.html'
context_object_name = 'book_list'
paginate_by = 20
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
paginator = context.get('paginator')
page = context.get('page_obj')
is_paginated = context.get('is_paginated')
pagination_data = self.pagination_data(paginator, page, is_paginated)
context.update(pagination_data)
return context
def pagination_data(self, paginator, page, is_paginated):
if not is_paginated:
return {}
left = []
right = []
left_has_more = False
right_has_more = False
first = False
last = False
page_number = page.number
total_pages = paginator.num_pages
page_range = paginator.page_range
if page_number == 1:
right = page_range[page_number:page_number + 2]
if right[-1] < total_pages - 1:
right_has_more = True
if right[-1] < total_pages:
last = True
elif page_number == total_pages:
left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
if left[0] > 2:
left_has_more = True
if left[0] > 1:
first = True
else:
left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
right = page_range[page_number:page_number + 2]
if right[-1] < total_pages - 1:
right_has_more = True
if right[-1] < total_pages:
last = True
if left[0] > 2:
left_has_more = True
if left[0] > 1:
first = True
data = {
'left': left,
'right': right,
'left_has_more': left_has_more,
'right_has_more': right_has_more,
'first': first,
'last': last,
}
return data
class BookDetailView(DetailView):
model = Book
template_name = 'blog/bookdetail.html'
context_object_name = 'book'
def get_object(self, queryset=None):
        # get_object is overridden because the book's review needs to be rendered.
book = super(BookDetailView, self).get_object(queryset=None)
md = markdown.Markdown(extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
#'markdown.extensions.toc',
#TocExtension(slugify=slugify),
])
book.review = md.convert(book.review)
#book.toc = md.toc
return book
def get_context_data(self, **kwargs):
context = super(BookDetailView, self).get_context_data(**kwargs)
form = BookCommentForm()
comment_list = self.object.bookcomment_set.all()
context.update({
'form': form,
'comment_list': comment_list
})
return context
# Book review archives
class BookArchivesView(ListView):
model = Book
template_name = 'blog/book.html'
context_object_name = 'book_list'
def get_queryset(self):
year = self.kwargs.get('year')
month = self.kwargs.get('month')
return super(BookArchivesView, self).get_queryset().filter(created_time__year=year,
created_time__month=month
)
### Film reviews
class FilmView(ListView):
model = Movie
template_name = 'blog/film.html'
context_object_name = 'film_list'
paginate_by = 36
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
paginator = context.get('paginator')
page = context.get('page_obj')
is_paginated = context.get('is_paginated')
pagination_data = self.pagination_data(paginator, page, is_paginated)
context.update(pagination_data)
return context
def pagination_data(self, paginator, page, is_paginated):
if not is_paginated:
return {}
left = []
right = []
left_has_more = False
right_has_more = False
first = False
last = False
page_number = page.number
total_pages = paginator.num_pages
page_range = paginator.page_range
if page_number == 1:
right = page_range[page_number:page_number + 2]
if right[-1] < total_pages - 1:
right_has_more = True
if right[-1] < total_pages:
last = True
elif page_number == total_pages:
left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
if left[0] > 2:
left_has_more = True
if left[0] > 1:
first = True
else:
left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
right = page_range[page_number:page_number + 2]
if right[-1] < total_pages - 1:
right_has_more = True
if right[-1] < total_pages:
last = True
if left[0] > 2:
left_has_more = True
if left[0] > 1:
first = True
data = {
'left': left,
'right': right,
'left_has_more': left_has_more,
'right_has_more': right_has_more,
'first': first,
'last': last,
}
return data
class FilmDetailView(DetailView):
model = Movie
template_name = 'blog/filmdetail.html'
context_object_name = 'film'
def get_object(self, queryset=None):
film = super(FilmDetailView, self).get_object(queryset=None)
md = markdown.Markdown(extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
#'markdown.extensions.toc',
#TocExtension(slugify=slugify),
])
film.review = md.convert(film.review)
#film.toc = md.toc
return film
def get_context_data(self, **kwargs):
context = super(FilmDetailView, self).get_context_data(**kwargs)
form = MovieCommentForm()
comment_list = self.object.moviecomment_set.all()
context.update({
'form': form,
'comment_list': comment_list
})
return context
# Film review archives
class FilmArchivesView(ListView):
model = Movie
template_name = 'blog/film.html'
context_object_name = 'film_list'
def get_queryset(self):
year = self.kwargs.get('year')
month = self.kwargs.get('month')
return super(FilmArchivesView, self).get_queryset().filter(created_time__year=year,
created_time__month=month
)
def about(request):
return render(request, 'blog/about.html') | zh | 0.896648 | #from django.http import HttpResponse def index(request): #post_list = Post.objects.all().order_by('-created_time') post_list = Post.objects.all() return render(request, 'blog/index.html', context={'post_list': post_list}) 在视图函数中将模板变量传递给模板是通过给 render 函数的 context 参数传递一个字典实现的, 例如 render(request, 'blog/index.html', context={'post_list': post_list}), 这里传递了一个 {'post_list': post_list} 字典给模板。 在类视图中,这个需要传递的模板变量字典是通过 get_context_data 获得的, 所以我们复写该方法,以便我们能够自己再插入一些我们自定义的模板变量进去。 # 首先获得父类生成的传递给模板的字典。 # 父类生成的字典中已有 paginator、page_obj、is_paginated 这三个模板变量, # paginator 是 Paginator 的一个实例, # page_obj 是 Page 的一个实例, # is_paginated 是一个布尔变量,用于指示是否已分页。 # 例如如果规定每页 10 个数据,而本身只有 5 个数据,其实就用不着分页,此时 is_paginated=False。 # 关于什么是 Paginator,Page 类在 Django Pagination 简单分页:http://zmrenwu.com/post/34/ 中已有详细说明。 # 由于 context 是一个字典,所以调用 get 方法从中取出某个键对应的值。 # 调用自己写的 pagination_data 方法获得显示分页导航条需要的数据,见下方。 # 将分页导航条的模板变量更新到 context 中,注意 pagination_data 方法返回的也是一个字典。 # 将更新后的 context 返回,以便 ListView 使用这个字典中的模板变量去渲染模板。 # 注意此时 context 字典中已有了显示分页导航条所需的数据。 # 如果没有分页,则无需显示分页导航条,不用任何分页导航条的数据,因此返回一个空的字典 # 当前页左边连续的页码号,初始值为空 # 当前页右边连续的页码号,初始值为空 # 标示第 1 页页码后是否需要显示省略号 # 标示最后一页页码前是否需要显示省略号 # 标示是否需要显示第 1 页的页码号。 # 因为如果当前页左边的连续页码号中已经含有第 1 页的页码号,此时就无需再显示第 1 页的页码号, # 其它情况下第一页的页码是始终需要显示的。 # 初始值为 False # 标示是否需要显示最后一页的页码号。 # 需要此指示变量的理由和上面相同。 # 获得用户当前请求的页码号 # 获得分页后的总页数 # 获得整个分页页码列表,比如分了四页,那么就是 [1, 2, 3, 4] # 如果用户请求的是第一页的数据,那么当前页左边的不需要数据,因此 left=[](已默认为空)。 # 此时只要获取当前页右边的连续页码号, # 比如分页页码列表是 [1, 2, 3, 4],那么获取的就是 right = [2, 3]。 # 注意这里只获取了当前页码后连续两个页码,你可以更改这个数字以获取更多页码。 # 如果最右边的页码号比最后一页的页码号减去 1 还要小, # 说明最右边的页码号和最后一页的页码号之间还有其它页码,因此需要显示省略号,通过 right_has_more 来指示。 # 如果最右边的页码号比最后一页的页码号小,说明当前页右边的连续页码号中不包含最后一页的页码 # 所以需要显示最后一页的页码号,通过 last 来指示 # 如果用户请求的是最后一页的数据,那么当前页右边就不需要数据,因此 right=[](已默认为空), # 此时只要获取当前页左边的连续页码号。 # 比如分页页码列表是 [1, 2, 3, 4],那么获取的就是 left = [2, 3] # 这里只获取了当前页码后连续两个页码,你可以更改这个数字以获取更多页码。 # 如果最左边的页码号比第 2 页页码号还大, # 说明最左边的页码号和第 1 页的页码号之间还有其它页码,因此需要显示省略号,通过 left_has_more 来指示。 # 如果最左边的页码号比第 1 页的页码号大,说明当前页左边的连续页码号中不包含第一页的页码, # 所以需要显示第一页的页码号,通过 first 来指示 # 用户请求的既不是最后一页,也不是第 1 页,则需要获取当前页左右两边的连续页码号, # 这里只获取了当前页码前后连续两个页码,你可以更改这个数字以获取更多页码。 # 是否需要显示最后一页和最后一页前的省略号 # 是否需要显示第 1 页和第 1 页后的省略号 #显示全文 def detail(request, pk): post = get_object_or_404(Post, pk=pk) # 阅读量 +1 post.increase_views() post.body = markdown.markdown(post.body, extensions=[ 'markdown.extensions.extra', 'markdown.extensions.codehilite', 'markdown.extensions.toc', 'markdown.extensions.tables', ]) form = CommentForm() # 获取这篇 post 下的全部评论 comment_list = post.comment_set.all() # 将文章、表单、以及文章下的评论列表作为模板变量传给 detail.html 模板,以便渲染相应数据。 context = {'post': post, 'form': form, 'comment_list': comment_list } return render(request, 'blog/detail.html', context=context) # 覆写 get 方法的目的是因为每当文章被访问一次,就得将文章阅读量 +1 # get 方法返回的是一个 HttpResponse 实例 # 之所以需要先调用父类的 get 方法,是因为只有当 get 方法被调用后, # 才有 self.object 属性,其值为 Post 模型实例,即被访问的文章 post # 将文章阅读量 +1 # 注意 self.object 的值就是被访问的文章 post # 视图必须返回一个 HttpResponse 对象 # 覆写 get_object 方法的目的是因为需要对 post 的 body 值进行渲染 #此处先将markdown禁掉,因为显然经过markdown渲染的文本,再经过MathJax渲染就不能看了 #但是不经markdown渲染,代码段又不能正常显示,淦 #所以以后写带公式的博文,公式格式参考MathJax附带的样例,防止自己写的经过markdown渲染后抽风 # 覆写 get_context_data 的目的是因为除了将 post 传递给模板外(DetailView 已经帮我们完成), # 还要把评论表单、post 下的评论列表传递给模板。 #查看归档 def archives(request, year, month): post_list = Post.objects.filter(created_time__year=year, created_time__month=month ).order_by('-created_time') return render(request, 'blog/index.html', context={'post_list': post_list}) #查看分类文章 def category(request, pk): cate = get_object_or_404(Category, pk=pk) 
post_list = Post.objects.filter(category=cate).order_by('-created_time') return render(request, 'blog/index.html', context={'post_list': post_list}) #查看标签文章 #文章搜索 #查看书评 # 覆写 get_object 方法的目的是因为需要对 book 的 review 值进行渲染 #'markdown.extensions.toc', #TocExtension(slugify=slugify), #book.toc = md.toc #书评归档 ###影评相关 #'markdown.extensions.toc', #TocExtension(slugify=slugify), #film.toc = md.toc #影评归档 | 2.407009 | 2 |
src/command_modules/azure-cli-security/azure/cli/command_modules/security/_params.py | jfcoz/azure-cli | 1 | 8746 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands.parameters import resource_group_name_type
from knack.arguments import CLIArgumentType
from ._validators import (validate_alert_status,
validate_auto_provisioning_toggle,
validate_pricing_tier)
name_arg_type = CLIArgumentType(options_list=('--name', '-n'), metavar='NAME', help='name of the resource to be fetched')
home_region_arg_type = CLIArgumentType(options_list=('--home-region', '-hr'), metavar='HOMEREGION', help='home region that was selected for the subscription')
location_arg_type = CLIArgumentType(options_list=('--location', '-l'), metavar='LOCATION', help='location of the resource')
# Alerts
alert_status_arg_type = CLIArgumentType(options_list=('--status'), metavar='STATUS', help='target status of the alert. possible values are "dismiss" and "activate"')
# Auto Provisioning
auto_provisioning_auto_provision_arg_type = CLIArgumentType(options_list=('--auto-provision'), metavar='AUTOPROVISION', help='Automatic provisioning toggle. possible values are "on" or "off"')
# Contacts
contact_email_arg_type = CLIArgumentType(options_list=('--email'), metavar='EMAIL', help='E-mail of the security contact')
contact_phone_arg_type = CLIArgumentType(options_list=('--phone'), metavar='PHONE', help='Phone of the security contact')
contact_alert_notifications_arg_type = CLIArgumentType(options_list=('--alert-notifications'), metavar='ALERTNOTIFICATIONS', help='Whether to send mail notifications to the security contacts')
contact_alerts_admins_arg_type = CLIArgumentType(options_list=('--alerts-admins'), metavar='ALERTADMINS', help='Whether to send mail notifications to the subscription administrators')
# Pricing
pricing_tier_arg_type = CLIArgumentType(options_list=('--tier'), metavar='TIER', help='pricing tier type')
# Workspace settings
workspace_setting_target_workspace_arg_type = CLIArgumentType(options_list=('--target-workspace'), metavar='TARGETWORKSPACE', help='An ID of the workspace resource that will hold the security data')
def load_arguments(self, _):
for scope in ['alert',
'task',
'setting',
'contact',
'auto-provisioning-setting',
'discovered-security-solution',
'external-security-solution',
'jit-policy',
'location',
'pricing',
'topology',
'workspace-setting']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'resource_group_name',
options_list=['--resource-group', '-g'],
arg_type=resource_group_name_type)
c.argument(
'resource_name',
arg_type=name_arg_type)
c.argument(
'location',
arg_type=location_arg_type)
for scope in ['alert update']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'status',
validator=validate_alert_status,
arg_type=alert_status_arg_type)
for scope in ['auto-provisioning-setting update']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'auto_provision',
validator=validate_auto_provisioning_toggle,
arg_type=auto_provisioning_auto_provision_arg_type)
for scope in ['contact create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'email',
arg_type=contact_email_arg_type)
c.argument(
'phone',
arg_type=contact_phone_arg_type)
c.argument(
'alert_notifications',
arg_type=contact_alert_notifications_arg_type)
c.argument(
'alerts_admins',
arg_type=contact_alerts_admins_arg_type)
for scope in ['pricing create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'tier',
validator=validate_pricing_tier,
arg_type=pricing_tier_arg_type)
for scope in ['workspace-setting create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'target_workspace',
arg_type=workspace_setting_target_workspace_arg_type)
| # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands.parameters import resource_group_name_type
from knack.arguments import CLIArgumentType
from ._validators import (validate_alert_status,
validate_auto_provisioning_toggle,
validate_pricing_tier)
name_arg_type = CLIArgumentType(options_list=('--name', '-n'), metavar='NAME', help='name of the resource to be fetched')
home_region_arg_type = CLIArgumentType(options_list=('--home-region', '-hr'), metavar='HOMEREGION', help='home region that was selected for the subscription')
location_arg_type = CLIArgumentType(options_list=('--location', '-l'), metavar='LOCATION', help='location of the resource')
# Alerts
alert_status_arg_type = CLIArgumentType(options_list=('--status'), metavar='STATUS', help='target status of the alert. possible values are "dismiss" and "activate"')
# Auto Provisioning
auto_provisioning_auto_provision_arg_type = CLIArgumentType(options_list=('--auto-provision'), metavar='AUTOPROVISION', help='Automatic provisioning toggle. possible values are "on" or "off"')
# Contacts
contact_email_arg_type = CLIArgumentType(options_list=('--email'), metavar='EMAIL', help='E-mail of the security contact')
contact_phone_arg_type = CLIArgumentType(options_list=('--phone'), metavar='PHONE', help='Phone of the security contact')
contact_alert_notifications_arg_type = CLIArgumentType(options_list=('--alert-notifications'), metavar='ALERTNOTIFICATIONS', help='Whether to send mail notifications to the security contacts')
contact_alerts_admins_arg_type = CLIArgumentType(options_list=('--alerts-admins'), metavar='ALERTADMINS', help='Whether to send mail notifications to the subscription administrators')
# Pricing
pricing_tier_arg_type = CLIArgumentType(options_list=('--tier'), metavar='TIER', help='pricing tier type')
# Workspace settings
workspace_setting_target_workspace_arg_type = CLIArgumentType(options_list=('--target-workspace'), metavar='TARGETWORKSPACE', help='An ID of the workspace resource that will hold the security data')
def load_arguments(self, _):
for scope in ['alert',
'task',
'setting',
'contact',
'auto-provisioning-setting',
'discovered-security-solution',
'external-security-solution',
'jit-policy',
'location',
'pricing',
'topology',
'workspace-setting']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'resource_group_name',
options_list=['--resource-group', '-g'],
arg_type=resource_group_name_type)
c.argument(
'resource_name',
arg_type=name_arg_type)
c.argument(
'location',
arg_type=location_arg_type)
for scope in ['alert update']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'status',
validator=validate_alert_status,
arg_type=alert_status_arg_type)
for scope in ['auto-provisioning-setting update']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'auto_provision',
validator=validate_auto_provisioning_toggle,
arg_type=auto_provisioning_auto_provision_arg_type)
for scope in ['contact create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'email',
arg_type=contact_email_arg_type)
c.argument(
'phone',
arg_type=contact_phone_arg_type)
c.argument(
'alert_notifications',
arg_type=contact_alert_notifications_arg_type)
c.argument(
'alerts_admins',
arg_type=contact_alerts_admins_arg_type)
for scope in ['pricing create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'tier',
validator=validate_pricing_tier,
arg_type=pricing_tier_arg_type)
for scope in ['workspace-setting create']:
with self.argument_context('security {}'.format(scope)) as c:
c.argument(
'target_workspace',
arg_type=workspace_setting_target_workspace_arg_type)
| en | 0.466718 | # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # pylint: disable=line-too-long # Alerts # Auto Provisioning # Contacts # Pricing # Workspace settings | 1.776854 | 2 |
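The three validators pulled in from ._validators above are not defined in this file; a minimal sketch of what validate_alert_status might look like, assuming the usual azure-cli convention of a validator receiving the parsed namespace (the real implementation may differ):

from knack.util import CLIError

def validate_alert_status(namespace):
    # hypothetical validator -- mirrors the help text of alert_status_arg_type above
    if namespace.status and namespace.status.lower() not in ('dismiss', 'activate'):
        raise CLIError('--status must be either "dismiss" or "activate"')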
utils/path_utils.py | kuyu12/pygame_fight_game | 1 | 8747 | import sys
IMAGES_PATH = sys.path[1] + "/Images"
BACKGROUND_IMAGES_PATH = IMAGES_PATH + '/background'
USER_INFO_BACKGROUND_PATH = BACKGROUND_IMAGES_PATH+"/blue_background.jpg"
SPRINT_IMAGE_PATH = IMAGES_PATH + '/sprite'
PROFILE_IMAGES_PATH = IMAGES_PATH + '/profile'
CONFIGURATION_FILES_PATH = sys.path[1] + "/configuration_files" | import sys
IMAGES_PATH = sys.path[1] + "/Images"
BACKGROUND_IMAGES_PATH = IMAGES_PATH + '/background'
USER_INFO_BACKGROUND_PATH = BACKGROUND_IMAGES_PATH+"/blue_background.jpg"
SPRINT_IMAGE_PATH = IMAGES_PATH + '/sprite'
PROFILE_IMAGES_PATH = IMAGES_PATH + '/profile'
CONFIGURATION_FILES_PATH = sys.path[1] + "/configuration_files" | none | 1 | 1.717004 | 2 |
|
tests/models/test_transformers.py | Alicegaz/torchok | 8 | 8748 | import unittest
import torch
from parameterized import parameterized
from src.constructor import create_backbone
from src.models.backbones.utils import list_models
from .test_segmentation import example_backbones
def inp(bsize, in_ch, w, h):
return torch.ones(bsize, in_ch, w, h)
class TestBackboneCorrectness(unittest.TestCase):
def setUp(self) -> None:
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        # the ViT and CoaT tests below use self.input, which was never created in the original file;
        # a 2x3x224x224 batch is assumed here as a typical backbone input size
        self.input = torch.rand(2, 3, 224, 224, device=self.device)
@parameterized.expand(list_models(module='vision_transformer', exclude_filters=''))
def test_vit_torchscript_conversion(self, backbone_name):
model = create_backbone(backbone_name, img_size=self.input.shape[2]).to(self.device).eval()
with torch.no_grad():
torch.jit.trace(model, self.input)
torch.cuda.empty_cache()
@parameterized.expand(list_models(module='coat', exclude_filters=''))
def test_coat_torchscript_conversion(self, backbone_name):
model = create_backbone(backbone_name, img_size=self.input.shape[2]).to(self.device).eval()
with torch.no_grad():
torch.jit.trace(model, self.input)
torch.cuda.empty_cache()
@parameterized.expand(list_models(module='swin_transformer', exclude_filters=''))
def test_swin_torchscript_conversion(self, backbone_name):
model = create_backbone(backbone_name).to(self.device).eval()
input = torch.rand(2, 3, *model.img_size, device=self.device)
with torch.no_grad():
torch.jit.trace(model, input)
torch.cuda.empty_cache()
| import unittest
import torch
from parameterized import parameterized
from src.constructor import create_backbone
from src.models.backbones.utils import list_models
from .test_segmentation import example_backbones
def inp(bsize, in_ch, w, h):
return torch.ones(bsize, in_ch, w, h)
class TestBackboneCorrectness(unittest.TestCase):
def setUp(self) -> None:
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        # the ViT and CoaT tests below use self.input, which was never created in the original file;
        # a 2x3x224x224 batch is assumed here as a typical backbone input size
        self.input = torch.rand(2, 3, 224, 224, device=self.device)
@parameterized.expand(list_models(module='vision_transformer', exclude_filters=''))
def test_vit_torchscript_conversion(self, backbone_name):
model = create_backbone(backbone_name, img_size=self.input.shape[2]).to(self.device).eval()
with torch.no_grad():
torch.jit.trace(model, self.input)
torch.cuda.empty_cache()
@parameterized.expand(list_models(module='coat', exclude_filters=''))
def test_coat_torchscript_conversion(self, backbone_name):
model = create_backbone(backbone_name, img_size=self.input.shape[2]).to(self.device).eval()
with torch.no_grad():
torch.jit.trace(model, self.input)
torch.cuda.empty_cache()
@parameterized.expand(list_models(module='swin_transformer', exclude_filters=''))
def test_swin_torchscript_conversion(self, backbone_name):
model = create_backbone(backbone_name).to(self.device).eval()
input = torch.rand(2, 3, *model.img_size, device=self.device)
with torch.no_grad():
torch.jit.trace(model, input)
torch.cuda.empty_cache()
| none | 1 | 2.355238 | 2 |
|
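Each test above follows the same recipe: build the backbone, switch it to eval mode, and check that torch.jit.trace succeeds on a sample input. A self-contained sketch of that recipe with a stand-in module in place of create_backbone (which needs the repository's model registry):

import torch
import torch.nn as nn

def check_traceable(model: nn.Module, sample: torch.Tensor) -> bool:
    # trace the eager model and confirm the traced graph reproduces its output
    model = model.eval()
    with torch.no_grad():
        traced = torch.jit.trace(model, sample)
        return torch.allclose(model(sample), traced(sample))

backbone = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(),
                         nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(8, 4))
print(check_traceable(backbone, torch.rand(2, 3, 64, 64)))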
aiogram/types/inline_query.py | SvineruS/aiogram | 1 | 8749 | import typing
from . import base
from . import fields
from .inline_query_result import InlineQueryResult
from .location import Location
from .user import User
class InlineQuery(base.TelegramObject):
"""
This object represents an incoming inline query.
When the user sends an empty query, your bot could return some default or trending results.
https://core.telegram.org/bots/api#inlinequery
"""
id: base.String = fields.Field()
from_user: User = fields.Field(alias='from', base=User)
location: Location = fields.Field(base=Location)
query: base.String = fields.Field()
offset: base.String = fields.Field()
async def answer(self,
results: typing.List[InlineQueryResult],
cache_time: typing.Optional[base.Integer] = None,
is_personal: typing.Optional[base.Boolean] = None,
next_offset: typing.Optional[base.String] = None,
switch_pm_text: typing.Optional[base.String] = None,
switch_pm_parameter: typing.Optional[base.String] = None):
"""
Use this method to send answers to an inline query.
No more than 50 results per query are allowed.
Source: https://core.telegram.org/bots/api#answerinlinequery
:param results: A JSON-serialized array of results for the inline query
:type results: :obj:`typing.List[types.InlineQueryResult]`
:param cache_time: The maximum amount of time in seconds that the result of the
inline query may be cached on the server. Defaults to 300.
:type cache_time: :obj:`typing.Optional[base.Integer]`
:param is_personal: Pass True, if results may be cached on the server side only
for the user that sent the query. By default, results may be returned to any user who sends the same query
:type is_personal: :obj:`typing.Optional[base.Boolean]`
:param next_offset: Pass the offset that a client should send in the
next query with the same text to receive more results.
            Pass an empty string if there are no more results or if you don't support pagination.
Offset length can’t exceed 64 bytes.
:type next_offset: :obj:`typing.Optional[base.String]`
:param switch_pm_text: If passed, clients will display a button with specified text that
switches the user to a private chat with the bot and sends the bot a start message
with the parameter switch_pm_parameter
:type switch_pm_text: :obj:`typing.Optional[base.String]`
:param switch_pm_parameter: Deep-linking parameter for the /start message sent to the bot when
user presses the switch button. 1-64 characters, only A-Z, a-z, 0-9, _ and - are allowed.
:type switch_pm_parameter: :obj:`typing.Optional[base.String]`
:return: On success, True is returned
:rtype: :obj:`base.Boolean`
"""
return await self.bot.answer_inline_query(self.id,
results=results,
cache_time=cache_time,
is_personal=is_personal,
next_offset=next_offset,
switch_pm_text=switch_pm_text,
switch_pm_parameter=switch_pm_parameter)
| import typing
from . import base
from . import fields
from .inline_query_result import InlineQueryResult
from .location import Location
from .user import User
class InlineQuery(base.TelegramObject):
"""
This object represents an incoming inline query.
When the user sends an empty query, your bot could return some default or trending results.
https://core.telegram.org/bots/api#inlinequery
"""
id: base.String = fields.Field()
from_user: User = fields.Field(alias='from', base=User)
location: Location = fields.Field(base=Location)
query: base.String = fields.Field()
offset: base.String = fields.Field()
async def answer(self,
results: typing.List[InlineQueryResult],
cache_time: typing.Optional[base.Integer] = None,
is_personal: typing.Optional[base.Boolean] = None,
next_offset: typing.Optional[base.String] = None,
switch_pm_text: typing.Optional[base.String] = None,
switch_pm_parameter: typing.Optional[base.String] = None):
"""
Use this method to send answers to an inline query.
No more than 50 results per query are allowed.
Source: https://core.telegram.org/bots/api#answerinlinequery
:param results: A JSON-serialized array of results for the inline query
:type results: :obj:`typing.List[types.InlineQueryResult]`
:param cache_time: The maximum amount of time in seconds that the result of the
inline query may be cached on the server. Defaults to 300.
:type cache_time: :obj:`typing.Optional[base.Integer]`
:param is_personal: Pass True, if results may be cached on the server side only
for the user that sent the query. By default, results may be returned to any user who sends the same query
:type is_personal: :obj:`typing.Optional[base.Boolean]`
:param next_offset: Pass the offset that a client should send in the
next query with the same text to receive more results.
            Pass an empty string if there are no more results or if you don't support pagination.
Offset length can’t exceed 64 bytes.
:type next_offset: :obj:`typing.Optional[base.String]`
:param switch_pm_text: If passed, clients will display a button with specified text that
switches the user to a private chat with the bot and sends the bot a start message
with the parameter switch_pm_parameter
:type switch_pm_text: :obj:`typing.Optional[base.String]`
:param switch_pm_parameter: Deep-linking parameter for the /start message sent to the bot when
user presses the switch button. 1-64 characters, only A-Z, a-z, 0-9, _ and - are allowed.
:type switch_pm_parameter: :obj:`typing.Optional[base.String]`
:return: On success, True is returned
:rtype: :obj:`base.Boolean`
"""
return await self.bot.answer_inline_query(self.id,
results=results,
cache_time=cache_time,
is_personal=is_personal,
next_offset=next_offset,
switch_pm_text=switch_pm_text,
switch_pm_parameter=switch_pm_parameter)
| en | 0.674216 | This object represents an incoming inline query. When the user sends an empty query, your bot could return some default or trending results. https://core.telegram.org/bots/api#inlinequery Use this method to send answers to an inline query. No more than 50 results per query are allowed. Source: https://core.telegram.org/bots/api#answerinlinequery :param results: A JSON-serialized array of results for the inline query :type results: :obj:`typing.List[types.InlineQueryResult]` :param cache_time: The maximum amount of time in seconds that the result of the inline query may be cached on the server. Defaults to 300. :type cache_time: :obj:`typing.Optional[base.Integer]` :param is_personal: Pass True, if results may be cached on the server side only for the user that sent the query. By default, results may be returned to any user who sends the same query :type is_personal: :obj:`typing.Optional[base.Boolean]` :param next_offset: Pass the offset that a client should send in the next query with the same text to receive more results. Pass an empty string if there are no more results or if you don‘t support pagination. Offset length can’t exceed 64 bytes. :type next_offset: :obj:`typing.Optional[base.String]` :param switch_pm_text: If passed, clients will display a button with specified text that switches the user to a private chat with the bot and sends the bot a start message with the parameter switch_pm_parameter :type switch_pm_text: :obj:`typing.Optional[base.String]` :param switch_pm_parameter: Deep-linking parameter for the /start message sent to the bot when user presses the switch button. 1-64 characters, only A-Z, a-z, 0-9, _ and - are allowed. :type switch_pm_parameter: :obj:`typing.Optional[base.String]` :return: On success, True is returned :rtype: :obj:`base.Boolean` | 2.707727 | 3 |
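A sketch of how this answer() helper is normally driven from an aiogram 2.x dispatcher; the token is a format-shaped placeholder and the single article result is only illustrative:

from aiogram import Bot, Dispatcher, types

bot = Bot(token="123456789:EXAMPLE-TOKEN-PLACEHOLDER")  # placeholder, not a real token
dp = Dispatcher(bot)

@dp.inline_handler()
async def echo_inline(inline_query: types.InlineQuery):
    # build one result and hand it back through InlineQuery.answer defined above
    result = types.InlineQueryResultArticle(
        id="1",
        title="Echo",
        input_message_content=types.InputTextMessageContent(inline_query.query or "(empty query)"),
    )
    await inline_query.answer([result], cache_time=60, is_personal=True)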
app/app.py | shaswat01/Disaster_Response_ETL | 0 | 8750 | import nltk
import json
import plotly
import pandas as pd
import plotly.graph_objects as go
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
nltk.download(['punkt','wordnet'])
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar, Histogram
import joblib
from sqlalchemy import create_engine
app = Flask(__name__)
def tokenize(text):
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
# load data
engine = create_engine('sqlite:///data/DisasterResponse.db')
df = pd.read_sql_table('messages', engine)
# load model
model = joblib.load("models/model.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
# extract data needed for visuals
# Viz 1
genre = df.groupby('genre').count()['id'].sort_values()
# Viz 2
df['text length'] = df['message'].apply(lambda x: len(x.split()))
histogram = df[df['text length'] < 100].groupby('text length').count()['id']
# Viz 3
total_category = df.drop(columns=['id','message','original','genre', 'text length']).sum().sort_values(ascending=False).head(5)
# create visuals
graphs = [
{
'data': [
Bar(
x=genre.values,
y=genre.index,
orientation='h'
)
],
'layout': {
'title': 'Distribution of Message Genres',
'yaxis': {
'title': "Genre"
},
'xaxis': {
'title': "Counts"
}
}
},
{
'data': [
Bar(
x=histogram.index,
y=histogram.values
)
],
'layout': {
'title': 'Distribution of Messages Length',
'yaxis': {
'title': "Total Messages"
},
'xaxis': {
'title': "Total Words"
}
}
},
{
'data': [
Bar(
x=total_category.index,
y=total_category.values
)
],
'layout': {
'title': 'Total Messages per Category (Top 5)',
'yaxis': {
'title': "Total"
},
'xaxis': {
'title': "Category"
}
}
}
]
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
# This will render the go.html Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
def main():
app.run()
#app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
| import nltk
import json
import plotly
import pandas as pd
import plotly.graph_objects as go
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
nltk.download(['punkt','wordnet'])
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar, Histogram
import joblib
from sqlalchemy import create_engine
app = Flask(__name__)
def tokenize(text):
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
# load data
engine = create_engine('sqlite:///data/DisasterResponse.db')
df = pd.read_sql_table('messages', engine)
# load model
model = joblib.load("models/model.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
# extract data needed for visuals
# Viz 1
genre = df.groupby('genre').count()['id'].sort_values()
# Viz 2
df['text length'] = df['message'].apply(lambda x: len(x.split()))
histogram = df[df['text length'] < 100].groupby('text length').count()['id']
# Viz 3
total_category = df.drop(columns=['id','message','original','genre', 'text length']).sum().sort_values(ascending=False).head(5)
# create visuals
graphs = [
{
'data': [
Bar(
x=genre.values,
y=genre.index,
orientation='h'
)
],
'layout': {
'title': 'Distribution of Message Genres',
'yaxis': {
'title': "Genre"
},
'xaxis': {
'title': "Counts"
}
}
},
{
'data': [
Bar(
x=histogram.index,
y=histogram.values
)
],
'layout': {
'title': 'Distribution of Messages Length',
'yaxis': {
'title': "Total Messages"
},
'xaxis': {
'title': "Total Words"
}
}
},
{
'data': [
Bar(
x=total_category.index,
y=total_category.values
)
],
'layout': {
'title': 'Total Messages per Category (Top 5)',
'yaxis': {
'title': "Total"
},
'xaxis': {
'title': "Category"
}
}
}
]
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
# This will render the go.html Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
def main():
app.run()
#app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
| en | 0.690963 | # load data # load model # index webpage displays cool visuals and receives user input text for model # extract data needed for visuals # Viz 1 # Viz 2 # Viz 3 # create visuals # encode plotly graphs in JSON # render web page with plotly graphs # web page that handles user query and displays model results # save user input in query # use model to predict classification for query # This will render the go.html Please see that file. #app.run(host='0.0.0.0', port=3001, debug=True) | 2.800805 | 3 |
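The routes above can be exercised without a browser through Flask's test client; this assumes the file is importable as app and that data/DisasterResponse.db and models/model.pkl are present:

from app import app  # module name assumed from this file's path

with app.test_client() as client:
    home = client.get("/")
    result = client.get("/go", query_string={"query": "We need clean water and food"})
    print(home.status_code, result.status_code)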
tools/mo/openvino/tools/mo/front/mxnet/mx_reshape_reverse.py | pazamelin/openvino | 1 | 8751 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.mxnet.mx_reshape_to_reshape import MXReshapeToReshape
from openvino.tools.mo.ops.Reverse import Reverse
from openvino.tools.mo.ops.mxreshape import MXReshape
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.reshape import Reshape
from openvino.tools.mo.ops.shape import Shape
from openvino.tools.mo.ops.squeeze import Squeeze
from openvino.tools.mo.ops.unsqueeze import Unsqueeze
class MXReshapeReverse(FrontReplacementOp):
"""
If reshape layer with reverse True, special values will inferred from right to left.
The Replacer simulate the behavior. The replaced subgraph reverse input data and special dims,
and after reshape reverse output result to backward.
Resulting subgraph: reshape(reverse=True) -> reverse - reshape(reverse=False) -reverse subgraph.
"""
op = 'MXReshape'
enabled = True
def run_before(self):
return [MXReshapeToReshape]
def replace_sub_graph(self, graph: Graph, match: dict):
mxreshape = match['op']
if not mxreshape.reverse:
return
shape_node = Shape(graph, dict(name=mxreshape.id + '/Shape')).create_node()
forward_reverse_unsqueeze_node = create_op_node_with_second_input(graph, Unsqueeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/ForwardUnsqueeze'))
forward_reverse_node = Reverse(graph, dict(name=mxreshape.id + '/ForwardReverse', axis=1)).create_node()
forward_reverse_squeeze_node = create_op_node_with_second_input(graph, Squeeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/ForwardSqueeze'))
reshape_node = Reshape(graph, dict(name=mxreshape.id + '/Reshape')).create_node()
shape_node.in_port(0).connect(mxreshape.in_port(0).get_source())
mxreshape.in_port(0).get_connection().set_destination(reshape_node.in_port(0))
forward_reverse_unsqueeze_node.in_port(0).connect(shape_node.out_port(0))
forward_reverse_node.in_port(0).connect(forward_reverse_unsqueeze_node.out_port(0))
forward_reverse_squeeze_node.in_port(0).connect(forward_reverse_node.out_port(0))
reshape_node.in_port(1).connect(forward_reverse_squeeze_node.out_port(0))
reshape_shape_node = create_op_node_with_second_input(graph, Reshape, int64_array(np.flip(mxreshape.dim, 0)),
dict(name=str(mxreshape.id) + '/ReshapeShape'))
if np.sum(np.in1d([-2, -3, -4], mxreshape.dim), axis=0):
reshape_shape_node = MXReshape(graph, dict(name=mxreshape.id + '/Reshape',
dim=int64_array(np.flip(mxreshape.dim, 0)))).create_node()
reshape_shape_node.in_port(0).connect(reshape_node.out_port(0))
backward_shape_node = Shape(graph, dict(name=mxreshape.id + '/BackwardShape')).create_node()
backward_reverse_unsqueeze_node = create_op_node_with_second_input(graph, Unsqueeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/BackwardUnsqueeze'))
backward_reverse_node = Reverse(graph, dict(name=mxreshape.id + '/BackwardReverse', axis=1)).create_node()
backward_reverse_squeeze_node = create_op_node_with_second_input(graph, Squeeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/BackwardSqueeze'))
backward_reshape_node = Reshape(graph, dict(name=mxreshape.id + '/BackwardReshape')).create_node()
backward_shape_node.in_port(0).connect(reshape_shape_node.out_port(0))
backward_reverse_unsqueeze_node.in_port(0).connect(backward_shape_node.out_port(0))
backward_reverse_node.in_port(0).connect(backward_reverse_unsqueeze_node.out_port(0))
backward_reverse_squeeze_node.in_port(0).connect(backward_reverse_node.out_port(0))
backward_reshape_node.in_port(0).connect(reshape_shape_node.out_port(0))
backward_reshape_node.in_port(1).connect(backward_reverse_squeeze_node.out_port(0))
mxreshape.out_port(0).get_connection().set_source(backward_reshape_node.out_port(0))
| # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.mxnet.mx_reshape_to_reshape import MXReshapeToReshape
from openvino.tools.mo.ops.Reverse import Reverse
from openvino.tools.mo.ops.mxreshape import MXReshape
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.reshape import Reshape
from openvino.tools.mo.ops.shape import Shape
from openvino.tools.mo.ops.squeeze import Squeeze
from openvino.tools.mo.ops.unsqueeze import Unsqueeze
class MXReshapeReverse(FrontReplacementOp):
"""
If reshape layer with reverse True, special values will inferred from right to left.
The Replacer simulate the behavior. The replaced subgraph reverse input data and special dims,
and after reshape reverse output result to backward.
Resulting subgraph: reshape(reverse=True) -> reverse - reshape(reverse=False) -reverse subgraph.
"""
op = 'MXReshape'
enabled = True
def run_before(self):
return [MXReshapeToReshape]
def replace_sub_graph(self, graph: Graph, match: dict):
mxreshape = match['op']
if not mxreshape.reverse:
return
shape_node = Shape(graph, dict(name=mxreshape.id + '/Shape')).create_node()
forward_reverse_unsqueeze_node = create_op_node_with_second_input(graph, Unsqueeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/ForwardUnsqueeze'))
forward_reverse_node = Reverse(graph, dict(name=mxreshape.id + '/ForwardReverse', axis=1)).create_node()
forward_reverse_squeeze_node = create_op_node_with_second_input(graph, Squeeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/ForwardSqueeze'))
reshape_node = Reshape(graph, dict(name=mxreshape.id + '/Reshape')).create_node()
shape_node.in_port(0).connect(mxreshape.in_port(0).get_source())
mxreshape.in_port(0).get_connection().set_destination(reshape_node.in_port(0))
forward_reverse_unsqueeze_node.in_port(0).connect(shape_node.out_port(0))
forward_reverse_node.in_port(0).connect(forward_reverse_unsqueeze_node.out_port(0))
forward_reverse_squeeze_node.in_port(0).connect(forward_reverse_node.out_port(0))
reshape_node.in_port(1).connect(forward_reverse_squeeze_node.out_port(0))
reshape_shape_node = create_op_node_with_second_input(graph, Reshape, int64_array(np.flip(mxreshape.dim, 0)),
dict(name=str(mxreshape.id) + '/ReshapeShape'))
if np.sum(np.in1d([-2, -3, -4], mxreshape.dim), axis=0):
reshape_shape_node = MXReshape(graph, dict(name=mxreshape.id + '/Reshape',
dim=int64_array(np.flip(mxreshape.dim, 0)))).create_node()
reshape_shape_node.in_port(0).connect(reshape_node.out_port(0))
backward_shape_node = Shape(graph, dict(name=mxreshape.id + '/BackwardShape')).create_node()
backward_reverse_unsqueeze_node = create_op_node_with_second_input(graph, Unsqueeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/BackwardUnsqueeze'))
backward_reverse_node = Reverse(graph, dict(name=mxreshape.id + '/BackwardReverse', axis=1)).create_node()
backward_reverse_squeeze_node = create_op_node_with_second_input(graph, Squeeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/BackwardSqueeze'))
backward_reshape_node = Reshape(graph, dict(name=mxreshape.id + '/BackwardReshape')).create_node()
backward_shape_node.in_port(0).connect(reshape_shape_node.out_port(0))
backward_reverse_unsqueeze_node.in_port(0).connect(backward_shape_node.out_port(0))
backward_reverse_node.in_port(0).connect(backward_reverse_unsqueeze_node.out_port(0))
backward_reverse_squeeze_node.in_port(0).connect(backward_reverse_node.out_port(0))
backward_reshape_node.in_port(0).connect(reshape_shape_node.out_port(0))
backward_reshape_node.in_port(1).connect(backward_reverse_squeeze_node.out_port(0))
mxreshape.out_port(0).get_connection().set_source(backward_reshape_node.out_port(0))
| en | 0.607493 | # Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 If reshape layer with reverse True, special values will inferred from right to left. The Replacer simulate the behavior. The replaced subgraph reverse input data and special dims, and after reshape reverse output result to backward. Resulting subgraph: reshape(reverse=True) -> reverse - reshape(reverse=False) -reverse subgraph. | 2.262639 | 2 |
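In shape terms, the docstring's trick is: flip the input shape and the requested dim, infer special values as usual, then flip the result back. A rough NumPy illustration of that flip-infer-flip idea, covering only the -1 placeholder (MXNet's 0/-2/-3/-4 codes are ignored here):

import numpy as np

def infer_reverse_reshape_shape(input_shape, dim):
    # flip the requested dims, resolve -1 against the total element count, flip back
    flipped_dim = np.flip(np.array(dim))
    total = int(np.prod(input_shape))
    out = flipped_dim.copy()
    if -1 in out:
        known = int(np.prod(out[out != -1]))
        out[out == -1] = total // known
    return tuple(int(v) for v in np.flip(out))

print(infer_reverse_reshape_shape((2, 3, 4), (-1, 8)))  # (3, 8)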
Python/Simulation/Numerical_Methods/test_cubic_spline_solve.py | MattMarti/Lambda-Trajectory-Sim | 0 | 8752 | <reponame>MattMarti/Lambda-Trajectory-Sim
import unittest;
import numpy as np;
import scipy as sp;
from cubic_spline_solve import cubic_spline_solve;
from cubic_spline_fun import cubic_spline_fun;
class Test_cubic_spline_solve(unittest.TestCase):
'''
Test_cubicsplineSolve
Test case for the cubic spline solver function. This function just solves
for the spline data, so that the spline can be precomputed before code is
run. This improves code performance by removing the need to invert a
matrix every time the spline function is called.
@author: <NAME>
@date: 2019-06-16
'''
def test_nominal_01(self):
'''Test the spline solve for nominal test case'''
# Function handles for function and derivatives
f = lambda x : sp.sin(x);
df = lambda x : sp.cos(x);
# x from 0 to 30 in the correct format
xrange = np.linspace(0, 10, 20);
xkvec = np.zeros((1, xrange.shape[0]));
for i in range(0, xrange.shape[0]):
xkvec[0,i] = xrange[i];
#
# Generate function values dataset
fkvec = f(xkvec);
xinter = np.linspace(0, 10, 1000);
# Generate parameters for clamped boundary conditions
fslope = np.ndarray((1,2));
fslope[0,0] = sp.cos(xkvec[0,0]);
fslope[0,1] = sp.cos(xkvec[0,-1]);
# Compute already tested spline
_, _, akvec, bkvec, ckvec, dkvec \
= cubic_spline_fun(xkvec, fkvec, xinter, fslope);
splineDataTrue = np.zeros((1, xkvec.shape[1], 5));
splineDataTrue[0,:,0] = akvec.squeeze();
splineDataTrue[0,:,1] = bkvec.squeeze();
splineDataTrue[0,:,2] = ckvec.squeeze();
splineDataTrue[0,:,3] = dkvec.squeeze();
splineDataTrue[0,:,4] = xkvec.squeeze();
# Run spline solve
splineDataMat = cubic_spline_solve( xkvec, fkvec, fslope );
# Test Function truth values
error = splineDataMat - splineDataTrue;
maxerr = np.max(np.abs(error));
self.assertLess(maxerr, 1e-12, 'Spline error too high');
#
def test_multiple_01(self):
'''Test the spline works for a two dimensional case'''
# Definition for two dimensional function output
def func(x):
if type(x) is not np.ndarray:
f = np.zeros((2,1));
else:
f = np.zeros((2,x.shape[0]));
#
f[0,:] = np.sin(x);
f[1,:] = -10*x**2 + 50*x + 1000;
return f;
#
# Definition for derivative function
def dfunc(x):
if type(x) is not np.ndarray:
df = np.zeros((2,1));
else:
df = np.zeros((2,x.shape[0]));
#
df[0,:] = np.cos(x);
df[1,:] = -20*x + 50;
return df;
#
# Given
f = lambda x : func(x);
df = lambda x : dfunc(x);
xkvec = np.linspace(0, 10, 20);
fkvec = f(xkvec);
xinter = np.linspace(0, 10, 1000);
fslope = np.ndarray((2,2)); # Clambed B.C.s
fslope[:,0] = df(xkvec[0]).squeeze();
fslope[:,1] = df(xkvec[-1]).squeeze();
# Preallocate truth spline data
m = 2;
n = xkvec.shape[0];
splineDataTrue = np.zeros((m, n, 5));
splineDataTrue[0,:,4] = xkvec;
# Run true spline for first dataset
_, _, akvec, bkvec, ckvec, dkvec \
= cubic_spline_fun(xkvec, fkvec[0,:], xinter, fslope[0,:]);
splineDataTrue[0,:,0] = akvec.squeeze();
splineDataTrue[0,:,1] = bkvec.squeeze();
splineDataTrue[0,:,2] = ckvec.squeeze();
splineDataTrue[0,:,3] = dkvec.squeeze();
# Run true spline for second dataset
_, _, akvec, bkvec, ckvec, dkvec \
= cubic_spline_fun(xkvec, fkvec[1,:], xinter, fslope[1,:]);
splineDataTrue[1,:,0] = akvec.squeeze();
splineDataTrue[1,:,1] = bkvec.squeeze();
splineDataTrue[1,:,2] = ckvec.squeeze();
splineDataTrue[1,:,3] = dkvec.squeeze();
# Run new spline
splineDataMat = cubic_spline_solve( xkvec, fkvec, fslope );
# Test Function truth values
error = splineDataMat - splineDataTrue;
maxerr = np.max(np.abs(error));
self.assertLess(maxerr, 1e-12, 'Spline error too high');
#
def test_types(self):
'''Test that the function raises type errors on bad input'''
# Function handles for function and derivatives
f = lambda x : sp.sin(x);
df = lambda x : sp.cos(x);
# x from 0 to 30 in the correct format
xrange = np.linspace(0, 10, 20);
xkvec = np.zeros((1, xrange.shape[0]));
for i in range(0, xrange.shape[0]):
xkvec[0,i] = xrange[i];
#
# Generate function values dataset
fkvec = f(xkvec);
xinter = np.linspace(0, 10, 1000);
# Generate parameters for clamped boundary conditions
fslope = np.ndarray((1,2));
fslope[0,0] = sp.cos(xkvec[0,0]);
fslope[0,1] = sp.cos(xkvec[0,-1]);
# Run function without errors
splineDataMat = cubic_spline_solve( xkvec, fkvec, fslope );
# Test with various inputs for xkvec
self.assertRaises(TypeError, cubic_spline_solve, True, fkvec, fslope);
self.assertRaises(TypeError, cubic_spline_solve, 0.1, fkvec, fslope);
self.assertRaises(TypeError, cubic_spline_solve, "AA", fkvec, fslope);
self.assertRaises(TypeError, cubic_spline_solve, 'A', fkvec, fslope);
# Test with various inputs for xkvec
self.assertRaises(TypeError, cubic_spline_solve, xkvec, True, fslope);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, 0.1, fslope);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, "AA", fslope);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, 'A', fslope);
# Test with various inputs for fslope
self.assertRaises(TypeError, cubic_spline_solve, xkvec, fkvec, True);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, fkvec, 0.1);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, fkvec, "AA");
self.assertRaises(TypeError, cubic_spline_solve, xkvec, fkvec, 'A');
#
# | import unittest;
import numpy as np;
import scipy as sp;
from cubic_spline_solve import cubic_spline_solve;
from cubic_spline_fun import cubic_spline_fun;
class Test_cubic_spline_solve(unittest.TestCase):
'''
Test_cubicsplineSolve
Test case for the cubic spline solver function. This function just solves
for the spline data, so that the spline can be precomputed before code is
run. This improves code performance by removing the need to invert a
matrix every time the spline function is called.
@author: <NAME>
@date: 2019-06-16
'''
def test_nominal_01(self):
'''Test the spline solve for nominal test case'''
# Function handles for function and derivatives
f = lambda x : sp.sin(x);
df = lambda x : sp.cos(x);
# x from 0 to 30 in the correct format
xrange = np.linspace(0, 10, 20);
xkvec = np.zeros((1, xrange.shape[0]));
for i in range(0, xrange.shape[0]):
xkvec[0,i] = xrange[i];
#
# Generate function values dataset
fkvec = f(xkvec);
xinter = np.linspace(0, 10, 1000);
# Generate parameters for clamped boundary conditions
fslope = np.ndarray((1,2));
fslope[0,0] = sp.cos(xkvec[0,0]);
fslope[0,1] = sp.cos(xkvec[0,-1]);
# Compute already tested spline
_, _, akvec, bkvec, ckvec, dkvec \
= cubic_spline_fun(xkvec, fkvec, xinter, fslope);
splineDataTrue = np.zeros((1, xkvec.shape[1], 5));
splineDataTrue[0,:,0] = akvec.squeeze();
splineDataTrue[0,:,1] = bkvec.squeeze();
splineDataTrue[0,:,2] = ckvec.squeeze();
splineDataTrue[0,:,3] = dkvec.squeeze();
splineDataTrue[0,:,4] = xkvec.squeeze();
# Run spline solve
splineDataMat = cubic_spline_solve( xkvec, fkvec, fslope );
# Test Function truth values
error = splineDataMat - splineDataTrue;
maxerr = np.max(np.abs(error));
self.assertLess(maxerr, 1e-12, 'Spline error too high');
#
def test_multiple_01(self):
'''Test the spline works for a two dimensional case'''
# Definition for two dimensional function output
def func(x):
if type(x) is not np.ndarray:
f = np.zeros((2,1));
else:
f = np.zeros((2,x.shape[0]));
#
f[0,:] = np.sin(x);
f[1,:] = -10*x**2 + 50*x + 1000;
return f;
#
# Definition for derivative function
def dfunc(x):
if type(x) is not np.ndarray:
df = np.zeros((2,1));
else:
df = np.zeros((2,x.shape[0]));
#
df[0,:] = np.cos(x);
df[1,:] = -20*x + 50;
return df;
#
# Given
f = lambda x : func(x);
df = lambda x : dfunc(x);
xkvec = np.linspace(0, 10, 20);
fkvec = f(xkvec);
xinter = np.linspace(0, 10, 1000);
fslope = np.ndarray((2,2)); # Clambed B.C.s
fslope[:,0] = df(xkvec[0]).squeeze();
fslope[:,1] = df(xkvec[-1]).squeeze();
# Preallocate truth spline data
m = 2;
n = xkvec.shape[0];
splineDataTrue = np.zeros((m, n, 5));
splineDataTrue[0,:,4] = xkvec;
# Run true spline for first dataset
_, _, akvec, bkvec, ckvec, dkvec \
= cubic_spline_fun(xkvec, fkvec[0,:], xinter, fslope[0,:]);
splineDataTrue[0,:,0] = akvec.squeeze();
splineDataTrue[0,:,1] = bkvec.squeeze();
splineDataTrue[0,:,2] = ckvec.squeeze();
splineDataTrue[0,:,3] = dkvec.squeeze();
# Run true spline for second dataset
_, _, akvec, bkvec, ckvec, dkvec \
= cubic_spline_fun(xkvec, fkvec[1,:], xinter, fslope[1,:]);
splineDataTrue[1,:,0] = akvec.squeeze();
splineDataTrue[1,:,1] = bkvec.squeeze();
splineDataTrue[1,:,2] = ckvec.squeeze();
splineDataTrue[1,:,3] = dkvec.squeeze();
# Run new spline
splineDataMat = cubic_spline_solve( xkvec, fkvec, fslope );
# Test Function truth values
error = splineDataMat - splineDataTrue;
maxerr = np.max(np.abs(error));
self.assertLess(maxerr, 1e-12, 'Spline error too high');
#
def test_types(self):
'''Test that the function raises type errors on bad input'''
# Function handles for function and derivatives
f = lambda x : sp.sin(x);
df = lambda x : sp.cos(x);
# x from 0 to 30 in the correct format
xrange = np.linspace(0, 10, 20);
xkvec = np.zeros((1, xrange.shape[0]));
for i in range(0, xrange.shape[0]):
xkvec[0,i] = xrange[i];
#
# Generate function values dataset
fkvec = f(xkvec);
xinter = np.linspace(0, 10, 1000);
# Generate parameters for clamped boundary conditions
fslope = np.ndarray((1,2));
fslope[0,0] = sp.cos(xkvec[0,0]);
fslope[0,1] = sp.cos(xkvec[0,-1]);
# Run function without errors
splineDataMat = cubic_spline_solve( xkvec, fkvec, fslope );
# Test with various inputs for xkvec
self.assertRaises(TypeError, cubic_spline_solve, True, fkvec, fslope);
self.assertRaises(TypeError, cubic_spline_solve, 0.1, fkvec, fslope);
self.assertRaises(TypeError, cubic_spline_solve, "AA", fkvec, fslope);
self.assertRaises(TypeError, cubic_spline_solve, 'A', fkvec, fslope);
# Test with various inputs for xkvec
self.assertRaises(TypeError, cubic_spline_solve, xkvec, True, fslope);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, 0.1, fslope);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, "AA", fslope);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, 'A', fslope);
# Test with various inputs for fslope
self.assertRaises(TypeError, cubic_spline_solve, xkvec, fkvec, True);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, fkvec, 0.1);
self.assertRaises(TypeError, cubic_spline_solve, xkvec, fkvec, "AA");
self.assertRaises(TypeError, cubic_spline_solve, xkvec, fkvec, 'A');
#
# | en | 0.698745 | Test_cubicsplineSolve Test case for the cubic spline solver function. This function just solves for the spline data, so that the spline can be precomputed before code is run. This improves code performance by removing the need to invert a matrix every time the spline function is called. @author: <NAME> @date: 2019-06-16 Test the spline solve for nominal test case # Function handles for function and derivatives # x from 0 to 30 in the correct format # # Generate function values dataset # Generate parameters for clamped boundary conditions # Compute already tested spline # Run spline solve # Test Function truth values # Test the spline works for a two dimensional case # Definition for two dimensional function output # # # Definition for derivative function # # # Given # Clambed B.C.s # Preallocate truth spline data # Run true spline for first dataset # Run true spline for second dataset # Run new spline # Test Function truth values # Test that the function raises type errors on bad input # Function handles for function and derivatives # x from 0 to 30 in the correct format # # Generate function values dataset # Generate parameters for clamped boundary conditions # Run function without errors # Test with various inputs for xkvec # Test with various inputs for xkvec # Test with various inputs for fslope # # | 3.036769 | 3 |
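The (1, n, 5) arrays assembled in these tests store a, b, c, d and the knot locations x_k; a hypothetical evaluator for that layout, assuming the usual a + b*dx + c*dx**2 + d*dx**3 convention (the repository's own evaluation routine is not shown here):

import numpy as np

def eval_spline(spline_data, x):
    # spline_data: (1, n, 5) with columns a, b, c, d, x_k as built in the tests above
    a, b, c, d, xk = (spline_data[0, :, i] for i in range(5))
    x = np.atleast_1d(x)
    k = np.clip(np.searchsorted(xk, x, side='right') - 1, 0, len(xk) - 2)
    dx = x - xk[k]
    return a[k] + b[k] * dx + c[k] * dx ** 2 + d[k] * dx ** 3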
PassWord.py | IQUBE-X/passGenerator | 1 | 8753 | # PassWord - The Safe Password Generator App!
# importing the tkinter module for GUI
from tkinter import *
# importing the message box widget from tkinter
from tkinter import messagebox
# importing sqlite3 for database
import sqlite3
# importing random for password generation
import random
# creating fonts
font = ('Fixedsys', 10)
font2 = ('Comic Sans MS', 9)
font3 = ('System', 9)
font4 = ('Two Cen MT', 9)
# creating a database and establishing a connection
conn = sqlite3.connect('password.db')
# creating a cursor to navigate through database
c = conn.cursor()
# creating the table
'''
c.execute("""CREATE TABLE passwords (
password text
)""")
'''
# defining the root variable
root = Tk()
# Naming the app
root.title('PassWord')
# creating a label frame to organize content
label_frame = LabelFrame(root, padx=10, pady=10, text='Password Generator', font=font)
# printing the label frame onto the screen or window
label_frame.grid(row=0, column=0, columnspan=1, padx=10, pady=10, sticky=E + W)
# creating a separate label frame to perform delete functions
delete_labelframe = LabelFrame(root, text='Delete Password', padx=10, pady=10, font=font4)
# printing delete labelframe onto the screen
delete_labelframe.grid(row=5, column=0, columnspan=1, padx=10, pady=10, sticky=E + W)
# making the text box where password is going to be displayed
e = Entry(label_frame, fg='black', bg='white')
# printing the text box to the screen
e.grid(row=0, column=0, padx=10, pady=10, columnspan=1)
# (for the delete function) to give information on input for delete function
info = Label(delete_labelframe, text='Password ID', fg='black', font=font2)
# printing the label onto the screen
info.grid(row=6, column=0, pady=10)
# making the entry for user to input which password
e2 = Entry(delete_labelframe, fg='black', bg='white')
# printing the entry onto the screen
e2.grid(row=6, column=1, pady=10)
# making the password generate function
def generate():
# creating lists
    lowercase_letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                         't', 'u', 'v', 'w', 'x', 'y', 'z']
    # creating lists
    uppercase_letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
                         'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
    # creating lists
    symbols_list = ['-', '@', '!', '$', '%', '&', '?', '#', '^']
    # creating lists
    numbers_list = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']
# generating a random value from the lists
lowercase_letter = random.choice(lowercase_letters)
# generating a random value from the lists
lowercase_letter2 = random.choice(lowercase_letters)
# generating a random value from the lists
uppercase_letter = random.choice(uppercase_letters)
# generating a random value from the lists
uppercase2_letter = random.choice(uppercase_letters)
# generating a random value from the lists
symbol = random.choice(symbols_list)
# generating a random value from the lists
symbol2 = random.choice(symbols_list)
# generating a random value from the lists
number = random.choice(numbers_list)
# generating a random value from the lists
number2 = random.choice(numbers_list)
# creating a password list made of random values from previous lists
password = [lowercase_letter, uppercase_letter, uppercase2_letter, lowercase_letter2, symbol, symbol2, number,
number2]
# shuffling password list
password1 = random.sample(password, 8)
# concatenating and making final list
final_password = password1[0] + password1[1] + password1[2] + password1[3] + password1[4] + password1[5] + \
password1[6] + password1[7]
# deleting previous item from entry
e.delete(0, END)
# inserting the final password
e.insert(0, final_password)
# making a function to save the password into the database
def save_password():
conn = sqlite3.connect('password.db')
c = conn.cursor()
c.execute("INSERT INTO passwords VALUES (?)", (e.get(),))
e.delete(0, END)
conn.commit()
conn.close()
# making a function to show all the saved passwords
def show_password():
global passcode_label
conn = sqlite3.connect('password.db')
c = conn.cursor()
c.execute("SELECT rowid, * FROM passwords")
passcodes = c.fetchall()
print_code = ''
for passcode in passcodes:
print_code += str(passcode[0]) + '.' + ' ' + str(passcode[1]) + '\n'
passcode_label = Text(label_frame, height=15, width=25)
passcode_label.configure(state='normal')
passcode_label.insert(1.0, print_code)
passcode_label.grid(row=5, column=0, padx=10, pady=10)
passcode_label.configure(state='disabled')
conn.commit()
conn.close()
# making a function to hide the saved passwords
def hide_password():
passcode_label.destroy()
# making a function to delete passwords from database
def delete():
conn = sqlite3.connect('password.db')
c = conn.cursor()
c.execute("DELETE from passwords WHERE oid = (?)", (e2.get(),))
e2.delete(0, END)
passcode_label.destroy()
conn.commit()
conn.close()
# making a function to delete all the passwords in the database
def delete_all():
global number_of_passwords
conn = sqlite3.connect('password.db')
c = conn.cursor()
c.execute("SELECT rowid FROM passwords")
number_of_passwords = c.fetchall()
num_of_passwords = len(number_of_passwords)
confirmation = messagebox.askyesno('Delete All Passwords?',
'You have chosen to delete ' + str(
num_of_passwords) + ' passwords. This action cannot be reversed. Do you wish to proceed?')
if confirmation == 1:
c.execute("DELETE FROM passwords")
conn.commit()
conn.close()
# button for generating password
generate_password = Button(label_frame, text='Generate Strong Password', command=generate, font=font2)
# printing the button onto the screen
generate_password.grid(row=1, padx=10, pady=10, column=0)
# button to save password
save = Button(label_frame, text='Save Password', command=save_password, font=font2)
# printing the button onto the screen
save.grid(row=2, padx=10, pady=10, column=0)
# making a button to show all the passwords
show = Button(label_frame, text='Show Passwords', command=show_password, font=font2)
# printing the button onto the screen
show.grid(row=4, padx=10, pady=10, column=0)
# making a button to hide the shown passwords
hide = Button(label_frame, text='Hide Passwords', command=hide_password, font=font2)
# printing the button onto the screen
hide.grid(row=6, column=0, padx=10, pady=10)
# making a button to delete a password
delete = Button(delete_labelframe, text='Delete Password', command=delete, font=font2)
# printing the button onto the screen
delete.grid(row=8, padx=10, pady=10, column=1)
# making a button to delete all the passwords
delete_all = Button(delete_labelframe, text='Delete All', command=delete_all, fg='dark red', width=20, anchor=CENTER,
font=font3)
# printing the button onto the screen
delete_all.grid(row=9, column=1, padx=10, pady=10, ipadx=15)
# committing the changes to the database
conn.commit()
# closing the connection with database
conn.close()
# making the final loop
root.mainloop()
| # PassWord - The Safe Password Generator App!
# importing the tkinter module for GUI
from tkinter import *
# importing the message box widget from tkinter
from tkinter import messagebox
# importing sqlite3 for database
import sqlite3
# importing random for password generation
import random
# creating fonts
font = ('Fixedsys', 10)
font2 = ('Comic Sans MS', 9)
font3 = ('System', 9)
font4 = ('Two Cen MT', 9)
# creating a database and establishing a connection
conn = sqlite3.connect('password.db')
# creating a cursor to navigate through database
c = conn.cursor()
# creating the table
'''
c.execute("""CREATE TABLE passwords (
password text
)""")
'''
# defining the root variable
root = Tk()
# Naming the app
root.title('PassWord')
# creating a label frame to organize content
label_frame = LabelFrame(root, padx=10, pady=10, text='Password Generator', font=font)
# printing the label frame onto the screen or window
label_frame.grid(row=0, column=0, columnspan=1, padx=10, pady=10, sticky=E + W)
# creating a separate label frame to perform delete functions
delete_labelframe = LabelFrame(root, text='Delete Password', padx=10, pady=10, font=font4)
# printing delete labelframe onto the screen
delete_labelframe.grid(row=5, column=0, columnspan=1, padx=10, pady=10, sticky=E + W)
# making the text box where password is going to be displayed
e = Entry(label_frame, fg='black', bg='white')
# printing the text box to the screen
e.grid(row=0, column=0, padx=10, pady=10, columnspan=1)
# (for the delete function) to give information on input for delete function
info = Label(delete_labelframe, text='Password ID', fg='black', font=font2)
# printing the label onto the screen
info.grid(row=6, column=0, pady=10)
# making the entry for user to input which password
e2 = Entry(delete_labelframe, fg='black', bg='white')
# printing the entry onto the screen
e2.grid(row=6, column=1, pady=10)
# making the password generate function
def generate():
# creating lists
    lowercase_letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                         't', 'u', 'v', 'w', 'x', 'y', 'z']
    # creating lists
    uppercase_letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
                         'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
    # creating lists
    symbols_list = ['-', '@', '!', '$', '%', '&', '?', '#', '^']
    # creating lists
    numbers_list = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']
# generating a random value from the lists
lowercase_letter = random.choice(lowercase_letters)
# generating a random value from the lists
lowercase_letter2 = random.choice(lowercase_letters)
# generating a random value from the lists
uppercase_letter = random.choice(uppercase_letters)
# generating a random value from the lists
uppercase2_letter = random.choice(uppercase_letters)
# generating a random value from the lists
symbol = random.choice(symbols_list)
# generating a random value from the lists
symbol2 = random.choice(symbols_list)
# generating a random value from the lists
number = random.choice(numbers_list)
# generating a random value from the lists
number2 = random.choice(numbers_list)
# creating a password list made of random values from previous lists
password = [lowercase_letter, uppercase_letter, uppercase2_letter, lowercase_letter2, symbol, symbol2, number,
number2]
# shuffling password list
password1 = random.sample(password, 8)
# concatenating and making final list
final_password = password1[0] + password1[1] + password1[2] + password1[3] + password1[4] + password1[5] + \
password1[6] + password1[7]
# deleting previous item from entry
e.delete(0, END)
# inserting the final password
e.insert(0, final_password)
# making a function to save the password into the database
def save_password():
conn = sqlite3.connect('password.db')
c = conn.cursor()
c.execute("INSERT INTO passwords VALUES (?)", (e.get(),))
e.delete(0, END)
conn.commit()
conn.close()
# making a function to show all the saved passwords
def show_password():
global passcode_label
conn = sqlite3.connect('password.db')
c = conn.cursor()
c.execute("SELECT rowid, * FROM passwords")
passcodes = c.fetchall()
print_code = ''
for passcode in passcodes:
print_code += str(passcode[0]) + '.' + ' ' + str(passcode[1]) + '\n'
passcode_label = Text(label_frame, height=15, width=25)
passcode_label.configure(state='normal')
passcode_label.insert(1.0, print_code)
passcode_label.grid(row=5, column=0, padx=10, pady=10)
passcode_label.configure(state='disabled')
conn.commit()
conn.close()
# making a function to hide the saved passwords
def hide_password():
passcode_label.destroy()
# making a function to delete passwords from database
def delete():
conn = sqlite3.connect('password.db')
c = conn.cursor()
c.execute("DELETE from passwords WHERE oid = (?)", (e2.get(),))
e2.delete(0, END)
passcode_label.destroy()
conn.commit()
conn.close()
# making a function to delete all the passwords in the database
def delete_all():
global number_of_passwords
conn = sqlite3.connect('password.db')
c = conn.cursor()
c.execute("SELECT rowid FROM passwords")
number_of_passwords = c.fetchall()
num_of_passwords = len(number_of_passwords)
confirmation = messagebox.askyesno('Delete All Passwords?',
'You have chosen to delete ' + str(
num_of_passwords) + ' passwords. This action cannot be reversed. Do you wish to proceed?')
if confirmation == 1:
c.execute("DELETE FROM passwords")
conn.commit()
conn.close()
# button for generating password
generate_password = Button(label_frame, text='Generate Strong Password', command=generate, font=font2)
# printing the button onto the screen
generate_password.grid(row=1, padx=10, pady=10, column=0)
# button to save password
save = Button(label_frame, text='Save Password', command=save_password, font=font2)
# printing the button onto the screen
save.grid(row=2, padx=10, pady=10, column=0)
# making a button to show all the passwords
show = Button(label_frame, text='Show Passwords', command=show_password, font=font2)
# printing the button onto the screen
show.grid(row=4, padx=10, pady=10, column=0)
# making a button to hide the shown passwords
hide = Button(label_frame, text='Hide Passwords', command=hide_password, font=font2)
# printing the button onto the screen
hide.grid(row=6, column=0, padx=10, pady=10)
# making a button to delete a password
delete = Button(delete_labelframe, text='Delete Password', command=delete, font=font2)
# printing the button onto the screen
delete.grid(row=8, padx=10, pady=10, column=1)
# making a button to delete all the passwords
delete_all = Button(delete_labelframe, text='Delete All', command=delete_all, fg='dark red', width=20, anchor=CENTER,
font=font3)
# printing the button onto the screen
delete_all.grid(row=9, column=1, padx=10, pady=10, ipadx=15)
# committing the changes to the database
conn.commit()
# closing the connection with database
conn.close()
# making the final loop
root.mainloop()
| en | 0.789507 | # PassWord - The Safe Password Generator App! # importing the tkinter module for GUI # importing the message box widget from tkinter # importing sqlite3 for database # importing random for password generation # creating fonts # creating a database and establishing a connection # creating a cursor to navigate through database # creating the table c.execute("""CREATE TABLE passwords (
password text
)""") # defining the root variable # Naming the app # creating a label frame to organize content # printing the label frame onto the screen or window # creating a separate label frame to perform delete functions # printing delete labelframe onto the screen # making the text box where password is going to be displayed # printing the text box to the screen # (for the delete function) to give information on input for delete function # (for the delete function) to give information on input for delete function # printing the label onto the screen # making the entry for user to input which password # printing the entry onto the screen # making the password generate function # creating lists # creating lists # creating lists # creating lists # generating a random value from the lists # generating a random value from the lists # generating a random value from the lists # generating a random value from the lists # generating a random value from the lists # generating a random value from the lists # generating a random value from the lists # generating a random value from the lists # creating a password list made of random values from previous lists # shuffling password list # concatenating and making final list # deleting previous item from entry # inserting the final password # making a function to save the password into the database # making a function to show all the saved passwords # making a function to hide the saved passwords # making a function to delete passwords from database # making a function to delete all the passwords in the database # button for generating password # printing the button onto the screen # button to save password # printing the button onto the screen # making a button to show all the passwords # printing the button onto the screen # making a button to hide the shown passwords # printing the button onto the screen # making a button to delete a password # printing the button onto the screen # making a button to delete all the passwords # printing the button onto the screen # committing the changes to the database # closing the connection with database # making the final loop | 4.522357 | 5 |
1805_number_of_different_integers_in_a_string.py | hotternative/leetcode | 0 | 8754 | <filename>1805_number_of_different_integers_in_a_string.py
from string import ascii_lowercase
ts = 'a123bc34d8ef34'
cur = []
res = set()
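# scan the string: letters flush the buffered digit run into the set (int() drops leading zeros),
# while digit characters are accumulated in cur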
for c in ts:
if c in ascii_lowercase:
if cur:
s = ''.join(cur)
res.add(int(s))
cur = []
else:
cur.append(c)
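# for/else: this else belongs to the for loop and runs once it finishes,
# flushing any digit run still buffered at the end of the string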
else:
if cur:
s = ''.join(cur)
res.add(int(s))
print(res)
| <filename>1805_number_of_different_integers_in_a_string.py
from string import ascii_lowercase
ts = 'a123bc34d8ef34'
cur = []
res = set()
for c in ts:
if c in ascii_lowercase:
if cur:
s = ''.join(cur)
res.add(int(s))
cur = []
else:
cur.append(c)
else:
if cur:
s = ''.join(cur)
res.add(int(s))
print(res)
| none | 1 | 3.476522 | 3 |
|
app.py | ahmedriaz9908/memeapiiz | 0 | 8755 | from flask import Flask, render_template, jsonify
from reddit_handler import *
app = Flask(__name__)
meme_subreddits = ['izlam']
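# subreddits to sample posts from; /meme and /sample re-draw posts until an image link is found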
@app.route('/')
def index():
return render_template('index.html')
@app.route('/meme')
def one_post():
sub = random.choice(meme_subreddits)
re = get_posts(sub, 100)
r = random.choice(re)
while not is_img_link(r[1]):
r = random.choice(re)
return jsonify({
'title': r[0],
'url': r[1],
'postLink': r[2],
'subreddit': sub
})
@app.route('/sample')
def sample():
re = get_posts(random.choice(meme_subreddits), 100)
r = random.choice(re)
while not is_img_link(r[1]):
r = random.choice(re)
return render_template('sample.html', title=r[0], img_url=r[1], shortlink=r[2])
@app.route('/test')
def test():
re = get_posts(random.choice(meme_subreddits), 100)
return render_template('test.html', re=re)
@app.route('/<something>')
def not_found(something):
return render_template('not_found.html')
| from flask import Flask, render_template, jsonify
from reddit_handler import *
app = Flask(__name__)
meme_subreddits = ['izlam']
@app.route('/')
def index():
return render_template('index.html')
@app.route('/meme')
def one_post():
sub = random.choice(meme_subreddits)
re = get_posts(sub, 100)
r = random.choice(re)
while not is_img_link(r[1]):
r = random.choice(re)
return jsonify({
'title': r[0],
'url': r[1],
'postLink': r[2],
'subreddit': sub
})
@app.route('/sample')
def sample():
re = get_posts(random.choice(meme_subreddits), 100)
r = random.choice(re)
while not is_img_link(r[1]):
r = random.choice(re)
return render_template('sample.html', title=r[0], img_url=r[1], shortlink=r[2])
@app.route('/test')
def test():
re = get_posts(random.choice(meme_subreddits), 100)
return render_template('test.html', re=re)
@app.route('/<something>')
def not_found(something):
return render_template('not_found.html')
| none | 1 | 2.698476 | 3 |
|
10_compare_between_main_product_pages.py | e-davydenkova/SeleniumWebDriver_Training | 0 | 8756 | import pytest
from selenium import webdriver
import re
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
wd.get("http://localhost/litecart/en/")
request.addfinalizer(wd.quit)
return wd
# check that product names are identical on the main page and on product page
def test_product_names(driver):
# get a product name on the main page
main_name = driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light .name").text
# get a product name on a product page
driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light").click()
product_name = driver.find_element_by_css_selector("#box-product .title").text
assert main_name == product_name, "Product names on the main page and on product page are NOT identical"
# check that prices (regular and campaign) are identical on the main page and on product page
def test_prices(driver):
prices = driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light div.price-wrapper")
# get a regular price on the main page
main_regular_price = prices.find_element_by_css_selector(".regular-price").text
# get a campaign price on the main page
main_campaign_price = prices.find_element_by_css_selector(".campaign-price").text
# open the product page
driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light").click()
# get a regular price on a product page
product_regular_price = driver.find_element_by_css_selector("#box-product .price-wrapper .regular-price").text
# get a campaign price on a product page
product_campaign_price = driver.find_element_by_css_selector("#box-product .price-wrapper .campaign-price").text
assert main_regular_price == product_regular_price, "Regular prices on the main page and on the product page " \
"are NOT identical"
assert main_campaign_price == product_campaign_price, "Campaign prices on the main page and on the product page " \
"are NOT identical"
# check color of regular and campaign prices and their attributes on the main page
def test_colors_main_page(driver):
prices = driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light div.price-wrapper")
# get a color of the regular price on the main page
regular_color = prices.find_element_by_css_selector(".regular-price").value_of_css_property("color")
# verify that the regular price is grey (values of R,G,B are identical)
color_list = re.findall('\d+',regular_color)
assert(color_list[0] == color_list[1] == color_list[2]), "The regular price on the main page is NOT grey"
# get a color of the campaign price on the main page
campaign_color = prices.find_element_by_css_selector(".campaign-price").value_of_css_property("color")
# verify that the campaign price is red (values of G and B are 0)
color_list = re.findall('\d+',campaign_color)
assert (color_list[1] == '0') and (color_list[2] == '0'), "The campaign price on the main page is NOT red"
regular_attr = prices.find_element_by_css_selector(".regular-price").value_of_css_property("text-decoration-line")
assert regular_attr == 'line-through', "Regular price is NOT line-through on the main page"
campaign_attr = prices.find_element_by_css_selector(".campaign-price").value_of_css_property("font-weight")
assert (campaign_attr == 'bold') or (campaign_attr >= '700'), "Campaign price is NOT bold on the main page"
# check color of regular and campaign prices and their attributes on the product page
def test_colors_product_page(driver):
# open the product page
driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light").click()
prices = driver.find_element_by_css_selector("#box-product .price-wrapper")
# get a color of the regular price on the main page
regular_color = prices.find_element_by_css_selector(".regular-price").value_of_css_property("color")
# verify that the regular price is grey (values of R,G,B are identical)
color_list = re.findall('\d+', regular_color)
assert (color_list[0] == color_list[1] == color_list[2]), "The regular price on the product page is NOT grey"
# get a color of the campaign price on the main page
campaign_color = prices.find_element_by_css_selector(".campaign-price").value_of_css_property("color")
# verify that the campaign price is red (values of G and B are 0)
color_list = re.findall('\d+', campaign_color)
assert (color_list[1] == '0') and (color_list[2] == '0'), "The campaign price on the product page is NOT red"
# verify that the regular price is line-through
regular_attr = prices.find_element_by_css_selector(".regular-price").value_of_css_property(
"text-decoration-line")
assert regular_attr == 'line-through', "Regular price is NOT line-through on the product page"
# verify that the campaign price is bold
campaign_attr = prices.find_element_by_css_selector(".campaign-price").value_of_css_property(
"font-weight")
assert (campaign_attr == 'bold') or (campaign_attr >= '700'), "Campaign price is NOT bold on the product page"
# check that campaign price is bigger than regular price on the main and product pages
def test_size_comparison(driver):
prices = driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light div.price-wrapper")
regular_size = prices.find_element_by_css_selector(".regular-price").size
campaign_size = prices.find_element_by_css_selector(".campaign-price").size
assert (campaign_size['height'] > regular_size['height']) and \
(campaign_size['width'] > regular_size['width']), \
"Size of campaign price is NOT bigger than size of regular price on the main page"
# open the product page
driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light").click()
prices = driver.find_element_by_css_selector("#box-product .price-wrapper")
regular_size = prices.find_element_by_css_selector(".regular-price").size
campaign_size = prices.find_element_by_css_selector(".campaign-price").size
assert (campaign_size['height'] > regular_size['height']) and \
(campaign_size['width'] > regular_size['width']), \
"Size of campaign price is NOT bigger than size of regular price on the product page"
| import pytest
from selenium import webdriver
import re
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
wd.get("http://localhost/litecart/en/")
request.addfinalizer(wd.quit)
return wd
# check that product names are identical on the main page and on product page
def test_product_names(driver):
# get a product name on the main page
main_name = driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light .name").text
# get a product name on a product page
driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light").click()
product_name = driver.find_element_by_css_selector("#box-product .title").text
assert main_name == product_name, "Product names on the main page and on product page are NOT identical"
# check that prices (regular and campaign) are identical on the main page and on product page
def test_prices(driver):
prices = driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light div.price-wrapper")
# get a regular price on the main page
main_regular_price = prices.find_element_by_css_selector(".regular-price").text
# get a campaign price on the main page
main_campaign_price = prices.find_element_by_css_selector(".campaign-price").text
# open the product page
driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light").click()
# get a regular price on a product page
product_regular_price = driver.find_element_by_css_selector("#box-product .price-wrapper .regular-price").text
# get a campaign price on a product page
product_campaign_price = driver.find_element_by_css_selector("#box-product .price-wrapper .campaign-price").text
assert main_regular_price == product_regular_price, "Regular prices on the main page and on the product page " \
"are NOT identical"
assert main_campaign_price == product_campaign_price, "Campaign prices on the main page and on the product page " \
"are NOT identical"
# check color of regular and campaign prices and their attributes on the main page
def test_colors_main_page(driver):
prices = driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light div.price-wrapper")
# get a color of the regular price on the main page
regular_color = prices.find_element_by_css_selector(".regular-price").value_of_css_property("color")
# verify that the regular price is grey (values of R,G,B are identical)
color_list = re.findall('\d+',regular_color)
assert(color_list[0] == color_list[1] == color_list[2]), "The regular price on the main page is NOT grey"
# get a color of the campaign price on the main page
campaign_color = prices.find_element_by_css_selector(".campaign-price").value_of_css_property("color")
# verify that the campaign price is red (values of G and B are 0)
color_list = re.findall('\d+',campaign_color)
assert (color_list[1] == '0') and (color_list[2] == '0'), "The campaign price on the main page is NOT red"
regular_attr = prices.find_element_by_css_selector(".regular-price").value_of_css_property("text-decoration-line")
assert regular_attr == 'line-through', "Regular price is NOT line-through on the main page"
campaign_attr = prices.find_element_by_css_selector(".campaign-price").value_of_css_property("font-weight")
assert (campaign_attr == 'bold') or (campaign_attr >= '700'), "Campaign price is NOT bold on the main page"
# check color of regular and campaign prices and their attributes on the product page
def test_colors_product_page(driver):
# open the product page
driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light").click()
prices = driver.find_element_by_css_selector("#box-product .price-wrapper")
# get a color of the regular price on the main page
regular_color = prices.find_element_by_css_selector(".regular-price").value_of_css_property("color")
# verify that the regular price is grey (values of R,G,B are identical)
color_list = re.findall('\d+', regular_color)
assert (color_list[0] == color_list[1] == color_list[2]), "The regular price on the product page is NOT grey"
# get a color of the campaign price on the main page
campaign_color = prices.find_element_by_css_selector(".campaign-price").value_of_css_property("color")
# verify that the campaign price is red (values of G and B are 0)
color_list = re.findall('\d+', campaign_color)
assert (color_list[1] == '0') and (color_list[2] == '0'), "The campaign price on the product page is NOT red"
# verify that the regular price is line-through
regular_attr = prices.find_element_by_css_selector(".regular-price").value_of_css_property(
"text-decoration-line")
assert regular_attr == 'line-through', "Regular price is NOT line-through on the product page"
# verify that the campaign price is bold
campaign_attr = prices.find_element_by_css_selector(".campaign-price").value_of_css_property(
"font-weight")
assert (campaign_attr == 'bold') or (campaign_attr >= '700'), "Campaign price is NOT bold on the product page"
# check that campaign price is bigger than regular price on the main and product pages
def test_size_comparison(driver):
prices = driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light div.price-wrapper")
regular_size = prices.find_element_by_css_selector(".regular-price").size
campaign_size = prices.find_element_by_css_selector(".campaign-price").size
assert (campaign_size['height'] > regular_size['height']) and \
(campaign_size['width'] > regular_size['width']), \
"Size of campaign price is NOT bigger than size of regular price on the main page"
# open the product page
driver.find_element_by_css_selector("#box-campaigns div li.product.column.shadow.hover-light").click()
prices = driver.find_element_by_css_selector("#box-product .price-wrapper")
regular_size = prices.find_element_by_css_selector(".regular-price").size
campaign_size = prices.find_element_by_css_selector(".campaign-price").size
assert (campaign_size['height'] > regular_size['height']) and \
(campaign_size['width'] > regular_size['width']), \
"Size of campaign price is NOT bigger than size of regular price on the product page"
| en | 0.914373 | # check that product names are identical on the main page and on product page # get a product name on the main page # get a product name on a product page # check that prices (regular and campaign) are identical on the main page and on product page # get a regular price on the main page # get a campaign price on the main page # open the product page # get a regular price on a product page # get a campaign price on a product page # check color of regular and campaign prices and their attributes on the main page # get a color of the regular price on the main page # verify that the regular price is grey (values of R,G,B are identical) # get a color of the campaign price on the main page # verify that the campaign price is red (values of G and B are 0) # check color of regular and campaign prices and their attributes on the product page # open the product page # get a color of the regular price on the main page # verify that the regular price is grey (values of R,G,B are identical) # get a color of the campaign price on the main page # verify that the campaign price is red (values of G and B are 0) # verify that the regular price is line-through # verify that the campaign price is bold # check that campaign price is bigger than regular prise on the main and product pages # open the product page | 2.665517 | 3 |
pyrite/llvm.py | iahuang/pyrite | 0 | 8757 | import shutil
from pyrite import fs
from pyrite.command_line import run_command
from pyrite.errors import UserError
from pyrite.globals import Globals
from os.path import join
class LLVMInterface:
_clang_path: str
def __init__(self):
self._clang_path = self._get_clang_path()
def _get_clang_path(self) -> str:
clang_path = shutil.which(Globals.get_compiler_options().clang_command)
if not clang_path:
raise UserError(
"Pyrite requires clang to be installed, but no such installation was found."
)
return clang_path
def compile_ll(self, source: str, output_path: str) -> None:
"""
Compile the contents of [source] as LLVM IR code, outputting a binary
specified by [output_path]. If any errors arise in compilation,
raise an error.
"""
ir_path = join(self.get_build_directory(), "build.ll")
fs.write_file(
path=ir_path,
data=source
)
result = run_command([self._clang_path, ir_path, "-o", output_path])
if result.stderr:
fs.write_file(
path=join(self.get_build_directory(), "llvm_error.txt"),
data=result.stderr
)
raise UserError(
"An unexpected error occurred during the compilation process. A detailed report has been written to {}".format(
self.get_build_directory()
)
)
def get_build_directory(self) -> str:
"""
Pyrite uses a temporary working "build" directory to store files needed for LLVM/Clang
"""
cwd = Globals.get_compiler_options().cwd
return join(cwd, "_build")
| import shutil
from pyrite import fs
from pyrite.command_line import run_command
from pyrite.errors import UserError
from pyrite.globals import Globals
from os.path import join
class LLVMInterface:
_clang_path: str
def __init__(self):
self._clang_path = self._get_clang_path()
def _get_clang_path(self) -> str:
clang_path = shutil.which(Globals.get_compiler_options().clang_command)
if not clang_path:
raise UserError(
"Pyrite requires clang to be installed, but no such installation was found."
)
return clang_path
def compile_ll(self, source: str, output_path: str) -> None:
"""
Compile the contents of [source] as LLVM IR code, outputting a binary
specified by [output_path]. If any errors arise in compilation,
raise an error.
"""
ir_path = join(self.get_build_directory(), "build.ll")
fs.write_file(
path=ir_path,
data=source
)
result = run_command([self._clang_path, ir_path, "-o", output_path])
if result.stderr:
fs.write_file(
path=join(self.get_build_directory(), "llvm_error.txt"),
data=result.stderr
)
raise UserError(
"An unexpected error occurred during the compilation process. A detailed report has been written to {}".format(
self.get_build_directory()
)
)
def get_build_directory(self) -> str:
"""
Pyrite uses a temporary working "build" directory to store files needed for LLVM/Clang
"""
cwd = Globals.get_compiler_options().cwd
return join(cwd, "_build")
| en | 0.736421 | Compile the contents of [source] as LLVM IR code, outputting a binary specified by [output_path]. If any errors arise in compilation, raise an error. Pyrite uses a temporary working "build" directory to store files needed for LLVM/Clang | 2.373717 | 2 |
bag_recursive.py | eduardogerentklein/Algoritmos-Geneticos | 0 | 8758 | maxWeight = 30
value = [15, 7, 10, 5, 8, 17]
weight = [15, 3, 2, 5, 9, 20]
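# brute-force 0/1 knapsack: at each position the item is either taken or skipped,
# and the branch with the higher total value (within maxWeight) wins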
def bag(pos, selected):
# compute the total value and weight of the selected items
totalValue = 0
pesoTotal = 0
for i in selected:
totalValue += value[i]
pesoTotal += weight[i]
if pesoTotal > maxWeight:
return (0,0)
if pos >= len(weight):
return (totalValue, pesoTotal)
answer1 = bag(pos + 1, selected + [pos])
answer2 = bag(pos + 1, list(selected))
if answer1[0] > answer2[0]:
return answer1
else:
return answer2
bestAnswer = bag(0, [])
print(bestAnswer) | maxWeight = 30
value = [15, 7, 10, 5, 8, 17]
weight = [15, 3, 2, 5, 9, 20]
def bag(pos, selected):
# compute the total value and weight of the selected items
totalValue = 0
pesoTotal = 0
for i in selected:
totalValue += value[i]
pesoTotal += weight[i]
if pesoTotal > maxWeight:
return (0,0)
if pos >= len(weight):
return (totalValue, pesoTotal)
answer1 = bag(pos + 1, selected + [pos])
answer2 = bag(pos + 1, list(selected))
if answer1[0] > answer2[0]:
return answer1
else:
return answer2
bestAnswer = bag(0, [])
print(bestAnswer) | en | 0.364433 | # calcula o total | 3.583946 | 4 |
train.py | MEfeTiryaki/trpo | 2 | 8759 | <reponame>MEfeTiryaki/trpo
import argparse
from itertools import count
import signal
import sys
import os
import time
import numpy as np
import gym
import torch
import torch.autograd as autograd
from torch.autograd import Variable
import scipy.optimize
import matplotlib.pyplot as plt
from value import Value
from policy import Policy
from utils import *
from trpo import trpo_step
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
# Algorithm Parameters
parser.add_argument('--gamma', type=float, default=0.995, metavar='G', help='discount factor (default: 0.995)')
parser.add_argument('--lambda-', type=float, default=0.97, metavar='G', help='gae (default: 0.97)')
# Value Function Learning Parameters
parser.add_argument('--l2-reg', type=float, default=1e-3, metavar='G', help='(NOT USED)l2 regularization regression (default: 1e-3)')
parser.add_argument('--val-opt-iter', type=int, default=200, metavar='G', help='iteration number for value function learning(default: 200)')
parser.add_argument('--lr', type=float, default=1e-3, metavar='G', help='learning rate for value function (default: 1e-3)')
parser.add_argument('--value-memory', type=int, default=1, metavar='G', help='ratio of past value to be used to batch size (default: 1)')
parser.add_argument('--value-memory-shuffle', action='store_true',help='if not shuffled latest memory stay') # TODO: implement
# Policy Optimization parameters
parser.add_argument('--max-kl', type=float, default=1e-2, metavar='G', help='max kl value (default: 1e-2)')
parser.add_argument('--damping', type=float, default=1e-1, metavar='G', help='damping (default: 1e-1)')
parser.add_argument('--fisher-ratio', type=float, default=1, metavar='G', help='ratio of data to calculate fisher vector product (default: 1)')
# Environment parameters
parser.add_argument('--env-name', default="Pendulum-v0", metavar='G', help='name of the environment to run')
parser.add_argument('--seed', type=int, default=543, metavar='N', help='random seed (default: 1)')
# Training length
parser.add_argument('--batch-size', type=int, default=5000, metavar='N', help='number of steps per iteration')
parser.add_argument('--episode-length', type=int, default=1000, metavar='N', help='max step size for one episode')
parser.add_argument('--max-iteration-number', type=int, default=200, metavar='N', help='max policy iteration number')
# Rendering
parser.add_argument('--render', action='store_true', help='render the environment')
# Logging
parser.add_argument('--log-interval', type=int, default=1, metavar='N', help='interval between training status logs (default: 10)')
parser.add_argument('--log', action='store_true', help='log the results at the end')
parser.add_argument('--log-dir', type=str, default=".", metavar='N', help='log directory')
parser.add_argument('--log-prefix', type=str, default="log", metavar='N', help='log file prefix')
# Load
parser.add_argument('--load', action='store_true', help='load models')
parser.add_argument('--save', action='store_true', help='load models')
parser.add_argument('--load-dir', type=str, default=".", metavar='N', help='')
args = parser.parse_args()
env = gym.make(args.env_name)
env.seed(args.seed)
num_inputs = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
torch.set_printoptions(profile="full")
if args.load:
policy_net = Policy(num_inputs, num_actions,30)
value_net = Value(num_inputs,30)
set_flat_params_to(value_net, loadParameterCsv(args.load_dir+"/ValueNet"))
set_flat_params_to(policy_net, loadParameterCsv(args.load_dir+"/PolicyNet"))
print("Networks are loaded from "+args.load_dir+"/")
else:
policy_net = Policy(num_inputs, num_actions,30)
value_net = Value(num_inputs,30)
def signal_handler(sig, frame):
""" Signal Handler to save the networks when shutting down via ctrl+C
Parameters:
Returns:
"""
if(args.save):
valueParam = get_flat_params_from(value_net)
policyParam = get_flat_params_from(policy_net)
saveParameterCsv(valueParam,args.load_dir+"/ValueNet")
saveParameterCsv(policyParam,args.load_dir+"/PolicyNet")
print("Networks are saved in "+args.load_dir+"/")
print('Closing!!')
env.close()
sys.exit(0)
def prepare_data(batch,valueBatch,previousBatch):
""" Get the batch data and calculate value,return and generalized advantage
Detail: TODO
Parameters:
batch (dict of arrays of numpy) : TODO
valueBatch (dict of arrays of numpy) : TODO
previousBatch (dict of arrays of numpy) : TODO
Returns:
"""
# TODO : more description above
stateList = [ torch.from_numpy(np.concatenate(x,axis=0)) for x in batch["states"]]
actionsList = [torch.from_numpy(np.concatenate(x,axis=0)) for x in batch["actions"]]
for states in stateList:
value = value_net.forward(states)
batch["values"].append(value)
advantagesList = []
returnsList = []
rewardsList = []
for rewards,values,masks in zip(batch["rewards"],batch["values"],batch["mask"]):
returns = torch.Tensor(len(rewards),1)
advantages = torch.Tensor(len(rewards),1)
deltas = torch.Tensor(len(rewards),1)
prev_return = 0
prev_value = 0
prev_advantage = 0
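# walk the trajectory backwards, accumulating discounted TD returns and GAE advantages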
for i in reversed(range(len(rewards))):
returns[i] = rewards[i] + args.gamma * prev_value * masks[i] # TD
# returns[i] = rewards[i] + args.gamma * prev_return * masks[i] # Monte Carlo
deltas[i] = rewards[i] + args.gamma * prev_value * masks[i]- values.data[i]
advantages[i] = deltas[i] + args.gamma * args.lambda_* prev_advantage* masks[i]
prev_return = returns[i, 0]
prev_value = values.data[i, 0]
prev_advantage = advantages[i, 0]
returnsList.append(returns)
advantagesList.append(advantages)
rewardsList.append(torch.Tensor(rewards))
batch["states"] = torch.cat(stateList,0)
batch["actions"] = torch.cat(actionsList,0)
batch["rewards"] = torch.cat(rewardsList,0)
batch["returns"] = torch.cat(returnsList,0)
advantagesList = torch.cat(advantagesList,0)
batch["advantages"] = (advantagesList- advantagesList.mean()) / advantagesList.std()
valueBatch["states"] = torch.cat(( previousBatch["states"],batch["states"]),0)
valueBatch["targets"] = torch.cat((previousBatch["returns"],batch["returns"]),0)
def update_policy(batch):
""" Get advantage , states and action and calls trpo step
Parameters:
batch (dict of arrays of numpy) : TODO (batch is different than prepare_data by structure)
Returns:
"""
advantages = batch["advantages"]
states = batch["states"]
actions = batch["actions"]
trpo_step(policy_net, states,actions,advantages , args.max_kl, args.damping)
def update_value(valueBatch):
""" Get valueBatch and run adam optimizer to learn value function
Parameters:
valueBatch (dict of arrays of numpy) : TODO
Returns:
"""
# shuffle the data
dataSize = valueBatch["targets"].size()[0]
permutation = torch.randperm(dataSize)
input = valueBatch["states"][permutation]
target = valueBatch["targets"][permutation]
iter = args.val_opt_iter
batchSize = int(dataSize/ iter)
loss_fn = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.Adam(value_net.parameters(), lr=args.lr)
for t in range(iter):
prediction = value_net(input[t*batchSize:t*batchSize+batchSize])
loss = loss_fn(prediction, target[t*batchSize:t*batchSize+batchSize])
# XXX : Comment out for debug
# if t%100==0:
# print("\t%f"%loss.data)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def save_to_previousBatch(previousBatch,batch):
""" Save previous batch to use in future value optimization
Details: TODO
Parameters:
Returns:
"""
if args.value_memory<0:
print("Value memory should be equal or greater than zero")
elif args.value_memory>0:
if previousBatch["returns"].size() == 0:
previousBatch= {"states":batch["states"],
"returns":batch["returns"]}
else:
previous_size = previousBatch["returns"].size()[0]
size = batch["returns"].size()[0]
if previous_size/size == args.value_memory:
previousBatch["states"] = torch.cat([previousBatch["states"][size:],batch["states"]],0)
previousBatch["returns"] = torch.cat([previousBatch["returns"][size:],batch["returns"]],0)
else:
previousBatch["states"] = torch.cat([previousBatch["states"],batch["states"]],0)
previousBatch["returns"] = torch.cat([previousBatch["returns"],batch["returns"]],0)
if args.value_memory_shuffle:
permutation = torch.randperm(previousBatch["returns"].size()[0])
previousBatch["states"] = previousBatch["states"][permutation]
previousBatch["returns"] = previousBatch["returns"][permutation]
def calculate_loss(reward_sum_mean,reward_sum_std,test_number = 10):
""" Calculate mean cummulative reward for test_nubmer of trials
Parameters:
reward_sum_mean (list): holds the history of the means.
reward_sum_std (list): holds the history of the std.
Returns:
list: new value appended means
list: new value appended stds
"""
rewardSum = []
for i in range(test_number):
state = env.reset()
rewardSum.append(0)
for t in range(args.episode_length):
state, reward, done, _ = env.step(policy_net.get_action(state)[0] )
state = np.transpose(state)
rewardSum[-1] += reward
if done:
break
reward_sum_mean.append(np.array(rewardSum).mean())
reward_sum_std.append(np.array(rewardSum).std())
return reward_sum_mean, reward_sum_std
def log(rewards):
""" Saves mean and std over episodes in log file
Parameters:
Returns:
"""
# TODO : add duration to log
filename = args.log_dir+"/"+ args.log_prefix \
+ "_env_" + args.env_name \
+ "_maxIter_" + str(args.max_iteration_number) \
+ "_batchSize_" + str(args.batch_size) \
+ "_gamma_" + str(args.gamma) \
+ "_lambda_" + str(args.lambda_) \
+ "_lr_" + str(args.lr) \
+ "_valOptIter_" + str(args.val_opt_iter)
if os.path.exists(filename + "_index_0.csv"):
id = 0
file = filename + "_index_" + str(id)
while os.path.exists(file + ".csv"):
id = id +1
file = filename + "_index_" + str(id)
filename = file
else:
filename = filename + "_index_0"
import csv
filename = filename+ ".csv"
pythonVersion = sys.version_info[0]
if pythonVersion == 3:
with open(filename, 'w', newline='') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(rewards)
elif pythonVersion == 2:
with open(filename, 'w', ) as csvfile:
spamwriter = csv.writer(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(rewards)
def main():
"""
Parameters:
Returns:
"""
signal.signal(signal.SIGINT, signal_handler)
time_start = time.time()
reward_sum_mean,reward_sum_std = [], []
previousBatch= {"states":torch.Tensor(0) ,
"returns":torch.Tensor(0)}
reward_sum_mean,reward_sum_std = calculate_loss(reward_sum_mean,reward_sum_std)
print("Initial loss \n\tloss | mean : %6.4f / std : %6.4f"%(reward_sum_mean[-1],reward_sum_std[-1]) )
for i_episode in range(args.max_iteration_number):
time_episode_start = time.time()
# reset batches
batch = {"states":[] ,
"actions":[],
"next_states":[] ,
"rewards":[],
"returns":[],
"values":[],
"advantages":[],
"mask":[]}
valueBatch = {"states" :[],
"targets" : []}
num_steps = 0
while num_steps < args.batch_size:
state = env.reset()
reward_sum = 0
states,actions,rewards,next_states,masks = [],[],[],[],[]
steps = 0
for t in range(args.episode_length):
action = policy_net.get_action(state)[0] # agent
next_state, reward, done, info = env.step(action)
next_state = np.transpose(next_state)
mask = 0 if done else 1
masks.append(mask)
states.append(state)
actions.append(action)
next_states.append(next_state)
rewards.append(reward)
state = next_state
reward_sum += reward
steps+=1
if args.render:
env.render()
if done:
break
batch["states"].append(np.expand_dims(states, axis=1) )
batch["actions"].append(actions)
batch["next_states"].append(np.expand_dims(next_states, axis=1))
batch["rewards"].append(rewards)
batch["mask"].append(masks)
num_steps += steps
prepare_data(batch,valueBatch,previousBatch)
update_policy(batch) # First policy update to avoid overfitting
update_value(valueBatch)
save_to_previousBatch(previousBatch,batch)
print("episode %d | total: %.4f "%( i_episode, time.time()-time_episode_start))
reward_sum_mean,reward_sum_std = calculate_loss(reward_sum_mean,reward_sum_std)
print("\tloss | mean : %6.4f / std : %6.4f"%(reward_sum_mean[-1],reward_sum_std[-1]) )
if args.log:
print("Data is logged in "+args.log_dir+"/")
log(reward_sum_mean)
print("Total training duration: %.4f "%(time.time()-time_start))
env.close()
if __name__ == '__main__':
main()
| import argparse
from itertools import count
import signal
import sys
import os
import time
import numpy as np
import gym
import torch
import torch.autograd as autograd
from torch.autograd import Variable
import scipy.optimize
import matplotlib.pyplot as plt
from value import Value
from policy import Policy
from utils import *
from trpo import trpo_step
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
# Algorithm Parameters
parser.add_argument('--gamma', type=float, default=0.995, metavar='G', help='discount factor (default: 0.995)')
parser.add_argument('--lambda-', type=float, default=0.97, metavar='G', help='gae (default: 0.97)')
# Value Function Learning Parameters
parser.add_argument('--l2-reg', type=float, default=1e-3, metavar='G', help='(NOT USED)l2 regularization regression (default: 1e-3)')
parser.add_argument('--val-opt-iter', type=int, default=200, metavar='G', help='iteration number for value function learning(default: 200)')
parser.add_argument('--lr', type=float, default=1e-3, metavar='G', help='learning rate for value function (default: 1e-3)')
parser.add_argument('--value-memory', type=int, default=1, metavar='G', help='ratio of past value to be used to batch size (default: 1)')
parser.add_argument('--value-memory-shuffle', action='store_true',help='if not shuffled latest memory stay') # TODO: implement
# Policy Optimization parameters
parser.add_argument('--max-kl', type=float, default=1e-2, metavar='G', help='max kl value (default: 1e-2)')
parser.add_argument('--damping', type=float, default=1e-1, metavar='G', help='damping (default: 1e-1)')
parser.add_argument('--fisher-ratio', type=float, default=1, metavar='G', help='ratio of data to calculate fisher vector product (default: 1)')
# Environment parameters
parser.add_argument('--env-name', default="Pendulum-v0", metavar='G', help='name of the environment to run')
parser.add_argument('--seed', type=int, default=543, metavar='N', help='random seed (default: 1)')
# Training length
parser.add_argument('--batch-size', type=int, default=5000, metavar='N', help='number of steps per iteration')
parser.add_argument('--episode-length', type=int, default=1000, metavar='N', help='max step size for one episode')
parser.add_argument('--max-iteration-number', type=int, default=200, metavar='N', help='max policy iteration number')
# Rendering
parser.add_argument('--render', action='store_true', help='render the environment')
# Logging
parser.add_argument('--log-interval', type=int, default=1, metavar='N', help='interval between training status logs (default: 10)')
parser.add_argument('--log', action='store_true', help='log the results at the end')
parser.add_argument('--log-dir', type=str, default=".", metavar='N', help='log directory')
parser.add_argument('--log-prefix', type=str, default="log", metavar='N', help='log file prefix')
# Load
parser.add_argument('--load', action='store_true', help='load models')
parser.add_argument('--save', action='store_true', help='load models')
parser.add_argument('--load-dir', type=str, default=".", metavar='N', help='')
args = parser.parse_args()
env = gym.make(args.env_name)
env.seed(args.seed)
num_inputs = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
torch.set_printoptions(profile="full")
if args.load:
policy_net = Policy(num_inputs, num_actions,30)
value_net = Value(num_inputs,30)
set_flat_params_to(value_net, loadParameterCsv(args.load_dir+"/ValueNet"))
set_flat_params_to(policy_net, loadParameterCsv(args.load_dir+"/PolicyNet"))
print("Networks are loaded from "+args.load_dir+"/")
else:
policy_net = Policy(num_inputs, num_actions,30)
value_net = Value(num_inputs,30)
def signal_handler(sig, frame):
""" Signal Handler to save the networks when shutting down via ctrl+C
Parameters:
Returns:
"""
if(args.save):
valueParam = get_flat_params_from(value_net)
policyParam = get_flat_params_from(policy_net)
saveParameterCsv(valueParam,args.load_dir+"/ValueNet")
saveParameterCsv(policyParam,args.load_dir+"/PolicyNet")
print("Networks are saved in "+args.load_dir+"/")
print('Closing!!')
env.close()
sys.exit(0)
def prepare_data(batch,valueBatch,previousBatch):
""" Get the batch data and calculate value,return and generalized advantage
Detail: TODO
Parameters:
batch (dict of arrays of numpy) : TODO
valueBatch (dict of arrays of numpy) : TODO
previousBatch (dict of arrays of numpy) : TODO
Returns:
"""
# TODO : more description above
stateList = [ torch.from_numpy(np.concatenate(x,axis=0)) for x in batch["states"]]
actionsList = [torch.from_numpy(np.concatenate(x,axis=0)) for x in batch["actions"]]
for states in stateList:
value = value_net.forward(states)
batch["values"].append(value)
advantagesList = []
returnsList = []
rewardsList = []
for rewards,values,masks in zip(batch["rewards"],batch["values"],batch["mask"]):
returns = torch.Tensor(len(rewards),1)
advantages = torch.Tensor(len(rewards),1)
deltas = torch.Tensor(len(rewards),1)
prev_return = 0
prev_value = 0
prev_advantage = 0
for i in reversed(range(len(rewards))):
returns[i] = rewards[i] + args.gamma * prev_value * masks[i] # TD
# returns[i] = rewards[i] + args.gamma * prev_return * masks[i] # Monte Carlo
deltas[i] = rewards[i] + args.gamma * prev_value * masks[i]- values.data[i]
advantages[i] = deltas[i] + args.gamma * args.lambda_* prev_advantage* masks[i]
prev_return = returns[i, 0]
prev_value = values.data[i, 0]
prev_advantage = advantages[i, 0]
returnsList.append(returns)
advantagesList.append(advantages)
rewardsList.append(torch.Tensor(rewards))
batch["states"] = torch.cat(stateList,0)
batch["actions"] = torch.cat(actionsList,0)
batch["rewards"] = torch.cat(rewardsList,0)
batch["returns"] = torch.cat(returnsList,0)
advantagesList = torch.cat(advantagesList,0)
batch["advantages"] = (advantagesList- advantagesList.mean()) / advantagesList.std()
valueBatch["states"] = torch.cat(( previousBatch["states"],batch["states"]),0)
valueBatch["targets"] = torch.cat((previousBatch["returns"],batch["returns"]),0)
def update_policy(batch):
""" Get advantage , states and action and calls trpo step
Parameters:
batch (dict of arrays of numpy) : TODO (batch is different than prepare_data by structure)
Returns:
"""
advantages = batch["advantages"]
states = batch["states"]
actions = batch["actions"]
trpo_step(policy_net, states,actions,advantages , args.max_kl, args.damping)
def update_value(valueBatch):
""" Get valueBatch and run adam optimizer to learn value function
Parameters:
valueBatch (dict of arrays of numpy) : TODO
Returns:
"""
# shuffle the data
dataSize = valueBatch["targets"].size()[0]
permutation = torch.randperm(dataSize)
input = valueBatch["states"][permutation]
target = valueBatch["targets"][permutation]
iter = args.val_opt_iter
batchSize = int(dataSize/ iter)
loss_fn = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.Adam(value_net.parameters(), lr=args.lr)
for t in range(iter):
prediction = value_net(input[t*batchSize:t*batchSize+batchSize])
loss = loss_fn(prediction, target[t*batchSize:t*batchSize+batchSize])
# XXX : Comment out for debug
# if t%100==0:
# print("\t%f"%loss.data)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def save_to_previousBatch(previousBatch,batch):
""" Save previous batch to use in future value optimization
Details: TODO
Parameters:
Returns:
"""
if args.value_memory<0:
print("Value memory should be equal or greater than zero")
elif args.value_memory>0:
if previousBatch["returns"].size() == 0:
previousBatch= {"states":batch["states"],
"returns":batch["returns"]}
else:
previous_size = previousBatch["returns"].size()[0]
size = batch["returns"].size()[0]
if previous_size/size == args.value_memory:
previousBatch["states"] = torch.cat([previousBatch["states"][size:],batch["states"]],0)
previousBatch["returns"] = torch.cat([previousBatch["returns"][size:],batch["returns"]],0)
else:
previousBatch["states"] = torch.cat([previousBatch["states"],batch["states"]],0)
previousBatch["returns"] = torch.cat([previousBatch["returns"],batch["returns"]],0)
if args.value_memory_shuffle:
permutation = torch.randperm(previousBatch["returns"].size()[0])
previousBatch["states"] = previousBatch["states"][permutation]
previousBatch["returns"] = previousBatch["returns"][permutation]
def calculate_loss(reward_sum_mean,reward_sum_std,test_number = 10):
""" Calculate mean cummulative reward for test_nubmer of trials
Parameters:
reward_sum_mean (list): holds the history of the means.
reward_sum_std (list): holds the history of the std.
Returns:
list: new value appended means
list: new value appended stds
"""
rewardSum = []
for i in range(test_number):
state = env.reset()
rewardSum.append(0)
for t in range(args.episode_length):
state, reward, done, _ = env.step(policy_net.get_action(state)[0] )
state = np.transpose(state)
rewardSum[-1] += reward
if done:
break
reward_sum_mean.append(np.array(rewardSum).mean())
reward_sum_std.append(np.array(rewardSum).std())
return reward_sum_mean, reward_sum_std
def log(rewards):
""" Saves mean and std over episodes in log file
Parameters:
Returns:
"""
# TODO : add duration to log
filename = args.log_dir+"/"+ args.log_prefix \
+ "_env_" + args.env_name \
+ "_maxIter_" + str(args.max_iteration_number) \
+ "_batchSize_" + str(args.batch_size) \
+ "_gamma_" + str(args.gamma) \
+ "_lambda_" + str(args.lambda_) \
+ "_lr_" + str(args.lr) \
+ "_valOptIter_" + str(args.val_opt_iter)
if os.path.exists(filename + "_index_0.csv"):
id = 0
file = filename + "_index_" + str(id)
while os.path.exists(file + ".csv"):
id = id +1
file = filename + "_index_" + str(id)
filename = file
else:
filename = filename + "_index_0"
import csv
filename = filename+ ".csv"
pythonVersion = sys.version_info[0]
if pythonVersion == 3:
with open(filename, 'w', newline='') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(rewards)
elif pythonVersion == 2:
with open(filename, 'w', ) as csvfile:
spamwriter = csv.writer(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(rewards)
def main():
"""
Parameters:
Returns:
"""
signal.signal(signal.SIGINT, signal_handler)
time_start = time.time()
reward_sum_mean,reward_sum_std = [], []
previousBatch= {"states":torch.Tensor(0) ,
"returns":torch.Tensor(0)}
reward_sum_mean,reward_sum_std = calculate_loss(reward_sum_mean,reward_sum_std)
print("Initial loss \n\tloss | mean : %6.4f / std : %6.4f"%(reward_sum_mean[-1],reward_sum_std[-1]) )
for i_episode in range(args.max_iteration_number):
time_episode_start = time.time()
# reset batches
batch = {"states":[] ,
"actions":[],
"next_states":[] ,
"rewards":[],
"returns":[],
"values":[],
"advantages":[],
"mask":[]}
valueBatch = {"states" :[],
"targets" : []}
num_steps = 0
while num_steps < args.batch_size:
state = env.reset()
reward_sum = 0
states,actions,rewards,next_states,masks = [],[],[],[],[]
steps = 0
for t in range(args.episode_length):
action = policy_net.get_action(state)[0] # agent
next_state, reward, done, info = env.step(action)
next_state = np.transpose(next_state)
mask = 0 if done else 1
masks.append(mask)
states.append(state)
actions.append(action)
next_states.append(next_state)
rewards.append(reward)
state = next_state
reward_sum += reward
steps+=1
if args.render:
env.render()
if done:
break
batch["states"].append(np.expand_dims(states, axis=1) )
batch["actions"].append(actions)
batch["next_states"].append(np.expand_dims(next_states, axis=1))
batch["rewards"].append(rewards)
batch["mask"].append(masks)
num_steps += steps
prepare_data(batch,valueBatch,previousBatch)
update_policy(batch) # First policy update to avoid overfitting
update_value(valueBatch)
save_to_previousBatch(previousBatch,batch)
print("episode %d | total: %.4f "%( i_episode, time.time()-time_episode_start))
reward_sum_mean,reward_sum_std = calculate_loss(reward_sum_mean,reward_sum_std)
print("\tloss | mean : %6.4f / std : %6.4f"%(reward_sum_mean[-1],reward_sum_std[-1]) )
if args.log:
print("Data is logged in "+args.log_dir+"/")
log(reward_sum_mean)
print("Total training duration: %.4f "%(time.time()-time_start))
env.close()
if __name__ == '__main__':
main() | en | 0.57737 | # Algorithm Parameters # Value Function Learning Parameters # TODO: implement # Policy Optimization parameters # Environment parameters # Training length # Rendering # Logging # Load Signal Handler to save the networks when shutting down via ctrl+C Parameters: Returns: Get the batch data and calculate value,return and generalized advantage Detail: TODO Parameters: batch (dict of arrays of numpy) : TODO valueBatch (dict of arrays of numpy) : TODO previousBatch (dict of arrays of numpy) : TODO Returns: # TODO : more description above # TD # returns[i] = rewards[i] + args.gamma * prev_return * masks[i] # Monte Carlo Get advantage , states and action and calls trpo step Parameters: batch (dict of arrays of numpy) : TODO (batch is different than prepare_data by structure) Returns: Get valueBatch and run adam optimizer to learn value function Parameters: valueBatch (dict of arrays of numpy) : TODO Returns: # shuffle the data # XXX : Comment out for debug # if t%100==0: # print("\t%f"%loss.data) Save previous batch to use in future value optimization Details: TODO Parameters: Returns: Calculate mean cummulative reward for test_nubmer of trials Parameters: reward_sum_mean (list): holds the history of the means. reward_sum_std (list): holds the history of the std. Returns: list: new value appended means list: new value appended stds Saves mean and std over episodes in log file Parameters: Returns: # TODO : add duration to log Parameters: Returns: # reset batches # agent # First policy update to avoid overfitting | 2.054693 | 2 |
task3/task3_xgb_cv.py | meck93/intro_ml | 0 | 8760 |
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import f_classif, SelectKBest
import numpy as np
import pandas as pd
import os
mingw_path = 'C:\\Program Files\\mingw-w64\\x86_64-7.2.0-posix-sjlj-rt_v5-rev1\\mingw64\\bin'
os.environ['PATH'] = mingw_path + ';' + os.environ['PATH']
import xgboost as xgb
# Constants
FILE_PATH_TRAIN = "./input/train.h5"
FILE_PATH_TEST = "./input/test.h5"
TEST_SIZE = 0.25
# read training file
# test_data = pd.read_hdf(FILE_PATH_TRAIN, "test")
training_data = pd.read_hdf(FILE_PATH_TRAIN, "train")
# training data
# extracting the x-values
x_values_training = training_data.copy()
x_values_training = x_values_training.drop(labels=['y'], axis=1)
x_component_training = x_values_training.values
# extracting the y-values
y_component_training = training_data['y'].values
# training the scaler
scaler = StandardScaler(with_mean=True, with_std=True)
scaler = scaler.fit(x_component_training)
# scaling the training and test data
x_train_scaled = scaler.transform(x_component_training)
# feature selection
selector = SelectKBest(f_classif, k=25)
selector = selector.fit(x_train_scaled, y_component_training)
x_train_scaled_new = selector.transform(x_train_scaled)
# splitting the training set into a training & validation set
x_train, x_val, y_train, y_val = train_test_split(x_train_scaled_new, y_component_training, test_size=TEST_SIZE, random_state=42)
# training, evaluation and test data in xgboost DMatrix
xg_train = xgb.DMatrix(x_train, label=y_train)
xg_val = xgb.DMatrix(x_val, label=y_val)
# setup parameters for xgboost
params = {}
# use softmax multi-class classification
params['objective'] = 'multi:softmax'
# scale weight of positive examples
params['silent'] = 0
params['num_class'] = 5
params['tree_method'] = 'auto'
params['seed'] = 42
# number of boosting rounds
rounds = 300
# gridsearch_params = [
# (max_depth, min_child_weight)
# for max_depth in range(6,13,2)
# for min_child_weight in range(4,9,2)
# ]
# print(gridsearch_params)
# best_params = None
# min_error = float("Inf")
# for max_depth, min_child_weight in gridsearch_params:
# print("CV with max_depth={}, min_child_weight={}".format(max_depth, min_child_weight))
# # Update our parameters
# params['max_depth'] = max_depth
# params['min_child_weight'] = min_child_weight
# # Run CV
# cv_results = xgb.cv(params, xg_train, num_boost_round=rounds, seed=42, nfold=5, metrics={'merror'}, early_stopping_rounds=10, verbose_eval=True)
# # Update best error
# mean_error = cv_results['test-merror-mean'].min()
# boost_rounds = cv_results['test-merror-mean'].argmin()
# print("\t Multiclass Error {} for {} rounds".format(mean_error, boost_rounds))
# print()
# if mean_error < min_error:
# min_error = mean_error
# best_params = (max_depth, min_child_weight)
# print("Best params: {}, {}, MAE: {}".format(best_params[0], best_params[1], min_error))
# # grid search parameters
# gridsearch_params = []
# # tree depth, gamma, learning rate, regularization lambda
# for max_tree_depth in range(6, 11, 1):
# for gamma in range(0, 13, 2):
# for learn_rate in [0.3, 0.1, 0.05]:
# for reg_lambda in [10.0, 1.0, 0.0, 0.1, 0.01]:
# gridsearch_params.append((max_tree_depth, gamma, learn_rate, reg_lambda))
# print(gridsearch_params)
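# grid actually searched below: every (max_depth, gamma) combination is cross-validated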
gridsearch_params = [
(max_depth, gamma)
for max_depth in range(6,13,2)
for gamma in range(0,13,2)
]
print(gridsearch_params)
best_params = None
min_test_error = float("Inf")
min_train_error = float("Inf")
file = open("output.txt", mode="w+", encoding='utf-8', newline='\n')
for max_depth, gamma in gridsearch_params:
print("CV with max_depth={}, gamma={}".format(max_depth, gamma))
file.write("CV with max_depth={}, gamma={}\n".format(max_depth, gamma))
# Update our parameters
params['max_depth'] = max_depth
params['gamma'] = gamma
# Run CV
cv_results = xgb.cv(params, xg_train, num_boost_round=rounds, seed=42, nfold=5, metrics={'merror'}, early_stopping_rounds=10, verbose_eval=True)
# Update best error
test_error = cv_results['test-merror-mean'].min()
train_error = cv_results['train-merror-mean'].min()
boost_rounds = cv_results['test-merror-mean'].argmin()
print("Multiclass Error {} for {} rounds".format(test_error, boost_rounds))
print()
file.write("Multiclass Error - Test: {} - Train: {} for {} rounds\n".format(test_error, train_error, boost_rounds))
file.write("\n")
if test_error < min_test_error:
min_test_error = test_error
min_train_error = train_error
best_params = (max_depth, gamma)
print("Best params: {}, {}, Test Error: {}, Train Error: {}".format(best_params[0], best_params[1], min_test_error, min_train_error))
file.write("Best params: {}, {}, Test Error: {}, Train Error: {}\n".format(best_params[0], best_params[1], min_test_error, min_train_error))
file.close()
|
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import f_classif, SelectKBest
import numpy as np
import pandas as pd
import os
mingw_path = 'C:\\Program Files\\mingw-w64\\x86_64-7.2.0-posix-sjlj-rt_v5-rev1\\mingw64\\bin'
os.environ['PATH'] = mingw_path + ';' + os.environ['PATH']
import xgboost as xgb
# Constants
FILE_PATH_TRAIN = "./input/train.h5"
FILE_PATH_TEST = "./input/test.h5"
TEST_SIZE = 0.25
# read training file
# test_data = pd.read_hdf(FILE_PATH_TRAIN, "test")
training_data = pd.read_hdf(FILE_PATH_TRAIN, "train")
# training data
# extracting the x-values
x_values_training = training_data.copy()
x_values_training = x_values_training.drop(labels=['y'], axis=1)
x_component_training = x_values_training.values
# extracting the y-values
y_component_training = training_data['y'].values
# training the scaler
scaler = StandardScaler(with_mean=True, with_std=True)
scaler = scaler.fit(x_component_training)
# scaling the training and test data
x_train_scaled = scaler.transform(x_component_training)
# feature selection
selector = SelectKBest(f_classif, k=25)
selector = selector.fit(x_train_scaled, y_component_training)
x_train_scaled_new = selector.transform(x_train_scaled)
# splitting the training set into a training & validation set
x_train, x_val, y_train, y_val = train_test_split(x_train_scaled_new, y_component_training, test_size=TEST_SIZE, random_state=42)
# training, evaluation and test data in xgboost DMatrix
xg_train = xgb.DMatrix(x_train, label=y_train)
xg_val = xgb.DMatrix(x_val, label=y_val)
# setup parameters for xgboost
params = {}
# use softmax multi-class classification
params['objective'] = 'multi:softmax'
# scale weight of positive examples
params['silent'] = 0
params['num_class'] = 5
params['tree_method'] = 'auto'
params['seed'] = 42
# number of boosting rounds
rounds = 300
# gridsearch_params = [
# (max_depth, min_child_weight)
# for max_depth in range(6,13,2)
# for min_child_weight in range(4,9,2)
# ]
# print(gridsearch_params)
# best_params = None
# min_error = float("Inf")
# for max_depth, min_child_weight in gridsearch_params:
# print("CV with max_depth={}, min_child_weight={}".format(max_depth, min_child_weight))
# # Update our parameters
# params['max_depth'] = max_depth
# params['min_child_weight'] = min_child_weight
# # Run CV
# cv_results = xgb.cv(params, xg_train, num_boost_round=rounds, seed=42, nfold=5, metrics={'merror'}, early_stopping_rounds=10, verbose_eval=True)
# # Update best error
# mean_error = cv_results['test-merror-mean'].min()
# boost_rounds = cv_results['test-merror-mean'].argmin()
# print("\t Multiclass Error {} for {} rounds".format(mean_error, boost_rounds))
# print()
# if mean_error < min_error:
# min_error = mean_error
# best_params = (max_depth, min_child_weight)
# print("Best params: {}, {}, MAE: {}".format(best_params[0], best_params[1], min_error))
# # grid search parameters
# gridsearch_params = []
# # tree depth, gamma, learning rate, regularization lambda
# for max_tree_depth in range(6, 11, 1):
# for gamma in range(0, 13, 2):
# for learn_rate in [0.3, 0.1, 0.05]:
# for reg_lambda in [10.0, 1.0, 0.0, 0.1, 0.01]:
# gridsearch_params.append((max_tree_depth, gamma, learn_rate, reg_lambda))
# print(gridsearch_params)
gridsearch_params = [
(max_depth, gamma)
for max_depth in range(6,13,2)
for gamma in range(0,13,2)
]
print(gridsearch_params)
best_params = None
min_test_error = float("Inf")
min_train_error = float("Inf")
file = open("output.txt", mode="w+", encoding='utf-8', newline='\n')
for max_depth, gamma in gridsearch_params:
print("CV with max_depth={}, gamma={}".format(max_depth, gamma))
file.write("CV with max_depth={}, gamma={}\n".format(max_depth, gamma))
# Update our parameters
params['max_depth'] = max_depth
params['gamma'] = gamma
# Run CV
cv_results = xgb.cv(params, xg_train, num_boost_round=rounds, seed=42, nfold=5, metrics={'merror'}, early_stopping_rounds=10, verbose_eval=True)
# Update best error
test_error = cv_results['test-merror-mean'].min()
train_error = cv_results['train-merror-mean'].min()
boost_rounds = cv_results['test-merror-mean'].argmin()
print("Multiclass Error {} for {} rounds".format(test_error, boost_rounds))
print()
file.write("Multiclass Error - Test: {} - Train: {} for {} rounds\n".format(test_error, train_error, boost_rounds))
file.write("\n")
if test_error < min_test_error:
min_test_error = test_error
min_train_error = train_error
best_params = (max_depth, gamma)
print("Best params: {}, {}, Test Error: {}, Train Error: {}".format(best_params[0], best_params[1], min_test_error, min_train_error))
file.write("Best params: {}, {}, Test Error: {}, Train Error: {}\n".format(best_params[0], best_params[1], min_test_error, min_train_error))
file.close()
| en | 0.557208 | # Constants # read training file # test_data = pd.read_hdf(FILE_PATH_TRAIN, "test") # training data # extracting the x-values # extracting the y-values # training the scaler # scaling the training and test data # feature selection # splitting the training set into a training & validation set # training, evaluation and test data in xgboost DMatrix # setup parameters for xgboost # use softmax multi-class classification # scale weight of positive examples # number of boosting rounds # gridsearch_params = [ # (max_depth, min_child_weight) # for max_depth in range(6,13,2) # for min_child_weight in range(4,9,2) # ] # print(gridsearch_params) # best_params = None # min_error = float("Inf") # for max_depth, min_child_weight in gridsearch_params: # print("CV with max_depth={}, min_child_weight={}".format(max_depth, min_child_weight)) # # Update our parameters # params['max_depth'] = max_depth # params['min_child_weight'] = min_child_weight # # Run CV # cv_results = xgb.cv(params, xg_train, num_boost_round=rounds, seed=42, nfold=5, metrics={'merror'}, early_stopping_rounds=10, verbose_eval=True) # # Update best error # mean_error = cv_results['test-merror-mean'].min() # boost_rounds = cv_results['test-merror-mean'].argmin() # print("\t Multiclass Error {} for {} rounds".format(mean_error, boost_rounds)) # print() # if mean_error < min_error: # min_error = mean_error # best_params = (max_depth, min_child_weight) # print("Best params: {}, {}, MAE: {}".format(best_params[0], best_params[1], min_error)) # # grid search parameters # gridsearch_params = [] # # tree depth, gamma, learning rate, regularization lambda # for max_tree_depth in range(6, 11, 1): # for gamma in range(0, 13, 2): # for learn_rate in [0.3, 0.1, 0.05]: # for reg_lambda in [10.0, 1.0, 0.0, 0.1, 0.01]: # gridsearch_params.append((max_tree_depth, gamma, learn_rate, reg_lambda)) # print(gridsearch_params) # Update our parameters # Run CV # Update best error | 2.840855 | 3 |
discovery-provider/src/queries/get_plays_metrics.py | atticwip/audius-protocol | 429 | 8761 | <gh_stars>100-1000
import logging
import time
from sqlalchemy import func, desc
from src.models import Play
from src.utils import db_session
logger = logging.getLogger(__name__)
def get_plays_metrics(args):
"""
Returns metrics for play counts
Args:
args: dict The parsed args from the request
args.start_time: date The start of the query
args.limit: number The max number of responses to return
args.bucket_size: string A date_trunc operation to aggregate timestamps by
Returns:
Array of dictionaries with the play counts and timestamp
"""
db = db_session.get_db_read_replica()
with db.scoped_session() as session:
return _get_plays_metrics(session, args)
def _get_plays_metrics(session, args):
metrics_query = (
session.query(
func.date_trunc(args.get("bucket_size"), Play.created_at).label(
"timestamp"
),
func.count(Play.id).label("count"),
)
.filter(Play.created_at > args.get("start_time"))
.group_by(func.date_trunc(args.get("bucket_size"), Play.created_at))
.order_by(desc("timestamp"))
.limit(args.get("limit"))
)
metrics = metrics_query.all()
metrics = [
{"timestamp": int(time.mktime(m[0].timetuple())), "count": m[1]}
for m in metrics
]
return metrics
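# Usage sketch (hypothetical values): the args dict mirrors the keys read by
# get_plays_metrics above -- a start_time, a row limit and a date_trunc bucket.
def _example_weekly_play_counts():
    from datetime import datetime, timedelta
    args = {
        "start_time": datetime.utcnow() - timedelta(days=7),
        "limit": 7,
        "bucket_size": "day",
    }
    return get_plays_metrics(args)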
| import logging
import time
from sqlalchemy import func, desc
from src.models import Play
from src.utils import db_session
logger = logging.getLogger(__name__)
def get_plays_metrics(args):
"""
Returns metrics for play counts
Args:
args: dict The parsed args from the request
args.start_time: date The start of the query
args.limit: number The max number of responses to return
args.bucket_size: string A date_trunc operation to aggregate timestamps by
Returns:
Array of dictionaries with the play counts and timestamp
"""
db = db_session.get_db_read_replica()
with db.scoped_session() as session:
return _get_plays_metrics(session, args)
def _get_plays_metrics(session, args):
metrics_query = (
session.query(
func.date_trunc(args.get("bucket_size"), Play.created_at).label(
"timestamp"
),
func.count(Play.id).label("count"),
)
.filter(Play.created_at > args.get("start_time"))
.group_by(func.date_trunc(args.get("bucket_size"), Play.created_at))
.order_by(desc("timestamp"))
.limit(args.get("limit"))
)
metrics = metrics_query.all()
metrics = [
{"timestamp": int(time.mktime(m[0].timetuple())), "count": m[1]}
for m in metrics
]
return metrics | en | 0.740954 | Returns metrics for play counts Args: args: dict The parsed args from the request args.start_time: date The start of the query args.limit: number The max number of responses to return args.bucket_size: string A date_trunc operation to aggregate timestamps by Returns: Array of dictionaries with the play counts and timestamp | 2.540101 | 3 |
CAutomation/settings.py | Rich9rd/CAutomation | 0 | 8762 | """
Django settings for CAutomation project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_LOGOUT_ON_GET = False
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "none"
AUTH_USER_MODEL = 'cleaning.User'
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
ACCOUNT_CONFIRM_EMAIL_ON_GET = False
SWAGGER_SETTINGS = {
'SECURITY_DEFINITIONS': {
'api_key': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
}
},
'USE_SESSION_AUTH': False,
'JSON_EDITOR': True,
}
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-=(#vt!5x^l3-j(e*%@p0)d_p&qd2x_#&n*^i=j38@b(26zz^mr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
REST_FRAMEWORK = {
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
}
# Application definition
SITE_ID = 1
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'corsheaders',
'allauth',
'allauth.account',
'allauth.socialaccount',
'drf_yasg',
'rest_framework',
'rest_framework.authtoken',
'rest_auth.registration',
'rest_auth',
'common.apps.CommonConfig',
'cleaning.apps.CleaningConfig',
]
#'corsheaders',
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.common.CommonMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
#'django.middleware.common.CommonMiddleware',
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#'corsheaders.middleware.CommonMiddleware',
ROOT_URLCONF = 'CAutomation.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CAutomation.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(
default='postgres://mzqgdpoeqiolgg:<EMAIL>:5432/d96ohaomhouuat'
),
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CORS_ALLOW_ALL_ORIGINS = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
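# Hardening sketch (illustrative only, not applied above): SECRET_KEY and DEBUG
# are development defaults here. A common pattern is to read them from the
# environment, e.g.:
#   SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", SECRET_KEY)
#   DEBUG = os.environ.get("DJANGO_DEBUG", "1") == "1"
# dj_database_url.config() already falls back to the DATABASE_URL environment
# variable when it is set, so the hard-coded default URL is only a fallback.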
| """
Django settings for CAutomation project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_LOGOUT_ON_GET = False
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "none"
AUTH_USER_MODEL = 'cleaning.User'
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
ACCOUNT_CONFIRM_EMAIL_ON_GET = False
SWAGGER_SETTINGS = {
'SECURITY_DEFINITIONS': {
'api_key': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
}
},
'USE_SESSION_AUTH': False,
'JSON_EDITOR': True,
}
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-=(#vt!5x^l3-j(e*%@p0)d_p&qd2x_#&n*^i=j38@b(26zz^mr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
REST_FRAMEWORK = {
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
}
# Application definition
SITE_ID = 1
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'corsheaders',
'allauth',
'allauth.account',
'allauth.socialaccount',
'drf_yasg',
'rest_framework',
'rest_framework.authtoken',
'rest_auth.registration',
'rest_auth',
'common.apps.CommonConfig',
'cleaning.apps.CleaningConfig',
]
#'corsheaders',
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.common.CommonMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
#'django.middleware.common.CommonMiddleware',
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#'corsheaders.middleware.CommonMiddleware',
ROOT_URLCONF = 'CAutomation.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CAutomation.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(
default='postgres://mzqgdpoeqiolgg:<EMAIL>:5432/d96ohaomhouuat'
),
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CORS_ALLOW_ALL_ORIGINS = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| en | 0.610859 | Django settings for CAutomation project. Generated by 'django-admin startproject' using Django 3.2.4. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ # Build paths inside the project like this: BASE_DIR / 'subdir'. # Needed to login by username in Django admin, regardless of `allauth` # `allauth` specific authentication methods, such as login by e-mail # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! #vt!5x^l3-j(e*%@p0)d_p&qd2x_#&n*^i=j38@b(26zz^mr' # SECURITY WARNING: don't run with debug turned on in production! # Application definition #'corsheaders', #'django.middleware.common.CommonMiddleware', #'corsheaders.middleware.CommonMiddleware', # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field | 1.843328 | 2 |
calculators/credit_card_calculator.py | wanderindev/financial-calculator-backend | 2 | 8763 | from .calculator import Calculator
# noinspection PyTypeChecker
class CreditCardCalculator(Calculator):
def __init__(self, **kwargs):
super(CreditCardCalculator, self).__init__(**kwargs)
self.cc_debt = self.get_float(kwargs.get("cc_debt", 0))
self.add_c = self.get_float(kwargs.get("add_c", 0))
self.min_p_perc = self.get_float(kwargs.get("min_p_perc", 0))
self.min_p = self.get_float(kwargs.get("min_p", 0))
self.fix_p = self.get_float(kwargs.get("fix_p", 0))
self.payments = []
self.payments_p = []
def get_payment_cc(self) -> float:
_rate = self.rate / (100 * self.freq)
_min_p_perc = self.min_p_perc / 100
_min_p = self.min_p
_fix_p = self.fix_p
b = self.cc_debt
per = 0
while b > 0:
i = b * _rate
p = max(b * _min_p_perc, _min_p, _fix_p)
if b + i < p:
p = b + i
b += i - p
per += 1
self.periods.append(per)
self.payments.append(p)
self.payments_p.append(p - i)
self.interests.append(i)
self.balances.append(b)
return self.payments[0]
def get_rate_cc(self) -> float:
return self.rate + self.add_c * 1200 / self.cc_debt
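# Usage sketch: the constructor forwards **kwargs to the base Calculator, whose
# keyword names are not visible in this file; `rate` (annual %) and `freq`
# (periods per year) are assumptions used purely for illustration.
def _example_payoff_schedule():
    calc = CreditCardCalculator(cc_debt=5000, add_c=0, min_p_perc=2.5,
                                min_p=25, fix_p=0, rate=18, freq=12)
    first_payment = calc.get_payment_cc()  # also fills periods/payments/interests/balances
    effective_rate = calc.get_rate_cc()    # rate including the additional cost
    return first_payment, effective_rate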
| from .calculator import Calculator
# noinspection PyTypeChecker
class CreditCardCalculator(Calculator):
def __init__(self, **kwargs):
super(CreditCardCalculator, self).__init__(**kwargs)
self.cc_debt = self.get_float(kwargs.get("cc_debt", 0))
self.add_c = self.get_float(kwargs.get("add_c", 0))
self.min_p_perc = self.get_float(kwargs.get("min_p_perc", 0))
self.min_p = self.get_float(kwargs.get("min_p", 0))
self.fix_p = self.get_float(kwargs.get("fix_p", 0))
self.payments = []
self.payments_p = []
def get_payment_cc(self) -> float:
_rate = self.rate / (100 * self.freq)
_min_p_perc = self.min_p_perc / 100
_min_p = self.min_p
_fix_p = self.fix_p
b = self.cc_debt
per = 0
while b > 0:
i = b * _rate
p = max(b * _min_p_perc, _min_p, _fix_p)
if b + i < p:
p = b + i
b += i - p
per += 1
self.periods.append(per)
self.payments.append(p)
self.payments_p.append(p - i)
self.interests.append(i)
self.balances.append(b)
return self.payments[0]
def get_rate_cc(self) -> float:
return self.rate + self.add_c * 1200 / self.cc_debt
| en | 0.214864 | # noinspection PyTypeChecker | 3.12905 | 3 |
setup.py | phaustin/MyST-Parser | 0 | 8764 | """myst-parser package setup."""
from importlib import import_module
from setuptools import find_packages, setup
setup(
name="myst-parser",
version=import_module("myst_parser").__version__,
description=(
"An extended commonmark compliant parser, " "with bridges to docutils & sphinx."
),
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/executablebooks/MyST-Parser",
project_urls={"Documentation": "https://myst-parser.readthedocs.io"},
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
packages=find_packages(),
entry_points={
"console_scripts": ["myst-benchmark = myst_parser.cli.benchmark:main"]
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup",
"Framework :: Sphinx :: Extension",
],
keywords="markdown lexer parser development docutils sphinx",
python_requires=">=3.6",
install_requires=["markdown-it-py~=0.4.5"],
extras_require={
"sphinx": ["pyyaml", "docutils>=0.15", "sphinx>=2,<3"],
"code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"],
"testing": [
"coverage",
"pytest>=3.6,<4",
"pytest-cov",
"pytest-regressions",
"beautifulsoup4",
],
"rtd": ["sphinxcontrib-bibtex", "ipython", "sphinx-book-theme", "sphinx_tabs"],
},
zip_safe=True,
)
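# Install sketches for the extras declared above (shell commands, shown here
# as comments):
#   pip install myst-parser                  # core parser only
#   pip install "myst-parser[sphinx]"        # with the Sphinx bridge
#   pip install -e ".[code_style,testing]"   # local development checkout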
| """myst-parser package setup."""
from importlib import import_module
from setuptools import find_packages, setup
setup(
name="myst-parser",
version=import_module("myst_parser").__version__,
description=(
"An extended commonmark compliant parser, " "with bridges to docutils & sphinx."
),
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/executablebooks/MyST-Parser",
project_urls={"Documentation": "https://myst-parser.readthedocs.io"},
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
packages=find_packages(),
entry_points={
"console_scripts": ["myst-benchmark = myst_parser.cli.benchmark:main"]
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup",
"Framework :: Sphinx :: Extension",
],
keywords="markdown lexer parser development docutils sphinx",
python_requires=">=3.6",
install_requires=["markdown-it-py~=0.4.5"],
extras_require={
"sphinx": ["pyyaml", "docutils>=0.15", "sphinx>=2,<3"],
"code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"],
"testing": [
"coverage",
"pytest>=3.6,<4",
"pytest-cov",
"pytest-regressions",
"beautifulsoup4",
],
"rtd": ["sphinxcontrib-bibtex", "ipython", "sphinx-book-theme", "sphinx_tabs"],
},
zip_safe=True,
)
| en | 0.387349 | myst-parser package setup. | 1.600544 | 2 |
python/tests/extractor/refmt.py | kho/cdec | 114 | 8765 | #!/usr/bin/env python
import collections, sys
lines = []
f = collections.defaultdict(int)
fe = collections.defaultdict(lambda: collections.defaultdict(int))
for line in sys.stdin:
tok = [x.strip() for x in line.split('|||')]
count = int(tok[4])
f[tok[1]] += count
fe[tok[1]][tok[2]] += count
lines.append(tok)
for tok in lines:
feat = 'IsSingletonF={0}.0 IsSingletonFE={1}.0'.format(
0 if f[tok[1]] > 1 else 1,
0 if fe[tok[1]][tok[2]] > 1 else 1)
print ' ||| '.join((tok[0], tok[1], tok[2], feat, tok[3]))
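# Worked example (field layout inferred from the indexing above, i.e.
# `lhs ||| f ||| e ||| feats ||| count`): given the input lines
#   [X] ||| la maison ||| the house ||| EgivenF=0.4 ||| 2
#   [X] ||| la maison ||| the home ||| EgivenF=0.6 ||| 1
# the source phrase occurs 3 times, so both lines get IsSingletonF=0.0, while
# the pair counts (2 and 1) give IsSingletonFE=0.0 and IsSingletonFE=1.0; the
# new features are inserted before the original feature field and the trailing
# count column is dropped from the output.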
| #!/usr/bin/env python
import collections, sys
lines = []
f = collections.defaultdict(int)
fe = collections.defaultdict(lambda: collections.defaultdict(int))
for line in sys.stdin:
tok = [x.strip() for x in line.split('|||')]
count = int(tok[4])
f[tok[1]] += count
fe[tok[1]][tok[2]] += count
lines.append(tok)
for tok in lines:
feat = 'IsSingletonF={0}.0 IsSingletonFE={1}.0'.format(
0 if f[tok[1]] > 1 else 1,
0 if fe[tok[1]][tok[2]] > 1 else 1)
print ' ||| '.join((tok[0], tok[1], tok[2], feat, tok[3]))
| ru | 0.26433 | #!/usr/bin/env python | 2.6582 | 3 |
blog/models.py | tomitokko/django-blog-with-astradb | 3 | 8766 | from django.db import models
import uuid
from datetime import datetime
from cassandra.cqlengine import columns
from django_cassandra_engine.models import DjangoCassandraModel
# Create your models here.
class PostModel(DjangoCassandraModel):
id = columns.UUID(primary_key=True, default=uuid.uuid4)
title = columns.Text(required=True)
body = columns.Text(required=True)
created_at = columns.DateTime(default=datetime.now) | from django.db import models
import uuid
from datetime import datetime
from cassandra.cqlengine import columns
from django_cassandra_engine.models import DjangoCassandraModel
# Create your models here.
class PostModel(DjangoCassandraModel):
id = columns.UUID(primary_key=True, default=uuid.uuid4)
title = columns.Text(required=True)
body = columns.Text(required=True)
created_at = columns.DateTime(default=datetime.now) | en | 0.963489 | # Create your models here. | 2.496449 | 2 |
fedex/services/availability_commitment_service.py | miczone/python-fedex | 0 | 8767 | <filename>fedex/services/availability_commitment_service.py<gh_stars>0
"""
Service Availability and Commitment Module
This package contains the shipping methods defined by Fedex's
ValidationAvailabilityAndCommitmentService WSDL file. Each is encapsulated in a class for
easy access. For more details on each, refer to the respective class's
documentation.
"""
import datetime
from ..base_service import FedexBaseService
class FedexAvailabilityCommitmentRequest(FedexBaseService):
"""
This class allows you validate service availability
"""
def __init__(self, config_obj, *args, **kwargs):
"""
@type config_obj: L{FedexConfig}
@param config_obj: A valid FedexConfig object.
"""
self._config_obj = config_obj
# Holds version info for the VersionId SOAP object.
self._version_info = {
'service_id': 'vacs',
'major': '14',
'intermediate': '0',
'minor': '0'
}
self.CarrierCode = None
"""@ivar: Carrier Code Default to Fedex (FDXE), or can bbe FDXG."""
self.Origin = None
"""@ivar: Holds Origin Address WSDL object."""
self.Destination = None
"""@ivar: Holds Destination Address WSDL object."""
self.ShipDate = None
"""@ivar: Ship Date date WSDL object."""
self.Service = None
"""@ivar: Service type, if set to None will get all available service information."""
self.Packaging = None
"""@ivar: Type of packaging to narrow down available shipping options or defaults to YOUR_PACKAGING."""
# Call the parent FedexBaseService class for basic setup work.
# Shortened the name of the wsdl, otherwise suds did not load it properly.
# Suds throws the following error when using the long file name from FedEx:
#
# File "/Library/Python/2.7/site-packages/suds/wsdl.py", line 878, in resolve
# raise Exception("binding '%s', not-found" % p.binding)
# Exception: binding 'ns:ValidationAvailabilityAndCommitmentServiceSoapBinding', not-found
super(FedexAvailabilityCommitmentRequest, self).__init__(
self._config_obj, 'ValidationAvailabilityAndCommitmentService_v14.wsdl', *args, **kwargs)
def _prepare_wsdl_objects(self):
"""
Create the data structure and get it ready for the WSDL request.
"""
self.CarrierCode = 'FDXE'
self.Origin = self.client.factory.create('Address')
self.Destination = self.client.factory.create('Address')
self.ShipDate = datetime.date.today().isoformat()
self.Service = None
self.Packaging = 'YOUR_PACKAGING'
def _assemble_and_send_request(self):
"""
Fires off the Fedex request.
@warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(),
WHICH RESIDES ON FedexBaseService AND IS INHERITED.
"""
# We get an exception like this when specifying an IntegratorId:
# suds.TypeNotFound: Type not found: 'IntegratorId'
# Setting it to None does not seem to appease it.
del self.ClientDetail.IntegratorId
self.logger.debug(self.WebAuthenticationDetail)
self.logger.debug(self.ClientDetail)
self.logger.debug(self.TransactionDetail)
self.logger.debug(self.VersionId)
# Fire off the query.
return self.client.service.serviceAvailability(
WebAuthenticationDetail=self.WebAuthenticationDetail,
ClientDetail=self.ClientDetail,
TransactionDetail=self.TransactionDetail,
Version=self.VersionId,
Origin=self.Origin,
Destination=self.Destination,
ShipDate=self.ShipDate,
CarrierCode=self.CarrierCode,
Service=self.Service,
Packaging=self.Packaging)
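# Usage sketch, mirroring the flow of the other python-fedex services: build a
# FedexConfig, construct the request, fill in the suds Address objects created
# in _prepare_wsdl_objects(), then call send_request() from the base class.
# The postal codes are placeholders and the reply is assumed to be stored on
# `request.response`, as in the sibling service modules.
def _example_availability_check(config_obj):
    request = FedexAvailabilityCommitmentRequest(config_obj)
    request.Origin.PostalCode = '29631'
    request.Origin.CountryCode = 'US'
    request.Destination.PostalCode = '27577'
    request.Destination.CountryCode = 'US'
    request.send_request()
    return request.response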
| <filename>fedex/services/availability_commitment_service.py<gh_stars>0
"""
Service Availability and Commitment Module
This package contains the shipping methods defined by Fedex's
ValidationAvailabilityAndCommitmentService WSDL file. Each is encapsulated in a class for
easy access. For more details on each, refer to the respective class's
documentation.
"""
import datetime
from ..base_service import FedexBaseService
class FedexAvailabilityCommitmentRequest(FedexBaseService):
"""
This class allows you validate service availability
"""
def __init__(self, config_obj, *args, **kwargs):
"""
@type config_obj: L{FedexConfig}
@param config_obj: A valid FedexConfig object.
"""
self._config_obj = config_obj
# Holds version info for the VersionId SOAP object.
self._version_info = {
'service_id': 'vacs',
'major': '14',
'intermediate': '0',
'minor': '0'
}
self.CarrierCode = None
"""@ivar: Carrier Code Default to Fedex (FDXE), or can bbe FDXG."""
self.Origin = None
"""@ivar: Holds Origin Address WSDL object."""
self.Destination = None
"""@ivar: Holds Destination Address WSDL object."""
self.ShipDate = None
"""@ivar: Ship Date date WSDL object."""
self.Service = None
"""@ivar: Service type, if set to None will get all available service information."""
self.Packaging = None
"""@ivar: Type of packaging to narrow down available shipping options or defaults to YOUR_PACKAGING."""
# Call the parent FedexBaseService class for basic setup work.
# Shortened the name of the wsdl, otherwise suds did not load it properly.
# Suds throws the following error when using the long file name from FedEx:
#
# File "/Library/Python/2.7/site-packages/suds/wsdl.py", line 878, in resolve
# raise Exception("binding '%s', not-found" % p.binding)
# Exception: binding 'ns:ValidationAvailabilityAndCommitmentServiceSoapBinding', not-found
super(FedexAvailabilityCommitmentRequest, self).__init__(
self._config_obj, 'ValidationAvailabilityAndCommitmentService_v14.wsdl', *args, **kwargs)
def _prepare_wsdl_objects(self):
"""
Create the data structure and get it ready for the WSDL request.
"""
self.CarrierCode = 'FDXE'
self.Origin = self.client.factory.create('Address')
self.Destination = self.client.factory.create('Address')
self.ShipDate = datetime.date.today().isoformat()
self.Service = None
self.Packaging = 'YOUR_PACKAGING'
def _assemble_and_send_request(self):
"""
Fires off the Fedex request.
@warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(),
WHICH RESIDES ON FedexBaseService AND IS INHERITED.
"""
# We get an exception like this when specifying an IntegratorId:
# suds.TypeNotFound: Type not found: 'IntegratorId'
# Setting it to None does not seem to appease it.
del self.ClientDetail.IntegratorId
self.logger.debug(self.WebAuthenticationDetail)
self.logger.debug(self.ClientDetail)
self.logger.debug(self.TransactionDetail)
self.logger.debug(self.VersionId)
# Fire off the query.
return self.client.service.serviceAvailability(
WebAuthenticationDetail=self.WebAuthenticationDetail,
ClientDetail=self.ClientDetail,
TransactionDetail=self.TransactionDetail,
Version=self.VersionId,
Origin=self.Origin,
Destination=self.Destination,
ShipDate=self.ShipDate,
CarrierCode=self.CarrierCode,
Service=self.Service,
Packaging=self.Packaging)
| en | 0.735403 | Service Availability and Commitment Module This package contains the shipping methods defined by Fedex's ValidationAvailabilityAndCommitmentService WSDL file. Each is encapsulated in a class for easy access. For more details on each, refer to the respective class's documentation. This class allows you validate service availability @type config_obj: L{FedexConfig} @param config_obj: A valid FedexConfig object. # Holds version info for the VersionId SOAP object. @ivar: Carrier Code Default to Fedex (FDXE), or can bbe FDXG. @ivar: Holds Origin Address WSDL object. @ivar: Holds Destination Address WSDL object. @ivar: Ship Date date WSDL object. @ivar: Service type, if set to None will get all available service information. @ivar: Type of packaging to narrow down available shipping options or defaults to YOUR_PACKAGING. # Call the parent FedexBaseService class for basic setup work. # Shortened the name of the wsdl, otherwise suds did not load it properly. # Suds throws the following error when using the long file name from FedEx: # # File "/Library/Python/2.7/site-packages/suds/wsdl.py", line 878, in resolve # raise Exception("binding '%s', not-found" % p.binding) # Exception: binding 'ns:ValidationAvailabilityAndCommitmentServiceSoapBinding', not-found Create the data structure and get it ready for the WSDL request. Fires off the Fedex request. @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(), WHICH RESIDES ON FedexBaseService AND IS INHERITED. # We get an exception like this when specifying an IntegratorId: # suds.TypeNotFound: Type not found: 'IntegratorId' # Setting it to None does not seem to appease it. # Fire off the query. | 2.692755 | 3 |
xverse/transformer/_woe.py | gb-andreygsouza/XuniVerse | 0 | 8768 | import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
import scipy.stats.stats as stats
import pandas.core.algorithms as algos
#from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array
from ..transformer import MonotonicBinning
pd.options.mode.chained_assignment = None
class WOE(BaseEstimator, TransformerMixin):
"""Weight of evidence transformation for categorical variables. For numeric variables,
monotonic operation is provided as default with this package.
Parameters
----------
feature_names: 'all' or list (default='all')
list of features to perform WOE transformation.
- 'all' (default): All categorical features in the dataset will be used
- list of features: ['age', 'income',......]
exclude_features: list (default=None)
list of features to be excluded from WOE transformation.
- Example - ['age', 'income', .......]
woe_prefix: string (default=None)
Variable prefix to be used for the column created by WOE transformer. The default value is set 'None'.
treat_missing: {'separate', 'mode', 'least_frequent'} (default='separate')
This parameter setting is used to handle missing values in the dataset.
        'separate' - Missing values are treated as their own group (category)
'mode' - Missing values are combined with the highest frequent item in the dataset
'least_frequent' - Missing values are combined with the least frequent item in the dataset
    woe_bins: dict of dicts (default=None)
This feature is added as part of future WOE transformations or scoring. If this value is set,
then WOE values provided for each of the features here will be used for transformation.
Applicable only in the transform method.
        Dictionary structure - {'feature_name': {category: WOE value}}
        Example - {'education': {'primary': 0.1, 'tertiary': 0.5, 'secondary': 0.7}}
monotonic_binning: bool (default=True)
This parameter is used to perform monotonic binning on numeric variables. If set to False,
numeric variables would be ignored.
mono_feature_names: 'all' or list (default='all')
list of features to perform monotonic binning operation.
- 'all' (default): All features in the dataset will be used
- list of features: ['age', 'income',......]
mono_max_bins: int (default=20)
Maximum number of bins that can be created for any given variable. The final number of bins
created will be less than or equal to this number.
mono_force_bins: int (default=3)
        It forces the module to create bins for a variable when it cannot find a monotonic relationship
        using the "max_bins" option. The final number of bins created will be equal to the number specified.
mono_cardinality_cutoff: int (default=5)
Cutoff to determine if a variable is eligible for monotonic binning operation. Any variable
which has unique levels less than this number will be treated as character variables.
At this point no binning operation will be performed on the variable and it will return the
        unique levels as bins for these variables.
mono_prefix: string (default=None)
Variable prefix to be used for the column created by monotonic binning.
mono_custom_binning: dict (default=None)
Using this parameter, the user can perform custom binning on variables. This parameter is also
used to apply previously computed bins for each feature (Score new data).
Dictionary structure - {'feature_name': float list}
Example - {'age': [0., 1., 2., 3.]}
"""
# Initialize the parameters for the function
def __init__(self, feature_names='all', exclude_features=None, woe_prefix=None,
treat_missing='separate', woe_bins=None, monotonic_binning=True,
mono_feature_names='all', mono_max_bins=20, mono_force_bins=3,
mono_cardinality_cutoff=5, mono_prefix=None, mono_custom_binning=None):
self.feature_names = feature_names
self.exclude_features = exclude_features
self.woe_prefix = woe_prefix
self.treat_missing = treat_missing
self.woe_bins = woe_bins #only used for future transformations
#these features below are for monotonic operations on numeric variables.
#It uses MonotonicBinning class from binning package.
self.monotonic_binning = monotonic_binning
self.mono_feature_names = mono_feature_names
self.mono_max_bins = mono_max_bins
self.mono_force_bins = mono_force_bins
self.mono_cardinality_cutoff = mono_cardinality_cutoff
self.mono_prefix = mono_prefix
self.mono_custom_binning = mono_custom_binning #only used for monotonic transformations
# check input data type - Only Pandas Dataframe allowed
def check_datatype(self, X):
if not isinstance(X, pd.DataFrame):
raise ValueError("The input data must be pandas dataframe. But the input provided is " + str(type(X)))
return self
# the fit function for WOE transformer
def fit(self, X, y):
#if the function is used as part of pipeline, then try to unpack tuple values
#produced in the previous step. Added as a part of pipeline feature.
try:
X, y = X
except:
pass
#check datatype of X
self.check_datatype(X)
#The length of X and Y should be equal
if X.shape[0] != y.shape[0]:
raise ValueError("Mismatch in input lengths. Length of X is " + str(X.shape[0]) + " \
but length of y is " + str(y.shape[0]) + ".")
# The label must be binary with values {0,1}
unique = np.unique(y)
if len(unique) != 2:
raise ValueError("The target column y must be binary. But the target contains " + str(len(unique)) + \
" unique value(s).")
#apply monotonic binning operation
if self.monotonic_binning:
self.mono_bin_clf = MonotonicBinning(feature_names=self.mono_feature_names,
max_bins=self.mono_max_bins, force_bins=self.mono_force_bins,
cardinality_cutoff=self.mono_cardinality_cutoff,
prefix=self.mono_prefix, custom_binning=self.mono_custom_binning)
if self.mono_custom_binning:
X = self.mono_bin_clf.transform(X)
self.mono_custom_binning = self.mono_bin_clf.bins
else:
X = self.mono_bin_clf.fit_transform(X, y)
self.mono_custom_binning = self.mono_bin_clf.bins
        #identify the variables to transform and assign the bin mapping dictionary
self.woe_bins = {} #bin mapping
if not self.mono_custom_binning:
self.mono_custom_binning= {}
else:
for i in self.mono_custom_binning:
X[i] = X[i].astype('object')
numerical_features = list(X._get_numeric_data().columns)
categorical_features = list(X.columns.difference(numerical_features))
#Identifying the features to perform fit
if self.feature_names == 'all':
self.transform_features = categorical_features
else:
self.transform_features = list(set(self.feature_names))
#Exclude variables provided in the exclusion list
if self.exclude_features:
self.transform_features = list(set(self.transform_features) - set(self.exclude_features))
temp_X = X[self.transform_features] #subset data only on features to fit
temp_X = temp_X.astype('object') #convert categorical columns to object columns
temp_X = self.treat_missing_values(temp_X) #treat missing values function
#apply the WOE train function on dataset
temp_X.apply(lambda x: self.train(x, y), axis=0)
#provide Information value for each variable as a separate dataset
self.iv_df = pd.DataFrame({'Information_Value':self.woe_df.groupby('Variable_Name').Information_Value.max()})
self.iv_df = self.iv_df.reset_index()
self.iv_df = self.iv_df.sort_values('Information_Value', ascending=False)
return self
#treat missing values based on the 'treat_missing' option provided by user
def treat_missing_values(self, X):
"""
treat_missing: {'separate', 'mode', 'least_frequent'} (default='separate')
This parameter setting is used to handle missing values in the dataset.
        'separate' - Missing values are treated as their own group (category)
'mode' - Missing values are combined with the highest frequent item in the dataset
'least_frequent' - Missing values are combined with the least frequent item in the dataset
"""
if self.treat_missing == 'separate':
X = X.fillna('NA')
elif self.treat_missing == 'mode':
X = X.fillna(X.mode().iloc[0])
elif self.treat_missing == 'least_frequent':
for i in X:
X[i] = X[i].fillna(X[i].value_counts().index[-1])
else:
raise ValueError("Missing values could be treated with one of these three options - \
'separate', 'mode', 'least_frequent'. \
The provided option is - " + str(self.treat_missing))
return X
#WOE binning - The function is applied on each columns identified in the fit function.
#Here, the input X is a Pandas Series type.
def train(self, X, y):
# Assign values
woe_mapping = {} #dictionary mapping for the current feature
temp_woe = pd.DataFrame({},index=[])
temp_df = pd.DataFrame({'X': X, "Y":y})
grouped_df = temp_df.groupby('X', as_index=True)
#calculate stats for variable and store it in temp_woe
target_sum = grouped_df.Y.sum()
temp_woe['Count'] = grouped_df.Y.count()
temp_woe['Category'] = target_sum.index
temp_woe['Event'] = target_sum
temp_woe['Non_Event'] = temp_woe['Count'] - temp_woe['Event']
temp_woe['Event_Rate'] = temp_woe['Event']/temp_woe['Count']
temp_woe['Non_Event_Rate'] = temp_woe['Non_Event']/temp_woe['Count']
#calculate distributions and woe
total_event = temp_woe['Event'].sum()
total_non_event = temp_woe['Non_Event'].sum()
temp_woe['Event_Distribution'] = temp_woe['Event']/total_event
temp_woe['Non_Event_Distribution'] = temp_woe['Non_Event']/total_non_event
temp_woe['WOE'] = np.log(temp_woe['Event_Distribution']/temp_woe['Non_Event_Distribution'])
temp_woe['Information_Value'] = (temp_woe['Event_Distribution']- \
temp_woe['Non_Event_Distribution'])*temp_woe['WOE']
temp_woe['Variable_Name'] = X.name
temp_woe = temp_woe[['Variable_Name', 'Category', 'Count', 'Event', 'Non_Event', \
'Event_Rate', 'Non_Event_Rate', 'Event_Distribution', 'Non_Event_Distribution', \
'WOE', 'Information_Value']]
temp_woe = temp_woe.replace([np.inf, -np.inf], 0)
temp_woe['Information_Value'] = temp_woe['Information_Value'].sum()
temp_woe = temp_woe.reset_index(drop=True)
woe_mapping[str(X.name)] = dict(zip(temp_woe['Category'], temp_woe['WOE']))
#assign computed values to class variables
try:
self.woe_df = self.woe_df.append(temp_woe, ignore_index=True)
self.woe_bins.update(woe_mapping)
except:
self.woe_df = temp_woe
self.woe_bins = woe_mapping
return self
#Transform new data or existing data based on the fit identified or custom transformation provided by user
def transform(self, X, y=None):
#if the function is used as part of pipeline, then try to unpack tuple values
#produced in the previous step. Added as a part of pipeline feature.
try:
X, y = X
except:
pass
self.check_datatype(X) #check input datatype.
outX = X.copy(deep=True)
#identify the features on which the transformation should be performed
try:
if self.transform_features:
transform_features = self.transform_features
except:
if self.woe_bins:
transform_features = list(self.woe_bins.keys())
else:
raise ValueError("Estimator has to be fitted to make WOE transformations")
#final list of features to be transformed
transform_features = list(set(transform_features) & set(outX.columns))
#raise error if the list is empty
if not transform_features:
raise ValueError("Empty list for WOE transformation. \
Estimator has to be fitted to make WOE transformations")
#use the custom bins provided by user for numeric variables
if self.mono_custom_binning:
try:
if self.mono_bin_clf:
pass
except:
self.mono_bin_clf = MonotonicBinning(feature_names=self.mono_feature_names,
max_bins=self.mono_max_bins, force_bins=self.mono_force_bins,
cardinality_cutoff=self.mono_cardinality_cutoff,
prefix=self.mono_prefix, custom_binning=self.mono_custom_binning)
outX = self.mono_bin_clf.transform(outX)
outX = outX.astype('object') #convert categorical columns to object columns
outX = self.treat_missing_values(outX) #treat missing values function
#iterate through the dataframe and apply the bins
for i in transform_features:
tempX = outX[i] #pandas Series
original_column_name = str(i)
#create the column name based on user provided prefix
if self.woe_prefix:
new_column_name = str(self.woe_prefix) + '_' + str(i)
else:
new_column_name = original_column_name
#check if the bin mapping is present
#check_is_fitted(self, 'woe_bins')
if not self.woe_bins:
raise ValueError("woe_bins variable is not present. \
Estimator has to be fitted to apply transformations.")
outX[new_column_name] = tempX.replace(self.woe_bins[original_column_name])
#transformed dataframe
return outX
#Method that describes what we need this transformer to do
def fit_transform(self, X, y):
return self.fit(X, y).transform(X)
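# Usage sketch: standard scikit-learn style flow for the transformer defined
# above. `df` (feature DataFrame) and `target` (binary 0/1 Series) are
# hypothetical inputs; `iv_df`, `woe_df` and `woe_bins` are the attributes
# populated by fit().
def _example_woe_flow(df, target):
    clf = WOE(treat_missing='separate', monotonic_binning=True)
    encoded = clf.fit_transform(df, target)  # WOE-encoded copy of df
    iv_table = clf.iv_df                     # information value per variable
    bin_detail = clf.woe_df                  # per-category WOE statistics
    return encoded, iv_table, bin_detail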
| import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
import scipy.stats.stats as stats
import pandas.core.algorithms as algos
#from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array
from ..transformer import MonotonicBinning
pd.options.mode.chained_assignment = None
class WOE(BaseEstimator, TransformerMixin):
"""Weight of evidence transformation for categorical variables. For numeric variables,
monotonic operation is provided as default with this package.
Parameters
----------
feature_names: 'all' or list (default='all')
list of features to perform WOE transformation.
- 'all' (default): All categorical features in the dataset will be used
- list of features: ['age', 'income',......]
exclude_features: list (default=None)
list of features to be excluded from WOE transformation.
- Example - ['age', 'income', .......]
woe_prefix: string (default=None)
Variable prefix to be used for the column created by WOE transformer. The default value is set 'None'.
treat_missing: {'separate', 'mode', 'least_frequent'} (default='separate')
This parameter setting is used to handle missing values in the dataset.
        'separate' - Missing values are treated as their own group (category)
'mode' - Missing values are combined with the highest frequent item in the dataset
'least_frequent' - Missing values are combined with the least frequent item in the dataset
    woe_bins: dict of dicts (default=None)
This feature is added as part of future WOE transformations or scoring. If this value is set,
then WOE values provided for each of the features here will be used for transformation.
Applicable only in the transform method.
        Dictionary structure - {'feature_name': {category: WOE value}}
        Example - {'education': {'primary': 0.1, 'tertiary': 0.5, 'secondary': 0.7}}
monotonic_binning: bool (default=True)
This parameter is used to perform monotonic binning on numeric variables. If set to False,
numeric variables would be ignored.
mono_feature_names: 'all' or list (default='all')
list of features to perform monotonic binning operation.
- 'all' (default): All features in the dataset will be used
- list of features: ['age', 'income',......]
mono_max_bins: int (default=20)
Maximum number of bins that can be created for any given variable. The final number of bins
created will be less than or equal to this number.
mono_force_bins: int (default=3)
        It forces the module to create bins for a variable when it cannot find a monotonic relationship
        using the "max_bins" option. The final number of bins created will be equal to the number specified.
mono_cardinality_cutoff: int (default=5)
Cutoff to determine if a variable is eligible for monotonic binning operation. Any variable
which has unique levels less than this number will be treated as character variables.
At this point no binning operation will be performed on the variable and it will return the
        unique levels as bins for these variables.
mono_prefix: string (default=None)
Variable prefix to be used for the column created by monotonic binning.
mono_custom_binning: dict (default=None)
Using this parameter, the user can perform custom binning on variables. This parameter is also
used to apply previously computed bins for each feature (Score new data).
Dictionary structure - {'feature_name': float list}
Example - {'age': [0., 1., 2., 3.]}
"""
# Initialize the parameters for the function
def __init__(self, feature_names='all', exclude_features=None, woe_prefix=None,
treat_missing='separate', woe_bins=None, monotonic_binning=True,
mono_feature_names='all', mono_max_bins=20, mono_force_bins=3,
mono_cardinality_cutoff=5, mono_prefix=None, mono_custom_binning=None):
self.feature_names = feature_names
self.exclude_features = exclude_features
self.woe_prefix = woe_prefix
self.treat_missing = treat_missing
self.woe_bins = woe_bins #only used for future transformations
#these features below are for monotonic operations on numeric variables.
#It uses MonotonicBinning class from binning package.
self.monotonic_binning = monotonic_binning
self.mono_feature_names = mono_feature_names
self.mono_max_bins = mono_max_bins
self.mono_force_bins = mono_force_bins
self.mono_cardinality_cutoff = mono_cardinality_cutoff
self.mono_prefix = mono_prefix
self.mono_custom_binning = mono_custom_binning #only used for monotonic transformations
# check input data type - Only Pandas Dataframe allowed
def check_datatype(self, X):
if not isinstance(X, pd.DataFrame):
raise ValueError("The input data must be pandas dataframe. But the input provided is " + str(type(X)))
return self
# the fit function for WOE transformer
def fit(self, X, y):
#if the function is used as part of pipeline, then try to unpack tuple values
#produced in the previous step. Added as a part of pipeline feature.
try:
X, y = X
except:
pass
#check datatype of X
self.check_datatype(X)
#The length of X and Y should be equal
if X.shape[0] != y.shape[0]:
raise ValueError("Mismatch in input lengths. Length of X is " + str(X.shape[0]) + " \
but length of y is " + str(y.shape[0]) + ".")
# The label must be binary with values {0,1}
unique = np.unique(y)
if len(unique) != 2:
raise ValueError("The target column y must be binary. But the target contains " + str(len(unique)) + \
" unique value(s).")
#apply monotonic binning operation
if self.monotonic_binning:
self.mono_bin_clf = MonotonicBinning(feature_names=self.mono_feature_names,
max_bins=self.mono_max_bins, force_bins=self.mono_force_bins,
cardinality_cutoff=self.mono_cardinality_cutoff,
prefix=self.mono_prefix, custom_binning=self.mono_custom_binning)
if self.mono_custom_binning:
X = self.mono_bin_clf.transform(X)
self.mono_custom_binning = self.mono_bin_clf.bins
else:
X = self.mono_bin_clf.fit_transform(X, y)
self.mono_custom_binning = self.mono_bin_clf.bins
        #identify the variables to transform and assign the bin mapping dictionary
self.woe_bins = {} #bin mapping
if not self.mono_custom_binning:
self.mono_custom_binning= {}
else:
for i in self.mono_custom_binning:
X[i] = X[i].astype('object')
numerical_features = list(X._get_numeric_data().columns)
categorical_features = list(X.columns.difference(numerical_features))
#Identifying the features to perform fit
if self.feature_names == 'all':
self.transform_features = categorical_features
else:
self.transform_features = list(set(self.feature_names))
#Exclude variables provided in the exclusion list
if self.exclude_features:
self.transform_features = list(set(self.transform_features) - set(self.exclude_features))
temp_X = X[self.transform_features] #subset data only on features to fit
temp_X = temp_X.astype('object') #convert categorical columns to object columns
temp_X = self.treat_missing_values(temp_X) #treat missing values function
#apply the WOE train function on dataset
temp_X.apply(lambda x: self.train(x, y), axis=0)
#provide Information value for each variable as a separate dataset
self.iv_df = pd.DataFrame({'Information_Value':self.woe_df.groupby('Variable_Name').Information_Value.max()})
self.iv_df = self.iv_df.reset_index()
self.iv_df = self.iv_df.sort_values('Information_Value', ascending=False)
return self
#treat missing values based on the 'treat_missing' option provided by user
def treat_missing_values(self, X):
"""
treat_missing: {'separate', 'mode', 'least_frequent'} (default='separate')
This parameter setting is used to handle missing values in the dataset.
        'separate' - Missing values are treated as their own group (category)
'mode' - Missing values are combined with the highest frequent item in the dataset
'least_frequent' - Missing values are combined with the least frequent item in the dataset
"""
if self.treat_missing == 'separate':
X = X.fillna('NA')
elif self.treat_missing == 'mode':
X = X.fillna(X.mode().iloc[0])
elif self.treat_missing == 'least_frequent':
for i in X:
X[i] = X[i].fillna(X[i].value_counts().index[-1])
else:
raise ValueError("Missing values could be treated with one of these three options - \
'separate', 'mode', 'least_frequent'. \
The provided option is - " + str(self.treat_missing))
return X
#WOE binning - The function is applied on each columns identified in the fit function.
#Here, the input X is a Pandas Series type.
def train(self, X, y):
# Assign values
woe_mapping = {} #dictionary mapping for the current feature
temp_woe = pd.DataFrame({},index=[])
temp_df = pd.DataFrame({'X': X, "Y":y})
grouped_df = temp_df.groupby('X', as_index=True)
#calculate stats for variable and store it in temp_woe
target_sum = grouped_df.Y.sum()
temp_woe['Count'] = grouped_df.Y.count()
temp_woe['Category'] = target_sum.index
temp_woe['Event'] = target_sum
temp_woe['Non_Event'] = temp_woe['Count'] - temp_woe['Event']
temp_woe['Event_Rate'] = temp_woe['Event']/temp_woe['Count']
temp_woe['Non_Event_Rate'] = temp_woe['Non_Event']/temp_woe['Count']
#calculate distributions and woe
total_event = temp_woe['Event'].sum()
total_non_event = temp_woe['Non_Event'].sum()
temp_woe['Event_Distribution'] = temp_woe['Event']/total_event
temp_woe['Non_Event_Distribution'] = temp_woe['Non_Event']/total_non_event
temp_woe['WOE'] = np.log(temp_woe['Event_Distribution']/temp_woe['Non_Event_Distribution'])
temp_woe['Information_Value'] = (temp_woe['Event_Distribution']- \
temp_woe['Non_Event_Distribution'])*temp_woe['WOE']
temp_woe['Variable_Name'] = X.name
temp_woe = temp_woe[['Variable_Name', 'Category', 'Count', 'Event', 'Non_Event', \
'Event_Rate', 'Non_Event_Rate', 'Event_Distribution', 'Non_Event_Distribution', \
'WOE', 'Information_Value']]
temp_woe = temp_woe.replace([np.inf, -np.inf], 0)
temp_woe['Information_Value'] = temp_woe['Information_Value'].sum()
temp_woe = temp_woe.reset_index(drop=True)
woe_mapping[str(X.name)] = dict(zip(temp_woe['Category'], temp_woe['WOE']))
#assign computed values to class variables
try:
self.woe_df = self.woe_df.append(temp_woe, ignore_index=True)
self.woe_bins.update(woe_mapping)
except:
self.woe_df = temp_woe
self.woe_bins = woe_mapping
return self
#Transform new data or existing data based on the fit identified or custom transformation provided by user
def transform(self, X, y=None):
#if the function is used as part of pipeline, then try to unpack tuple values
#produced in the previous step. Added as a part of pipeline feature.
try:
X, y = X
except:
pass
self.check_datatype(X) #check input datatype.
outX = X.copy(deep=True)
#identify the features on which the transformation should be performed
try:
if self.transform_features:
transform_features = self.transform_features
except:
if self.woe_bins:
transform_features = list(self.woe_bins.keys())
else:
raise ValueError("Estimator has to be fitted to make WOE transformations")
#final list of features to be transformed
transform_features = list(set(transform_features) & set(outX.columns))
#raise error if the list is empty
if not transform_features:
raise ValueError("Empty list for WOE transformation. \
Estimator has to be fitted to make WOE transformations")
#use the custom bins provided by user for numeric variables
if self.mono_custom_binning:
try:
if self.mono_bin_clf:
pass
except:
self.mono_bin_clf = MonotonicBinning(feature_names=self.mono_feature_names,
max_bins=self.mono_max_bins, force_bins=self.mono_force_bins,
cardinality_cutoff=self.mono_cardinality_cutoff,
prefix=self.mono_prefix, custom_binning=self.mono_custom_binning)
outX = self.mono_bin_clf.transform(outX)
outX = outX.astype('object') #convert categorical columns to object columns
outX = self.treat_missing_values(outX) #treat missing values function
#iterate through the dataframe and apply the bins
for i in transform_features:
tempX = outX[i] #pandas Series
original_column_name = str(i)
#create the column name based on user provided prefix
if self.woe_prefix:
new_column_name = str(self.woe_prefix) + '_' + str(i)
else:
new_column_name = original_column_name
#check if the bin mapping is present
#check_is_fitted(self, 'woe_bins')
if not self.woe_bins:
raise ValueError("woe_bins variable is not present. \
Estimator has to be fitted to apply transformations.")
outX[new_column_name] = tempX.replace(self.woe_bins[original_column_name])
#transformed dataframe
return outX
#Method that describes what we need this transformer to do
def fit_transform(self, X, y):
return self.fit(X, y).transform(X)
| en | 0.687306 | #from sklearn.utils.validation import check_is_fitted Weight of evidence transformation for categorical variables. For numeric variables, monotonic operation is provided as default with this package. Parameters ---------- feature_names: 'all' or list (default='all') list of features to perform WOE transformation. - 'all' (default): All categorical features in the dataset will be used - list of features: ['age', 'income',......] exclude_features: list (default=None) list of features to be excluded from WOE transformation. - Example - ['age', 'income', .......] woe_prefix: string (default=None) Variable prefix to be used for the column created by WOE transformer. The default value is set 'None'. treat_missing: {'separate', 'mode', 'least_frequent'} (default='separate') This parameter setting is used to handle missing values in the dataset. 'separate' - Missing values are treated as a own group (category) 'mode' - Missing values are combined with the highest frequent item in the dataset 'least_frequent' - Missing values are combined with the least frequent item in the dataset woe_bins: dict of dicts(default=None) This feature is added as part of future WOE transformations or scoring. If this value is set, then WOE values provided for each of the features here will be used for transformation. Applicable only in the transform method. Dictionary structure - {'feature_name': float list} Example - {'education': {'primary' : 0.1, 'tertiary' : 0.5, 'secondary', 0.7}} monotonic_binning: bool (default=True) This parameter is used to perform monotonic binning on numeric variables. If set to False, numeric variables would be ignored. mono_feature_names: 'all' or list (default='all') list of features to perform monotonic binning operation. - 'all' (default): All features in the dataset will be used - list of features: ['age', 'income',......] mono_max_bins: int (default=20) Maximum number of bins that can be created for any given variable. The final number of bins created will be less than or equal to this number. mono_force_bins: int (default=3) It forces the module to create bins for a variable, when it cannot find monotonic relationship using "max_bins" option. The final number of bins created will be equal to the number specified. mono_cardinality_cutoff: int (default=5) Cutoff to determine if a variable is eligible for monotonic binning operation. Any variable which has unique levels less than this number will be treated as character variables. At this point no binning operation will be performed on the variable and it will return the unique levels as bins for these variable. mono_prefix: string (default=None) Variable prefix to be used for the column created by monotonic binning. mono_custom_binning: dict (default=None) Using this parameter, the user can perform custom binning on variables. This parameter is also used to apply previously computed bins for each feature (Score new data). Dictionary structure - {'feature_name': float list} Example - {'age': [0., 1., 2., 3.]} # Initialize the parameters for the function #only used for future transformations #these features below are for monotonic operations on numeric variables. #It uses MonotonicBinning class from binning package. #only used for monotonic transformations # check input data type - Only Pandas Dataframe allowed # the fit function for WOE transformer #if the function is used as part of pipeline, then try to unpack tuple values #produced in the previous step. Added as a part of pipeline feature. 
#check datatype of X #The length of X and Y should be equal # The label must be binary with values {0,1} #apply monotonic binning operation #identify the variables to tranform and assign the bin mapping dictionary #bin mapping #Identifying the features to perform fit #Exclude variables provided in the exclusion list #subset data only on features to fit #convert categorical columns to object columns #treat missing values function #apply the WOE train function on dataset #provide Information value for each variable as a separate dataset #treat missing values based on the 'treat_missing' option provided by user treat_missing: {'separate', 'mode', 'least_frequent'} (default='separate') This parameter setting is used to handle missing values in the dataset. 'separate' - Missing values are treated as a own group (category) 'mode' - Missing values are combined with the highest frequent item in the dataset 'least_frequent' - Missing values are combined with the least frequent item in the dataset #WOE binning - The function is applied on each columns identified in the fit function. #Here, the input X is a Pandas Series type. # Assign values #dictionary mapping for the current feature #calculate stats for variable and store it in temp_woe #calculate distributions and woe #assign computed values to class variables #Transform new data or existing data based on the fit identified or custom transformation provided by user #if the function is used as part of pipeline, then try to unpack tuple values #produced in the previous step. Added as a part of pipeline feature. #check input datatype. #identify the features on which the transformation should be performed #final list of features to be transformed #raise error if the list is empty #use the custom bins provided by user for numeric variables #convert categorical columns to object columns #treat missing values function #iterate through the dataframe and apply the bins #pandas Series #create the column name based on user provided prefix #check if the bin mapping is present #check_is_fitted(self, 'woe_bins') #transformed dataframe #Method that describes what we need this transformer to do | 2.821338 | 3 |
cupy/linalg/product.py | okapies/cupy | 1 | 8769 | <filename>cupy/linalg/product.py
import numpy
import six
import cupy
from cupy import core
from cupy import internal
from cupy.linalg.solve import inv
from cupy.util import collections_abc
matmul = core.matmul
def dot(a, b, out=None):
"""Returns a dot product of two arrays.
For arrays with more than one axis, it computes the dot product along the
last axis of ``a`` and the second-to-last axis of ``b``. This is just a
matrix product if the both arrays are 2-D. For 1-D arrays, it uses their
unique axis as an axis to take dot product over.
Args:
a (cupy.ndarray): The left argument.
b (cupy.ndarray): The right argument.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: The dot product of ``a`` and ``b``.
.. seealso:: :func:`numpy.dot`
"""
# TODO(okuta): check type
return a.dot(b, out)
def vdot(a, b):
"""Returns the dot product of two vectors.
The input arrays are flattened into 1-D vectors and then it performs inner
product of these vectors.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
Returns:
cupy.ndarray: Zero-dimensional array of the dot product result.
.. seealso:: :func:`numpy.vdot`
"""
if a.size != b.size:
raise ValueError('Axis dimension mismatch')
if a.dtype.kind == 'c':
a = a.conj()
return core.tensordot_core(a, b, None, 1, 1, a.size, ())
def inner(a, b):
"""Returns the inner product of two arrays.
It uses the last axis of each argument to take sum product.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
Returns:
cupy.ndarray: The inner product of ``a`` and ``b``.
.. seealso:: :func:`numpy.inner`
"""
a_ndim = a.ndim
b_ndim = b.ndim
if a_ndim == 0 or b_ndim == 0:
return cupy.multiply(a, b)
a_axis = a_ndim - 1
b_axis = b_ndim - 1
if a.shape[-1] != b.shape[-1]:
raise ValueError('Axis dimension mismatch')
if a_axis:
a = cupy.rollaxis(a, a_axis, 0)
if b_axis:
b = cupy.rollaxis(b, b_axis, 0)
ret_shape = a.shape[1:] + b.shape[1:]
k = a.shape[0]
n = a.size // k
m = b.size // k
return core.tensordot_core(a, b, None, n, m, k, ret_shape)
def outer(a, b, out=None):
"""Returns the outer product of two vectors.
The input arrays are flattened into 1-D vectors and then it performs outer
product of these vectors.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: 2-D array of the outer product of ``a`` and ``b``.
.. seealso:: :func:`numpy.outer`
"""
n = a.size
m = b.size
ret_shape = (n, m)
if out is None:
return core.tensordot_core(a, b, None, n, m, 1, ret_shape)
if out.size != n * m:
raise ValueError('Output array has an invalid size')
if out.flags.c_contiguous:
return core.tensordot_core(a, b, out, n, m, 1, ret_shape)
else:
out[:] = core.tensordot_core(a, b, None, n, m, 1, ret_shape)
return out
def tensordot(a, b, axes=2):
"""Returns the tensor dot product of two arrays along specified axes.
This is equivalent to compute dot product along the specified axes which
are treated as one axis by reshaping.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
axes:
- If it is an integer, then ``axes`` axes at the last of ``a`` and
the first of ``b`` are used.
- If it is a pair of sequences of integers, then these two
sequences specify the list of axes for ``a`` and ``b``. The
corresponding axes are paired for sum-product.
Returns:
cupy.ndarray: The tensor dot product of ``a`` and ``b`` along the
axes specified by ``axes``.
.. seealso:: :func:`numpy.tensordot`
"""
a_ndim = a.ndim
b_ndim = b.ndim
if a_ndim == 0 or b_ndim == 0:
if axes != 0 and axes != ((), ()):
raise ValueError('An input is zero-dim while axes has dimensions')
return cupy.multiply(a, b)
if isinstance(axes, collections_abc.Sequence):
if len(axes) != 2:
raise ValueError('Axes must consist of two arrays.')
a_axes, b_axes = axes
if numpy.isscalar(a_axes):
a_axes = a_axes,
if numpy.isscalar(b_axes):
b_axes = b_axes,
else:
a_axes = tuple(six.moves.range(a_ndim - axes, a_ndim))
b_axes = tuple(six.moves.range(axes))
sum_ndim = len(a_axes)
if sum_ndim != len(b_axes):
raise ValueError('Axes length mismatch')
for a_axis, b_axis in zip(a_axes, b_axes):
if a.shape[a_axis] != b.shape[b_axis]:
raise ValueError('Axis dimension mismatch')
# Make the axes non-negative
a = _move_axes_to_head(a, [axis % a_ndim for axis in a_axes])
b = _move_axes_to_head(b, [axis % b_ndim for axis in b_axes])
ret_shape = a.shape[sum_ndim:] + b.shape[sum_ndim:]
k = internal.prod(a.shape[:sum_ndim])
# Avoid division by zero: core.tensordot_core returns zeros without
# checking n, m consistency, thus allowing 0-length dimensions to work
n = a.size // k if k != 0 else 0
m = b.size // k if k != 0 else 0
return core.tensordot_core(a, b, None, n, m, k, ret_shape)
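# Hedged usage sketch for ``tensordot`` (illustrative only; assumes a CUDA
# device is available and uses made-up shapes):
#   >>> a = cupy.arange(24, dtype='f').reshape(2, 3, 4)
#   >>> b = cupy.arange(12, dtype='f').reshape(4, 3)
#   >>> tensordot(a, b, axes=([2], [0])).shape   # contract the length-4 axis
#   (2, 3, 3)
# Passing ``axes=1`` is equivalent here: the last axis of ``a`` is paired
# with the first axis of ``b``.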
def matrix_power(M, n):
"""Raise a square matrix to the (integer) power `n`.
Args:
M (~cupy.ndarray): Matrix to raise by power n.
n (~int): Power to raise matrix to.
Returns:
~cupy.ndarray: Output array.
.. note:: M must be of dtype `float32` or `float64`.
    .. seealso:: :func:`numpy.linalg.matrix_power`
"""
if M.ndim != 2 or M.shape[0] != M.shape[1]:
raise ValueError('input must be a square array')
if not isinstance(n, six.integer_types):
raise TypeError('exponent must be an integer')
if n == 0:
return cupy.identity(M.shape[0], dtype=M.dtype)
elif n < 0:
M = inv(M)
n *= -1
# short-cuts
if n <= 3:
if n == 1:
return M
elif n == 2:
return cupy.matmul(M, M)
else:
return cupy.matmul(cupy.matmul(M, M), M)
# binary decomposition to reduce the number of Matrix
# multiplications for n > 3.
result, Z = None, None
for b in cupy.binary_repr(n)[::-1]:
Z = M if Z is None else cupy.matmul(Z, Z)
if b == '1':
result = Z if result is None else cupy.matmul(result, Z)
return result
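# Hedged illustration of the binary decomposition above: for ``n = 13``
# (binary ``1101``) the loop performs three squarings of ``Z`` plus two
# combining matmuls into ``result``, i.e. five products instead of twelve
# naive ones.
#   >>> m = cupy.asarray([[1., 1.], [0., 1.]])
#   >>> matrix_power(m, 13)   # upper-right entry equals 13 for this matrix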
def kron(a, b):
"""Returns the kronecker product of two arrays.
Args:
a (~cupy.ndarray): The first argument.
b (~cupy.ndarray): The second argument.
Returns:
~cupy.ndarray: Output array.
.. seealso:: :func:`numpy.kron`
"""
a_ndim = a.ndim
b_ndim = b.ndim
if a_ndim == 0 or b_ndim == 0:
return cupy.multiply(a, b)
ndim = b_ndim
a_shape = a.shape
b_shape = b.shape
if a_ndim != b_ndim:
if b_ndim > a_ndim:
a_shape = (1,) * (b_ndim - a_ndim) + a_shape
else:
b_shape = (1,) * (a_ndim - b_ndim) + b_shape
ndim = a_ndim
axis = ndim - 1
out = core.tensordot_core(a, b, None, a.size, b.size, 1, a_shape + b_shape)
for _ in six.moves.range(ndim):
out = core.concatenate_method(out, axis=axis)
return out
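# Hedged illustration for ``kron`` (assumes a CUDA device): each ``a[i, j]``
# scales a full copy of ``b`` inside the result.
#   >>> a = cupy.asarray([[1., 2.], [3., 4.]])
#   >>> kron(a, cupy.eye(2)).shape
#   (4, 4)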
def _move_axes_to_head(a, axes):
    # This function moves the axes of ``a`` to the head of the shape.
for idx, axis in enumerate(axes):
if idx != axis:
break
else:
return a
return a.transpose(
axes + [i for i in six.moves.range(a.ndim) if i not in axes])
| <filename>cupy/linalg/product.py
import numpy
import six
import cupy
from cupy import core
from cupy import internal
from cupy.linalg.solve import inv
from cupy.util import collections_abc
matmul = core.matmul
def dot(a, b, out=None):
"""Returns a dot product of two arrays.
For arrays with more than one axis, it computes the dot product along the
last axis of ``a`` and the second-to-last axis of ``b``. This is just a
matrix product if the both arrays are 2-D. For 1-D arrays, it uses their
unique axis as an axis to take dot product over.
Args:
a (cupy.ndarray): The left argument.
b (cupy.ndarray): The right argument.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: The dot product of ``a`` and ``b``.
.. seealso:: :func:`numpy.dot`
"""
# TODO(okuta): check type
return a.dot(b, out)
def vdot(a, b):
"""Returns the dot product of two vectors.
The input arrays are flattened into 1-D vectors and then it performs inner
product of these vectors.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
Returns:
cupy.ndarray: Zero-dimensional array of the dot product result.
.. seealso:: :func:`numpy.vdot`
"""
if a.size != b.size:
raise ValueError('Axis dimension mismatch')
if a.dtype.kind == 'c':
a = a.conj()
return core.tensordot_core(a, b, None, 1, 1, a.size, ())
def inner(a, b):
"""Returns the inner product of two arrays.
It uses the last axis of each argument to take sum product.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
Returns:
cupy.ndarray: The inner product of ``a`` and ``b``.
.. seealso:: :func:`numpy.inner`
"""
a_ndim = a.ndim
b_ndim = b.ndim
if a_ndim == 0 or b_ndim == 0:
return cupy.multiply(a, b)
a_axis = a_ndim - 1
b_axis = b_ndim - 1
if a.shape[-1] != b.shape[-1]:
raise ValueError('Axis dimension mismatch')
if a_axis:
a = cupy.rollaxis(a, a_axis, 0)
if b_axis:
b = cupy.rollaxis(b, b_axis, 0)
ret_shape = a.shape[1:] + b.shape[1:]
k = a.shape[0]
n = a.size // k
m = b.size // k
return core.tensordot_core(a, b, None, n, m, k, ret_shape)
def outer(a, b, out=None):
"""Returns the outer product of two vectors.
The input arrays are flattened into 1-D vectors and then it performs outer
product of these vectors.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: 2-D array of the outer product of ``a`` and ``b``.
.. seealso:: :func:`numpy.outer`
"""
n = a.size
m = b.size
ret_shape = (n, m)
if out is None:
return core.tensordot_core(a, b, None, n, m, 1, ret_shape)
if out.size != n * m:
raise ValueError('Output array has an invalid size')
if out.flags.c_contiguous:
return core.tensordot_core(a, b, out, n, m, 1, ret_shape)
else:
out[:] = core.tensordot_core(a, b, None, n, m, 1, ret_shape)
return out
def tensordot(a, b, axes=2):
"""Returns the tensor dot product of two arrays along specified axes.
This is equivalent to compute dot product along the specified axes which
are treated as one axis by reshaping.
Args:
a (cupy.ndarray): The first argument.
b (cupy.ndarray): The second argument.
axes:
- If it is an integer, then ``axes`` axes at the last of ``a`` and
the first of ``b`` are used.
- If it is a pair of sequences of integers, then these two
sequences specify the list of axes for ``a`` and ``b``. The
corresponding axes are paired for sum-product.
Returns:
cupy.ndarray: The tensor dot product of ``a`` and ``b`` along the
axes specified by ``axes``.
.. seealso:: :func:`numpy.tensordot`
"""
a_ndim = a.ndim
b_ndim = b.ndim
if a_ndim == 0 or b_ndim == 0:
if axes != 0 and axes != ((), ()):
raise ValueError('An input is zero-dim while axes has dimensions')
return cupy.multiply(a, b)
if isinstance(axes, collections_abc.Sequence):
if len(axes) != 2:
raise ValueError('Axes must consist of two arrays.')
a_axes, b_axes = axes
if numpy.isscalar(a_axes):
a_axes = a_axes,
if numpy.isscalar(b_axes):
b_axes = b_axes,
else:
a_axes = tuple(six.moves.range(a_ndim - axes, a_ndim))
b_axes = tuple(six.moves.range(axes))
sum_ndim = len(a_axes)
if sum_ndim != len(b_axes):
raise ValueError('Axes length mismatch')
for a_axis, b_axis in zip(a_axes, b_axes):
if a.shape[a_axis] != b.shape[b_axis]:
raise ValueError('Axis dimension mismatch')
# Make the axes non-negative
a = _move_axes_to_head(a, [axis % a_ndim for axis in a_axes])
b = _move_axes_to_head(b, [axis % b_ndim for axis in b_axes])
ret_shape = a.shape[sum_ndim:] + b.shape[sum_ndim:]
k = internal.prod(a.shape[:sum_ndim])
# Avoid division by zero: core.tensordot_core returns zeros without
# checking n, m consistency, thus allowing 0-length dimensions to work
n = a.size // k if k != 0 else 0
m = b.size // k if k != 0 else 0
return core.tensordot_core(a, b, None, n, m, k, ret_shape)
def matrix_power(M, n):
"""Raise a square matrix to the (integer) power `n`.
Args:
M (~cupy.ndarray): Matrix to raise by power n.
n (~int): Power to raise matrix to.
Returns:
~cupy.ndarray: Output array.
.. note:: M must be of dtype `float32` or `float64`.
    .. seealso:: :func:`numpy.linalg.matrix_power`
"""
if M.ndim != 2 or M.shape[0] != M.shape[1]:
raise ValueError('input must be a square array')
if not isinstance(n, six.integer_types):
raise TypeError('exponent must be an integer')
if n == 0:
return cupy.identity(M.shape[0], dtype=M.dtype)
elif n < 0:
M = inv(M)
n *= -1
# short-cuts
if n <= 3:
if n == 1:
return M
elif n == 2:
return cupy.matmul(M, M)
else:
return cupy.matmul(cupy.matmul(M, M), M)
# binary decomposition to reduce the number of Matrix
# multiplications for n > 3.
result, Z = None, None
for b in cupy.binary_repr(n)[::-1]:
Z = M if Z is None else cupy.matmul(Z, Z)
if b == '1':
result = Z if result is None else cupy.matmul(result, Z)
return result
def kron(a, b):
"""Returns the kronecker product of two arrays.
Args:
a (~cupy.ndarray): The first argument.
b (~cupy.ndarray): The second argument.
Returns:
~cupy.ndarray: Output array.
.. seealso:: :func:`numpy.kron`
"""
a_ndim = a.ndim
b_ndim = b.ndim
if a_ndim == 0 or b_ndim == 0:
return cupy.multiply(a, b)
ndim = b_ndim
a_shape = a.shape
b_shape = b.shape
if a_ndim != b_ndim:
if b_ndim > a_ndim:
a_shape = (1,) * (b_ndim - a_ndim) + a_shape
else:
b_shape = (1,) * (a_ndim - b_ndim) + b_shape
ndim = a_ndim
axis = ndim - 1
out = core.tensordot_core(a, b, None, a.size, b.size, 1, a_shape + b_shape)
for _ in six.moves.range(ndim):
out = core.concatenate_method(out, axis=axis)
return out
def _move_axes_to_head(a, axes):
    # This function moves the axes of ``a`` to the head of the shape.
for idx, axis in enumerate(axes):
if idx != axis:
break
else:
return a
return a.transpose(
axes + [i for i in six.moves.range(a.ndim) if i not in axes])
| en | 0.731944 | Returns a dot product of two arrays. For arrays with more than one axis, it computes the dot product along the last axis of ``a`` and the second-to-last axis of ``b``. This is just a matrix product if the both arrays are 2-D. For 1-D arrays, it uses their unique axis as an axis to take dot product over. Args: a (cupy.ndarray): The left argument. b (cupy.ndarray): The right argument. out (cupy.ndarray): Output array. Returns: cupy.ndarray: The dot product of ``a`` and ``b``. .. seealso:: :func:`numpy.dot` # TODO(okuta): check type Returns the dot product of two vectors. The input arrays are flattened into 1-D vectors and then it performs inner product of these vectors. Args: a (cupy.ndarray): The first argument. b (cupy.ndarray): The second argument. Returns: cupy.ndarray: Zero-dimensional array of the dot product result. .. seealso:: :func:`numpy.vdot` Returns the inner product of two arrays. It uses the last axis of each argument to take sum product. Args: a (cupy.ndarray): The first argument. b (cupy.ndarray): The second argument. Returns: cupy.ndarray: The inner product of ``a`` and ``b``. .. seealso:: :func:`numpy.inner` Returns the outer product of two vectors. The input arrays are flattened into 1-D vectors and then it performs outer product of these vectors. Args: a (cupy.ndarray): The first argument. b (cupy.ndarray): The second argument. out (cupy.ndarray): Output array. Returns: cupy.ndarray: 2-D array of the outer product of ``a`` and ``b``. .. seealso:: :func:`numpy.outer` Returns the tensor dot product of two arrays along specified axes. This is equivalent to compute dot product along the specified axes which are treated as one axis by reshaping. Args: a (cupy.ndarray): The first argument. b (cupy.ndarray): The second argument. axes: - If it is an integer, then ``axes`` axes at the last of ``a`` and the first of ``b`` are used. - If it is a pair of sequences of integers, then these two sequences specify the list of axes for ``a`` and ``b``. The corresponding axes are paired for sum-product. Returns: cupy.ndarray: The tensor dot product of ``a`` and ``b`` along the axes specified by ``axes``. .. seealso:: :func:`numpy.tensordot` # Make the axes non-negative # Avoid division by zero: core.tensordot_core returns zeros without # checking n, m consistency, thus allowing 0-length dimensions to work Raise a square matrix to the (integer) power `n`. Args: M (~cupy.ndarray): Matrix to raise by power n. n (~int): Power to raise matrix to. Returns: ~cupy.ndarray: Output array. .. note:: M must be of dtype `float32` or `float64`. ..seealso:: :func:`numpy.linalg.matrix_power` # short-cuts # binary decomposition to reduce the number of Matrix # multiplications for n > 3. Returns the kronecker product of two arrays. Args: a (~cupy.ndarray): The first argument. b (~cupy.ndarray): The second argument. Returns: ~cupy.ndarray: Output array. .. seealso:: :func:`numpy.kron` # This function moves the axes of ``s`` to the head of the shape. | 3.268448 | 3 |
fibo.py | aligoren/pyalgo | 22 | 8770 | <filename>fibo.py
def fibo(n):
return n <= 1 or fibo(n-1) + fibo(n-2)
def fibo_main():
for n in range(1,47):
res = fibo(n)
print("%s\t%s" % (n, res))
fibo_main()
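# The naive recursion above recomputes subproblems exponentially, which is
# why the profile below reports roughly 19000 seconds for 46 values. A hedged,
# memoized sketch (not part of the original script) is shown for comparison;
# it uses the standard base cases fib(0) = 0 and fib(1) = 1, so its values are
# shifted by one index relative to the boolean-base-case version above.
import functools

@functools.lru_cache(maxsize=None)
def fibo_memo(n):
    return n if n <= 1 else fibo_memo(n - 1) + fibo_memo(n - 2)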
# profiling result for 47 numbers
# profile: python -m profile fibo.py
"""
-1273940835 function calls (275 primitive calls) in 18966.707 seconds
Ordered by: standard name
ncalls tottime percall cumtime percall filename:lineno(function)
90 0.000 0.000 0.001 0.000 cp857.py:18(encode)
1 0.000 0.000 18966.707 18966.707 fibo.py:1(<module>)
-1273941064/46 18966.697 -0.000 18966.697 412.319 fibo.py:1(fibo)
1 0.001 0.001 18966.707 18966.707 fibo.py:4(main)
90 0.000 0.000 0.000 0.000 {built-in method charmap_encode}
1 0.000 0.000 18966.707 18966.707 {built-in method exec}
45 0.009 0.000 0.010 0.000 {built-in method print}
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Prof
iler' objects}
""" | <filename>fibo.py
def fibo(n):
return n <= 1 or fibo(n-1) + fibo(n-2)
def fibo_main():
for n in range(1,47):
res = fibo(n)
print("%s\t%s" % (n, res))
fibo_main()
# profiling result for 47 numbers
# profile: python -m profile fibo.py
"""
-1273940835 function calls (275 primitive calls) in 18966.707 seconds
Ordered by: standard name
ncalls tottime percall cumtime percall filename:lineno(function)
90 0.000 0.000 0.001 0.000 cp857.py:18(encode)
1 0.000 0.000 18966.707 18966.707 fibo.py:1(<module>)
-1273941064/46 18966.697 -0.000 18966.697 412.319 fibo.py:1(fibo)
1 0.001 0.001 18966.707 18966.707 fibo.py:4(main)
90 0.000 0.000 0.000 0.000 {built-in method charmap_encode}
1 0.000 0.000 18966.707 18966.707 {built-in method exec}
45 0.009 0.000 0.010 0.000 {built-in method print}
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Prof
iler' objects}
""" | en | 0.267884 | # profiling result for 47 numbers # profile: python -m profile fibo.py -1273940835 function calls (275 primitive calls) in 18966.707 seconds Ordered by: standard name ncalls tottime percall cumtime percall filename:lineno(function) 90 0.000 0.000 0.001 0.000 cp857.py:18(encode) 1 0.000 0.000 18966.707 18966.707 fibo.py:1(<module>) -1273941064/46 18966.697 -0.000 18966.697 412.319 fibo.py:1(fibo) 1 0.001 0.001 18966.707 18966.707 fibo.py:4(main) 90 0.000 0.000 0.000 0.000 {built-in method charmap_encode} 1 0.000 0.000 18966.707 18966.707 {built-in method exec} 45 0.009 0.000 0.010 0.000 {built-in method print} 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Prof iler' objects} | 3.494306 | 3 |
trt_util/common.py | yihui8776/TensorRT-DETR | 0 | 8771 | <filename>trt_util/common.py
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ~~~Medcare AI Lab~~~
# This code is adapted from the official TensorRT samples, with the relevant methods modified.
#
import pycuda.driver as cuda
#https://documen.tician.de/pycuda/driver.html
import pycuda.autoinit
import numpy as np
import tensorrt as trt
from .calibrator import Calibrator
import sys, os
import time
# TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
# TRT_LOGGER = trt.Logger(trt.Logger.INFO)
TRT_LOGGER = trt.Logger()
# Allocate host and device buffers, and create a stream.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
def allocate_buffers(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) # <--------- the main diff to v2
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
def allocate_buffers_v2(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
# do inference multi outputs
def do_inference_v2(context, bindings, inputs, outputs, stream, input_tensor):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
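# Hedged end-to-end sketch showing how the helpers above are combined. The
# input handling is an assumption on the caller's side: ``do_inference_v2``
# never reads its ``input_tensor`` argument, so data has to be copied into
# ``inputs[0].host`` before running inference.
def _run_inference_example(engine, input_array):
    inputs, outputs, bindings, stream = allocate_buffers(engine)
    np.copyto(inputs[0].host, input_array.ravel())
    with engine.create_execution_context() as context:
        return do_inference_v2(context, bindings, inputs, outputs, stream,
                               input_tensor=None)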
# The onnx path is used for Pytorch models.
def build_engine_onnx(model_file,engine_file,FP16=False,verbose=False,dynamic_input=False,batch_size=1):
def get_engine():
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
# with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network,builder.create_builder_config() as config, trt.OnnxParser(network,TRT_LOGGER) as parser:
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, builder.create_builder_config() as config,\
trt.OnnxParser(network,TRT_LOGGER) as parser:
# Workspace size is the maximum amount of memory available to the builder while building an engine.
#builder.max_workspace_size = 6 << 30 # 6G
config.max_workspace_size = (1 << 30) #for trt8
config.max_batch_size = batch_size #for trt8
#builder.max_batch_size = batch_size
if FP16:
print("[INFO] Open FP16 Mode!")
                config.set_flag(trt.BuilderFlag.FP16) # for trt8
#builder.fp16_mode = True #trt7
with open(model_file, 'rb') as model:
parser.parse(model.read())
if verbose:
print(">"*50)
for error in range(parser.num_errors):
print(parser.get_error(error))
network.get_input(0).shape = [ batch_size, 3, 800, 800 ]
if dynamic_input:
                profile = builder.create_optimization_profile()
profile.set_shape("inputs", (1,3,800,800), (8,3,800,800), (64,3,800,800))
config.add_optimization_profile(profile)
# builder engine
#engine = builder.build_cuda_engine(network) #trt 7
engine = builder.build_engine(network, config) #trt8
print("[INFO] Completed creating Engine!")
with open(engine_file, "wb") as f:
f.write(engine.serialize())
return engine
if os.path.exists(engine_file):
# If a serialized engine exists, use it instead of building an engine.
print("[INFO] Reading engine from file {}".format(engine_file))
with open(engine_file, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
return get_engine()
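# Hedged usage sketch for the function above; the ONNX and engine file names
# are placeholders, not files shipped with this project:
#   engine = build_engine_onnx("detr.onnx", "detr_fp16.plan",
#                              FP16=True, batch_size=1)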
# int8 quant
def build_engine_onnx_v2(onnx_file_path="", engine_file_path="",fp16_mode=False, int8_mode=False, \
max_batch_size=1,calibration_stream=None, calibration_table_path="", save_engine=False):
"""Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
def build_engine(max_batch_size, save_engine):
"""Takes an ONNX file and creates a TensorRT engine to run inference with"""
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(1) as network,\
builder.create_builder_config() as config,trt.OnnxParser(network, TRT_LOGGER) as parser:
# parse onnx model file
if not os.path.exists(onnx_file_path):
quit(f'[Error]ONNX file {onnx_file_path} not found')
print(f'[INFO] Loading ONNX file from path {onnx_file_path}...')
with open(onnx_file_path, 'rb') as model:
print('[INFO] Beginning ONNX file parsing')
parser.parse(model.read())
assert network.num_layers > 0, '[Error] Failed to parse ONNX model. \
Please check if the ONNX model is compatible '
print('[INFO] Completed parsing of ONNX file')
print(f'[INFO] Building an engine from file {onnx_file_path}; this may take a while...')
# build trt engine
# config.max_workspace_size = 2 << 30 # 2GB
builder.max_batch_size = max_batch_size
config.max_workspace_size = 2 << 30 # 2GB
if fp16_mode:
config.set_flag(trt.BuilderFlag.FP16)
if int8_mode:
#builder.int8_mode = int8_mode
config.set_flag(trt.BuilderFlag.INT8)
assert calibration_stream, '[Error] a calibration_stream should be provided for int8 mode'
config.int8_calibrator = Calibrator(calibration_stream, calibration_table_path)
# builder.int8_calibrator = Calibrator(calibration_stream, calibration_table_path)
print('[INFO] Int8 mode enabled')
#engine = builder.build_cuda_engine(network)
engine = builder.build_engine(network, config)
if engine is None:
print('[INFO] Failed to create the engine')
return None
print("[INFO] Completed creating the engine")
if save_engine:
with open(engine_file_path, "wb") as f:
f.write(engine.serialize())
return engine
if os.path.exists(engine_file_path):
# If a serialized engine exists, load it instead of building a new one.
print(f"[INFO] Reading engine from file {engine_file_path}")
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
return build_engine(max_batch_size, save_engine)
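# Hedged usage sketch for INT8 calibration; the file names are placeholders
# and ``calib_stream`` stands for whatever stream object ``Calibrator`` expects.
def _build_int8_engine_example(calib_stream):
    return build_engine_onnx_v2(
        onnx_file_path="model.onnx",
        engine_file_path="model_int8.trt",
        int8_mode=True,
        max_batch_size=1,
        calibration_stream=calib_stream,
        calibration_table_path="calibration.cache",
        save_engine=True,
    )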
| <filename>trt_util/common.py
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ~~~Medcare AI Lab~~~
# This code is adapted from the official TensorRT samples, with the relevant methods modified.
#
import pycuda.driver as cuda
#https://documen.tician.de/pycuda/driver.html
import pycuda.autoinit
import numpy as np
import tensorrt as trt
from .calibrator import Calibrator
import sys, os
import time
# TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
# TRT_LOGGER = trt.Logger(trt.Logger.INFO)
TRT_LOGGER = trt.Logger()
# Allocate host and device buffers, and create a stream.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
def allocate_buffers(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) # <--------- the main diff to v2
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
def allocate_buffers_v2(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
# do inference multi outputs
def do_inference_v2(context, bindings, inputs, outputs, stream, input_tensor):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
# The onnx path is used for Pytorch models.
def build_engine_onnx(model_file,engine_file,FP16=False,verbose=False,dynamic_input=False,batch_size=1):
def get_engine():
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
# with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network,builder.create_builder_config() as config, trt.OnnxParser(network,TRT_LOGGER) as parser:
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, builder.create_builder_config() as config,\
trt.OnnxParser(network,TRT_LOGGER) as parser:
# Workspace size is the maximum amount of memory available to the builder while building an engine.
#builder.max_workspace_size = 6 << 30 # 6G
config.max_workspace_size = (1 << 30) #for trt8
config.max_batch_size = batch_size #for trt8
#builder.max_batch_size = batch_size
if FP16:
print("[INFO] Open FP16 Mode!")
                config.set_flag(trt.BuilderFlag.FP16) # for trt8
#builder.fp16_mode = True #trt7
with open(model_file, 'rb') as model:
parser.parse(model.read())
if verbose:
print(">"*50)
for error in range(parser.num_errors):
print(parser.get_error(error))
network.get_input(0).shape = [ batch_size, 3, 800, 800 ]
if dynamic_input:
                profile = builder.create_optimization_profile()
profile.set_shape("inputs", (1,3,800,800), (8,3,800,800), (64,3,800,800))
config.add_optimization_profile(profile)
# builder engine
#engine = builder.build_cuda_engine(network) #trt 7
engine = builder.build_engine(network, config) #trt8
print("[INFO] Completed creating Engine!")
with open(engine_file, "wb") as f:
f.write(engine.serialize())
return engine
if os.path.exists(engine_file):
# If a serialized engine exists, use it instead of building an engine.
print("[INFO] Reading engine from file {}".format(engine_file))
with open(engine_file, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
return get_engine()
# int8 quant
def build_engine_onnx_v2(onnx_file_path="", engine_file_path="",fp16_mode=False, int8_mode=False, \
max_batch_size=1,calibration_stream=None, calibration_table_path="", save_engine=False):
"""Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
def build_engine(max_batch_size, save_engine):
"""Takes an ONNX file and creates a TensorRT engine to run inference with"""
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(1) as network,\
builder.create_builder_config() as config,trt.OnnxParser(network, TRT_LOGGER) as parser:
# parse onnx model file
if not os.path.exists(onnx_file_path):
quit(f'[Error]ONNX file {onnx_file_path} not found')
print(f'[INFO] Loading ONNX file from path {onnx_file_path}...')
with open(onnx_file_path, 'rb') as model:
print('[INFO] Beginning ONNX file parsing')
parser.parse(model.read())
assert network.num_layers > 0, '[Error] Failed to parse ONNX model. \
Please check if the ONNX model is compatible '
print('[INFO] Completed parsing of ONNX file')
print(f'[INFO] Building an engine from file {onnx_file_path}; this may take a while...')
# build trt engine
# config.max_workspace_size = 2 << 30 # 2GB
builder.max_batch_size = max_batch_size
config.max_workspace_size = 2 << 30 # 2GB
if fp16_mode:
config.set_flag(trt.BuilderFlag.FP16)
if int8_mode:
#builder.int8_mode = int8_mode
config.set_flag(trt.BuilderFlag.INT8)
assert calibration_stream, '[Error] a calibration_stream should be provided for int8 mode'
config.int8_calibrator = Calibrator(calibration_stream, calibration_table_path)
# builder.int8_calibrator = Calibrator(calibration_stream, calibration_table_path)
print('[INFO] Int8 mode enabled')
#engine = builder.build_cuda_engine(network)
engine = builder.build_engine(network, config)
if engine is None:
print('[INFO] Failed to create the engine')
return None
print("[INFO] Completed creating the engine")
if save_engine:
with open(engine_file_path, "wb") as f:
f.write(engine.serialize())
return engine
if os.path.exists(engine_file_path):
# If a serialized engine exists, load it instead of building a new one.
print(f"[INFO] Reading engine from file {engine_file_path}")
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
return build_engine(max_batch_size, save_engine)
| en | 0.761653 | # # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ~~~Medcare AI Lab~~~ # 该部分代码参考了TensorRT官方示例完成,对相关方法进行修改 # #https://documen.tician.de/pycuda/driver.html # TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE) # TRT_LOGGER = trt.Logger(trt.Logger.INFO) # Allocate host and device buffers, and create a stream. # <--------- the main diff to v2 # Allocate host and device buffers # Append the device buffer to device bindings. # Append to the appropriate list. # Allocate host and device buffers # Append the device buffer to device bindings. # Append to the appropriate list. # do inference multi outputs # Transfer input data to the GPU. # Run inference. # Transfer predictions back from the GPU. # Synchronize the stream # Return only the host outputs. # The onnx path is used for Pytorch models. # with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network,builder.create_builder_config() as config, trt.OnnxParser(network,TRT_LOGGER) as parser: # Workspace size is the maximum amount of memory available to the builder while building an engine. #builder.max_workspace_size = 6 << 30 # 6G #for trt8 #for trt8 #builder.max_batch_size = batch_size # for trt8 #builder.fp16_mode = True #trt7 # builder engine #engine = builder.build_cuda_engine(network) #trt 7 #trt8 # If a serialized engine exists, use it instead of building an engine. # int8 quant Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it. Takes an ONNX file and creates a TensorRT engine to run inference with # parse onnx model file # build trt engine # config.max_workspace_size = 2 << 30 # 2GB # 2GB #builder.int8_mode = int8_mode # builder.int8_calibrator = Calibrator(calibration_stream, calibration_table_path) #engine = builder.build_cuda_engine(network) # If a serialized engine exists, load it instead of building a new one. | 2.002341 | 2 |
src/init.py | inpanel/inpanel-desktop | 1 | 8772 | #!/usr/bin/env python3
# -*- coding:utf-8-*-
import tkinter.messagebox
from tkinter import Button, Label, Tk
from utils.functions import set_window_center
from utils.sqlite_helper import DBHelper
from inpanel import App
class InitWindow(Tk):
"""初始化窗口"""
def __init__(self):
Tk.__init__(self)
self.title("初始化数据")
set_window_center(self, 300, 180)
self.resizable(False, False)
        self.win_success = None  # popup window shown after successful initialization
self.init_page()
def init_page(self):
"""加载控件"""
btn_1 = Button(self, text="初始化数据库", command=self.do_init_db)
btn_1.pack(expand="yes", padx=10, pady=10, ipadx=5, ipady=5)
def do_init_db(self):
"""初始化"""
db_helper = DBHelper()
db_helper.reset_database()
db_helper.create_database()
try:
            tmp = db_helper.insert_user("admin", "admin") # default user
tmp2 = db_helper.insert_content_by_username(
"admin",
"Hello World !",
"源码仓库地址:https://github.com/doudoudzj/tkinter-app",
"github",
)
tmp3 = db_helper.get_content_by_username("admin")
print("添加用户admin:", tmp)
print("添加内容:", tmp2)
print("查询内容:", tmp3)
self.do_success()
self.destroy()
except KeyError:
print(KeyError)
self.do_failed()
def do_failed(self):
"""是否重试"""
res = tkinter.messagebox.askretrycancel('提示', '初始化失败,是否重试?', parent=self)
if res is True:
self.do_init_db()
elif res is False:
self.destroy()
def do_success(self):
"""初始化成功弹窗"""
self.win_success = Tk()
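        # NOTE: this creates a second Tk root while the main window still
        # exists; tkinter.Toplevel would normally be preferred, behaviour kept as-is.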
self.win_success.title("初始化成功")
set_window_center(self.win_success, 250, 150)
self.win_success.resizable(False, False)
msg = Label(self.win_success, text="初始化成功")
msg.pack(expand="yes", fill="both")
btn = Button(self.win_success, text="确定", command=self.quit)
btn.pack(side="right", padx=10, pady=10, ipadx=5, ipady=5)
btn_open_app = Button(self.win_success, text="启动程序", command=self.open_app)
btn_open_app.pack(side="right", padx=10, pady=10, ipadx=5, ipady=5)
def open_app(self):
"""打开应用程序"""
self.quit()
self.win_success.destroy()
self.win_success.quit()
App()
if __name__ == "__main__":
APP_INIT = InitWindow()
APP_INIT.mainloop()
| #!/usr/bin/env python3
# -*- coding:utf-8-*-
import tkinter.messagebox
from tkinter import Button, Label, Tk
from utils.functions import set_window_center
from utils.sqlite_helper import DBHelper
from inpanel import App
class InitWindow(Tk):
"""初始化窗口"""
def __init__(self):
Tk.__init__(self)
self.title("初始化数据")
set_window_center(self, 300, 180)
self.resizable(False, False)
self.win_success = None # 初始化成功的提示窗口
self.init_page()
def init_page(self):
"""加载控件"""
btn_1 = Button(self, text="初始化数据库", command=self.do_init_db)
btn_1.pack(expand="yes", padx=10, pady=10, ipadx=5, ipady=5)
def do_init_db(self):
"""初始化"""
db_helper = DBHelper()
db_helper.reset_database()
db_helper.create_database()
try:
tmp = db_helper.insert_user("admin", "admin") # 默认用户
tmp2 = db_helper.insert_content_by_username(
"admin",
"Hello World !",
"源码仓库地址:https://github.com/doudoudzj/tkinter-app",
"github",
)
tmp3 = db_helper.get_content_by_username("admin")
print("添加用户admin:", tmp)
print("添加内容:", tmp2)
print("查询内容:", tmp3)
self.do_success()
self.destroy()
except KeyError:
print(KeyError)
self.do_failed()
def do_failed(self):
"""是否重试"""
res = tkinter.messagebox.askretrycancel('提示', '初始化失败,是否重试?', parent=self)
if res is True:
self.do_init_db()
elif res is False:
self.destroy()
def do_success(self):
"""初始化成功弹窗"""
self.win_success = Tk()
self.win_success.title("初始化成功")
set_window_center(self.win_success, 250, 150)
self.win_success.resizable(False, False)
msg = Label(self.win_success, text="初始化成功")
msg.pack(expand="yes", fill="both")
btn = Button(self.win_success, text="确定", command=self.quit)
btn.pack(side="right", padx=10, pady=10, ipadx=5, ipady=5)
btn_open_app = Button(self.win_success, text="启动程序", command=self.open_app)
btn_open_app.pack(side="right", padx=10, pady=10, ipadx=5, ipady=5)
def open_app(self):
"""打开应用程序"""
self.quit()
self.win_success.destroy()
self.win_success.quit()
App()
if __name__ == "__main__":
APP_INIT = InitWindow()
APP_INIT.mainloop()
| zh | 0.943025 | #!/usr/bin/env python3 # -*- coding:utf-8-*- 初始化窗口 # 初始化成功的提示窗口 加载控件 初始化 # 默认用户 是否重试 初始化成功弹窗 打开应用程序 | 3.180834 | 3 |
Toolkits/CMake/hunter/packages/sugar/python/sugar/sugar_warnings_wiki_table_generator.py | roscopecoltran/SniperKit-Core | 102 | 8773 | <reponame>roscopecoltran/SniperKit-Core
#!/usr/bin/env python3
# Copyright (c) 2014, <NAME>
# All rights reserved.
"""
* Wiki table for `leathers` C++ project
Expected format:
### Main table
Name | Clang | GCC | MSVC |
-----------------------------|----------|----------|------|
static-ctor-not-thread-safe | *no* | *no* | 4640 |
switch | **same** | **same** | 4062 |
switch-enum | **same** | **same** | 4061 |
### Xcode/Clang table
Clang | Xcode | Objective-C |
-----------------------|--------------------------------|-------------|
bool-conversion | CLANG_WARN_BOOL_CONVERSION | no |
c++11-extensions | CLANG_WARN_CXX0X_EXTENSIONS | no |
strict-selector-match | GCC_WARN_STRICT_SELECTOR_MATCH | yes |
undeclared-selector | GCC_WARN_UNDECLARED_SELECTOR | yes |
"""
def generate(main_warnings_table):
groups = set()
for i in main_warnings_table:
if i.group != "":
groups.add(i.group)
wiki_file = open("wiki-table.txt", "w")
generate_main_table(main_warnings_table, wiki_file)
for group in groups:
generate_group_table(main_warnings_table, wiki_file, group)
generate_xcode_table(main_warnings_table, wiki_file)
def generate_main_table(main_warnings_table, wiki_file):
head_name = "Name"
head_clang = "Clang"
head_gcc = "GCC"
head_msvc = "MSVC"
def calc_max(head, visitor):
max_len = len(head)
for x in main_warnings_table:
cur_len = visitor(x)
if cur_len > max_len:
max_len = cur_len
return max_len + 2
def name_visitor(table_entry):
if table_entry.group != "":
return 0
return len(table_entry.warning_name)
def clang_visitor(table_entry):
if table_entry.group != "":
return 0
return len(table_entry.clang.wiki_entry(table_entry.warning_name))
def gcc_visitor(table_entry):
if table_entry.group != "":
return 0
return len(table_entry.gcc.wiki_entry(table_entry.warning_name))
def msvc_visitor(table_entry):
if table_entry.group != "":
return 0
return len(table_entry.msvc.wiki_entry(table_entry.warning_name))
max_name = calc_max(head_name, name_visitor)
max_clang = calc_max(head_clang, clang_visitor)
max_gcc = calc_max(head_gcc, gcc_visitor)
max_msvc = calc_max(head_msvc, msvc_visitor)
def fill_string(name, max_name):
result = " " + name + " ";
assert(max_name >= len(result))
left = max_name - len(result)
return result + " " * left
wiki_file.write("### Main table\n\n")
s = "{}|{}|{}|{}|\n".format(
fill_string(head_name, max_name),
fill_string(head_clang, max_clang),
fill_string(head_gcc, max_gcc),
fill_string(head_msvc, max_msvc),
)
wiki_file.write(s)
s = "{}|{}|{}|{}|\n".format(
'-' * max_name,
'-' * max_clang,
'-' * max_gcc,
'-' * max_msvc,
)
wiki_file.write(s)
for entry in main_warnings_table:
if entry.group != "":
continue
s = "{}|{}|{}|{}|\n".format(
fill_string(entry.warning_name, max_name),
fill_string(entry.clang.wiki_entry(entry.warning_name), max_clang),
fill_string(entry.gcc.wiki_entry(entry.warning_name), max_gcc),
fill_string(entry.msvc.wiki_entry(entry.warning_name), max_msvc),
)
wiki_file.write(s)
def generate_group_table(main_warnings_table, wiki_file, group):
head_name = "Name"
head_clang = "Clang"
head_gcc = "GCC"
head_msvc = "MSVC"
def calc_max(head, visitor):
max_len = len(head)
for x in main_warnings_table:
cur_len = visitor(x)
if cur_len > max_len:
max_len = cur_len
return max_len + 2
def name_visitor(table_entry):
if table_entry.group != group:
return 0
return len(table_entry.warning_name)
def clang_visitor(table_entry):
if table_entry.group != group:
return 0
return len(table_entry.clang.wiki_entry(table_entry.warning_name))
def gcc_visitor(table_entry):
if table_entry.group != group:
return 0
return len(table_entry.gcc.wiki_entry(table_entry.warning_name))
def msvc_visitor(table_entry):
if table_entry.group != group:
return 0
return len(table_entry.msvc.wiki_entry(table_entry.warning_name))
max_name = calc_max(head_name, name_visitor)
max_clang = calc_max(head_clang, clang_visitor)
max_gcc = calc_max(head_gcc, gcc_visitor)
max_msvc = calc_max(head_msvc, msvc_visitor)
def fill_string(name, max_name):
result = " " + name + " ";
assert(max_name >= len(result))
left = max_name - len(result)
return result + " " * left
wiki_file.write("\n### Table for group: `{}`\n\n".format(group))
s = "{}|{}|{}|{}|\n".format(
fill_string(head_name, max_name),
fill_string(head_clang, max_clang),
fill_string(head_gcc, max_gcc),
fill_string(head_msvc, max_msvc),
)
wiki_file.write(s)
s = "{}|{}|{}|{}|\n".format(
'-' * max_name,
'-' * max_clang,
'-' * max_gcc,
'-' * max_msvc,
)
wiki_file.write(s)
for entry in main_warnings_table:
if entry.group != group:
continue
s = "{}|{}|{}|{}|\n".format(
fill_string(entry.warning_name, max_name),
fill_string(entry.clang.wiki_entry(entry.warning_name), max_clang),
fill_string(entry.gcc.wiki_entry(entry.warning_name), max_gcc),
fill_string(entry.msvc.wiki_entry(entry.warning_name), max_msvc),
)
wiki_file.write(s)
def generate_xcode_table(main_warnings_table, wiki_file):
head_clang = "Clang"
head_xcode = "Xcode"
head_objc = "Objective-C"
def calc_max(head, visitor):
max_len = len(head)
for x in main_warnings_table:
cur_len = visitor(x)
if cur_len > max_len:
max_len = cur_len
return max_len + 2
def clang_visitor(table_entry):
if table_entry.xcode.option == "":
return 0
return len(table_entry.clang.option)
def xcode_visitor(table_entry):
if table_entry.xcode.option == "":
return 0
return len(table_entry.xcode.option)
def objc_visitor(table_entry):
if table_entry.xcode.option == "":
return 0
if table_entry.objc:
return 3 # "yes"
else:
return 2 # "no"
max_clang = calc_max(head_clang, clang_visitor)
max_xcode = calc_max(head_xcode, xcode_visitor)
max_objc = calc_max(head_objc, objc_visitor)
def fill_string(name, max_name):
result = " " + name + " ";
assert(max_name >= len(result))
left = max_name - len(result)
return result + " " * left
wiki_file.write("\n\n### Xcode/Clang table\n\n")
s = "{}|{}|{}|\n".format(
fill_string(head_clang, max_clang),
fill_string(head_xcode, max_xcode),
fill_string(head_objc, max_objc),
)
wiki_file.write(s)
s = "{}|{}|{}|\n".format(
'-' * max_clang,
'-' * max_xcode,
'-' * max_objc,
)
wiki_file.write(s)
done_list = []
for entry in main_warnings_table:
if entry.xcode.option == "":
continue
if entry.clang.option in done_list:
continue
done_list.append(entry.clang.option)
if entry.objc:
objc = "yes"
else:
objc = "no"
s = "{}|{}|{}|\n".format(
fill_string(entry.clang.option, max_clang),
fill_string(entry.xcode.option, max_xcode),
fill_string(objc, max_objc),
)
wiki_file.write(s)
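# Hedged usage sketch (not part of the original module). The generators above
# only assume that each row exposes ``warning_name``, ``group``, ``objc`` and
# per-compiler objects with an ``option`` attribute and a ``wiki_entry(name)``
# method; the stand-in classes below are illustrative, not the real
# ``leathers`` table types.
if __name__ == "__main__":
    import collections

    class _Compiler(object):
        def __init__(self, option):
            self.option = option

        def wiki_entry(self, warning_name):
            return self.option or "*no*"

    _Entry = collections.namedtuple(
        "_Entry", "warning_name group objc clang gcc msvc xcode")

    _demo = [
        _Entry("switch", "", False,
               _Compiler("**same**"), _Compiler("**same**"),
               _Compiler("4062"), _Compiler("")),
    ]
    generate(_demo)  # writes wiki-table.txt for this single demo row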
| #!/usr/bin/env python3
# Copyright (c) 2014, <NAME>
# All rights reserved.
"""
* Wiki table for `leathers` C++ project
Expected format:
### Main table
Name | Clang | GCC | MSVC |
-----------------------------|----------|----------|------|
static-ctor-not-thread-safe | *no* | *no* | 4640 |
switch | **same** | **same** | 4062 |
switch-enum | **same** | **same** | 4061 |
### Xcode/Clang table
Clang | Xcode | Objective-C |
-----------------------|--------------------------------|-------------|
bool-conversion | CLANG_WARN_BOOL_CONVERSION | no |
c++11-extensions | CLANG_WARN_CXX0X_EXTENSIONS | no |
strict-selector-match | GCC_WARN_STRICT_SELECTOR_MATCH | yes |
undeclared-selector | GCC_WARN_UNDECLARED_SELECTOR | yes |
"""
def generate(main_warnings_table):
groups = set()
for i in main_warnings_table:
if i.group != "":
groups.add(i.group)
wiki_file = open("wiki-table.txt", "w")
generate_main_table(main_warnings_table, wiki_file)
for group in groups:
generate_group_table(main_warnings_table, wiki_file, group)
generate_xcode_table(main_warnings_table, wiki_file)
def generate_main_table(main_warnings_table, wiki_file):
head_name = "Name"
head_clang = "Clang"
head_gcc = "GCC"
head_msvc = "MSVC"
def calc_max(head, visitor):
max_len = len(head)
for x in main_warnings_table:
cur_len = visitor(x)
if cur_len > max_len:
max_len = cur_len
return max_len + 2
def name_visitor(table_entry):
if table_entry.group != "":
return 0
return len(table_entry.warning_name)
def clang_visitor(table_entry):
if table_entry.group != "":
return 0
return len(table_entry.clang.wiki_entry(table_entry.warning_name))
def gcc_visitor(table_entry):
if table_entry.group != "":
return 0
return len(table_entry.gcc.wiki_entry(table_entry.warning_name))
def msvc_visitor(table_entry):
if table_entry.group != "":
return 0
return len(table_entry.msvc.wiki_entry(table_entry.warning_name))
max_name = calc_max(head_name, name_visitor)
max_clang = calc_max(head_clang, clang_visitor)
max_gcc = calc_max(head_gcc, gcc_visitor)
max_msvc = calc_max(head_msvc, msvc_visitor)
def fill_string(name, max_name):
result = " " + name + " ";
assert(max_name >= len(result))
left = max_name - len(result)
return result + " " * left
wiki_file.write("### Main table\n\n")
s = "{}|{}|{}|{}|\n".format(
fill_string(head_name, max_name),
fill_string(head_clang, max_clang),
fill_string(head_gcc, max_gcc),
fill_string(head_msvc, max_msvc),
)
wiki_file.write(s)
s = "{}|{}|{}|{}|\n".format(
'-' * max_name,
'-' * max_clang,
'-' * max_gcc,
'-' * max_msvc,
)
wiki_file.write(s)
for entry in main_warnings_table:
if entry.group != "":
continue
s = "{}|{}|{}|{}|\n".format(
fill_string(entry.warning_name, max_name),
fill_string(entry.clang.wiki_entry(entry.warning_name), max_clang),
fill_string(entry.gcc.wiki_entry(entry.warning_name), max_gcc),
fill_string(entry.msvc.wiki_entry(entry.warning_name), max_msvc),
)
wiki_file.write(s)
def generate_group_table(main_warnings_table, wiki_file, group):
head_name = "Name"
head_clang = "Clang"
head_gcc = "GCC"
head_msvc = "MSVC"
def calc_max(head, visitor):
max_len = len(head)
for x in main_warnings_table:
cur_len = visitor(x)
if cur_len > max_len:
max_len = cur_len
return max_len + 2
def name_visitor(table_entry):
if table_entry.group != group:
return 0
return len(table_entry.warning_name)
def clang_visitor(table_entry):
if table_entry.group != group:
return 0
return len(table_entry.clang.wiki_entry(table_entry.warning_name))
def gcc_visitor(table_entry):
if table_entry.group != group:
return 0
return len(table_entry.gcc.wiki_entry(table_entry.warning_name))
def msvc_visitor(table_entry):
if table_entry.group != group:
return 0
return len(table_entry.msvc.wiki_entry(table_entry.warning_name))
max_name = calc_max(head_name, name_visitor)
max_clang = calc_max(head_clang, clang_visitor)
max_gcc = calc_max(head_gcc, gcc_visitor)
max_msvc = calc_max(head_msvc, msvc_visitor)
def fill_string(name, max_name):
result = " " + name + " ";
assert(max_name >= len(result))
left = max_name - len(result)
return result + " " * left
wiki_file.write("\n### Table for group: `{}`\n\n".format(group))
s = "{}|{}|{}|{}|\n".format(
fill_string(head_name, max_name),
fill_string(head_clang, max_clang),
fill_string(head_gcc, max_gcc),
fill_string(head_msvc, max_msvc),
)
wiki_file.write(s)
s = "{}|{}|{}|{}|\n".format(
'-' * max_name,
'-' * max_clang,
'-' * max_gcc,
'-' * max_msvc,
)
wiki_file.write(s)
for entry in main_warnings_table:
if entry.group != group:
continue
s = "{}|{}|{}|{}|\n".format(
fill_string(entry.warning_name, max_name),
fill_string(entry.clang.wiki_entry(entry.warning_name), max_clang),
fill_string(entry.gcc.wiki_entry(entry.warning_name), max_gcc),
fill_string(entry.msvc.wiki_entry(entry.warning_name), max_msvc),
)
wiki_file.write(s)
def generate_xcode_table(main_warnings_table, wiki_file):
head_clang = "Clang"
head_xcode = "Xcode"
head_objc = "Objective-C"
def calc_max(head, visitor):
max_len = len(head)
for x in main_warnings_table:
cur_len = visitor(x)
if cur_len > max_len:
max_len = cur_len
return max_len + 2
def clang_visitor(table_entry):
if table_entry.xcode.option == "":
return 0
return len(table_entry.clang.option)
def xcode_visitor(table_entry):
if table_entry.xcode.option == "":
return 0
return len(table_entry.xcode.option)
def objc_visitor(table_entry):
if table_entry.xcode.option == "":
return 0
if table_entry.objc:
return 3 # "yes"
else:
return 2 # "no"
max_clang = calc_max(head_clang, clang_visitor)
max_xcode = calc_max(head_xcode, xcode_visitor)
max_objc = calc_max(head_objc, objc_visitor)
def fill_string(name, max_name):
result = " " + name + " ";
assert(max_name >= len(result))
left = max_name - len(result)
return result + " " * left
wiki_file.write("\n\n### Xcode/Clang table\n\n")
s = "{}|{}|{}|\n".format(
fill_string(head_clang, max_clang),
fill_string(head_xcode, max_xcode),
fill_string(head_objc, max_objc),
)
wiki_file.write(s)
s = "{}|{}|{}|\n".format(
'-' * max_clang,
'-' * max_xcode,
'-' * max_objc,
)
wiki_file.write(s)
done_list = []
for entry in main_warnings_table:
if entry.xcode.option == "":
continue
if entry.clang.option in done_list:
continue
done_list.append(entry.clang.option)
if entry.objc:
objc = "yes"
else:
objc = "no"
s = "{}|{}|{}|\n".format(
fill_string(entry.clang.option, max_clang),
fill_string(entry.xcode.option, max_xcode),
fill_string(objc, max_objc),
)
wiki_file.write(s) | en | 0.282369 | #!/usr/bin/env python3 # Copyright (c) 2014, <NAME> # All rights reserved. * Wiki table for `leathers` C++ project Expected format: ### Main table Name | Clang | GCC | MSVC | -----------------------------|----------|----------|------| static-ctor-not-thread-safe | *no* | *no* | 4640 | switch | **same** | **same** | 4062 | switch-enum | **same** | **same** | 4061 | ### Xcode/Clang table Clang | Xcode | Objective-C | -----------------------|--------------------------------|-------------| bool-conversion | CLANG_WARN_BOOL_CONVERSION | no | c++11-extensions | CLANG_WARN_CXX0X_EXTENSIONS | no | strict-selector-match | GCC_WARN_STRICT_SELECTOR_MATCH | yes | undeclared-selector | GCC_WARN_UNDECLARED_SELECTOR | yes | ## Main table\n\n") ### Table for group: `{}`\n\n".format(group)) # "yes" # "no" ### Xcode/Clang table\n\n") | 2.218365 | 2 |
neutron/plugins/ofagent/agent/ports.py | armando-migliaccio/neutron-1 | 0 | 8774 | # Copyright (C) 2014 VA Linux Systems Japan K.K.
# Copyright (C) 2014 <NAME> <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class OFPort(object):
def __init__(self, port_name, ofport):
self.port_name = port_name
self.ofport = ofport
@classmethod
def from_ofp_port(cls, ofp_port):
"""Convert from ryu OFPPort."""
return cls(port_name=ofp_port.name, ofport=ofp_port.port_no)
PORT_NAME_LEN = 14
PORT_NAME_PREFIXES = [
"tap", # common cases, including ovs_use_veth=True
"qvo", # nova hybrid interface driver
"qr-", # l3-agent INTERNAL_DEV_PREFIX (ovs_use_veth=False)
"qg-", # l3-agent EXTERNAL_DEV_PREFIX (ovs_use_veth=False)
]
def _is_neutron_port(name):
"""Return True if the port name looks like a neutron port."""
if len(name) != PORT_NAME_LEN:
return False
for pref in PORT_NAME_PREFIXES:
if name.startswith(pref):
return True
return False
def get_normalized_port_name(interface_id):
"""Convert from neutron device id (uuid) to "normalized" port name.
This needs to be synced with ML2 plugin's _device_to_port_id().
An assumption: The switch uses an OS's interface name as the
corresponding OpenFlow port name.
NOTE(yamamoto): While it's true for Open vSwitch, it isn't
necessarily true everywhere. For example, LINC uses something
like "LogicalSwitch0-Port2".
NOTE(yamamoto): The actual prefix might be different. For example,
with the hybrid interface driver, it's "qvo". However, we always
use "tap" prefix throughout the agent and plugin for simplicity.
Some care should be taken when talking to the switch.
"""
return ("tap" + interface_id)[0:PORT_NAME_LEN]
def _normalize_port_name(name):
"""Normalize port name.
See comments in get_normalized_port_name.
"""
for pref in PORT_NAME_PREFIXES:
if name.startswith(pref):
return "tap" + name[len(pref):]
return name
class Port(OFPort):
def __init__(self, *args, **kwargs):
super(Port, self).__init__(*args, **kwargs)
self.vif_mac = None
def is_neutron_port(self):
"""Return True if the port looks like a neutron port."""
return _is_neutron_port(self.port_name)
def normalized_port_name(self):
return _normalize_port_name(self.port_name)
| # Copyright (C) 2014 VA Linux Systems Japan K.K.
# Copyright (C) 2014 <NAME> <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class OFPort(object):
def __init__(self, port_name, ofport):
self.port_name = port_name
self.ofport = ofport
@classmethod
def from_ofp_port(cls, ofp_port):
"""Convert from ryu OFPPort."""
return cls(port_name=ofp_port.name, ofport=ofp_port.port_no)
PORT_NAME_LEN = 14
PORT_NAME_PREFIXES = [
"tap", # common cases, including ovs_use_veth=True
"qvo", # nova hybrid interface driver
"qr-", # l3-agent INTERNAL_DEV_PREFIX (ovs_use_veth=False)
"qg-", # l3-agent EXTERNAL_DEV_PREFIX (ovs_use_veth=False)
]
def _is_neutron_port(name):
"""Return True if the port name looks like a neutron port."""
if len(name) != PORT_NAME_LEN:
return False
for pref in PORT_NAME_PREFIXES:
if name.startswith(pref):
return True
return False
def get_normalized_port_name(interface_id):
"""Convert from neutron device id (uuid) to "normalized" port name.
This needs to be synced with ML2 plugin's _device_to_port_id().
An assumption: The switch uses an OS's interface name as the
corresponding OpenFlow port name.
NOTE(yamamoto): While it's true for Open vSwitch, it isn't
necessarily true everywhere. For example, LINC uses something
like "LogicalSwitch0-Port2".
NOTE(yamamoto): The actual prefix might be different. For example,
with the hybrid interface driver, it's "qvo". However, we always
use "tap" prefix throughout the agent and plugin for simplicity.
Some care should be taken when talking to the switch.
"""
return ("tap" + interface_id)[0:PORT_NAME_LEN]
def _normalize_port_name(name):
"""Normalize port name.
See comments in _get_ofport_name.
"""
for pref in PORT_NAME_PREFIXES:
if name.startswith(pref):
return "tap" + name[len(pref):]
return name
class Port(OFPort):
def __init__(self, *args, **kwargs):
super(Port, self).__init__(*args, **kwargs)
self.vif_mac = None
def is_neutron_port(self):
"""Return True if the port looks like a neutron port."""
return _is_neutron_port(self.port_name)
def normalized_port_name(self):
return _normalize_port_name(self.port_name)
| en | 0.787011 | # Copyright (C) 2014 VA Linux Systems Japan K.K. # Copyright (C) 2014 <NAME> <yamamoto at valinux co jp> # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Convert from ryu OFPPort. # common cases, including ovs_use_veth=True # nova hybrid interface driver # l3-agent INTERNAL_DEV_PREFIX (ovs_use_veth=False) # l3-agent EXTERNAL_DEV_PREFIX (ovs_use_veth=False) Return True if the port name looks like a neutron port. Convert from neutron device id (uuid) to "normalized" port name. This needs to be synced with ML2 plugin's _device_to_port_id(). An assumption: The switch uses an OS's interface name as the corresponding OpenFlow port name. NOTE(yamamoto): While it's true for Open vSwitch, it isn't necessarily true everywhere. For example, LINC uses something like "LogicalSwitch0-Port2". NOTE(yamamoto): The actual prefix might be different. For example, with the hybrid interface driver, it's "qvo". However, we always use "tap" prefix throughout the agent and plugin for simplicity. Some care should be taken when talking to the switch. Normalize port name. See comments in _get_ofport_name. Return True if the port looks like a neutron port. | 1.857872 | 2 |
pdf/wechat/step.py | damaainan/html2md | 0 | 8775 | # -*- coding=utf-8 -*-
from zwechathihu.mypdf import GenPdf
from db.mysqlite import simpleToolSql
data=[{"url": "http://mp.weixin.qq.com/s?__biz=MzAxODQxMDM0Mw==&mid=2247484852&idx=1&sn=85b50b8b0470bb4897e517955f4e5002&chksm=9bd7fbbcaca072aa75e2a241064a403fde1e579d57ab846cd8537a54253ceb2c8b93cc3bf38e&scene=21#wechat_redirect", "name": "001学习算法和刷题的框架思维"}
]
# path = '***/' || ''
# for val in data:
# # print(val["url"])
# # print(val["name"])
# pdf = GenPdf()
# title = val["name"].replace("/", "-")
# print(title)
# pdf.deal(val["url"], title, '')
# sql = simpleToolSql("url")
# # sql.execute("insert into wx_article (id,name,age) values (?,?,?);",[(1,'abc',15),(2,'bca',16)])
# res = sql.query("select * from wx_article;")
# print(res)
# res = sql.query("select * from wx_article where id=?;",(3,))
# print(res)
# sql.close()
# fetch the URLs that need to be generated from the db
def getListByTitle(title:str):
sql = simpleToolSql("url")
res = sql.query("select * from wx_article where title="+title+";")
print(res)
sql.close()
return res
# fetch the URLs that need to be generated from the db
def getListFromSql():
sql = simpleToolSql("url")
# res = sql.query("select * from wx_article where state=0;")
res = sql.query("select * from wx_article;")
print(res)
sql.close()
return res
# update the db
def updateUrl(id:int):
sql = simpleToolSql("url")
res = sql.execute("update wx_article set state=1 where id = ?;",(id,))
# a trailing comma is needed here (single-element tuple) https://blog.csdn.net/yimaoyingbi/article/details/104323701
print(res)
sql.close()
return
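# Note (added for clarity): sqlite parameter bindings must be a sequence, so a
# single value is passed as a one-element tuple, e.g. (id,) rather than (id).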
def addUrl():
sql = simpleToolSql("url")
sql.execute(
"insert into wx_article (url,folder,title,state,turn,create_at,update_at) values (?,?,?,?,?,?);",
[("http",'test',"01",0,1,"2020-12-03 09:38:25","2020-12-03 09:38:25")]
)
res = sql.query("select * from wx_article;")
print(res)
sql.close()
return
# addUrl()
updateUrl(1)
res = getListFromSql()
print(res) | # -*- coding=utf-8 -*-
from zwechathihu.mypdf import GenPdf
from db.mysqlite import simpleToolSql
data=[{"url": "http://mp.weixin.qq.com/s?__biz=MzAxODQxMDM0Mw==&mid=2247484852&idx=1&sn=85b50b8b0470bb4897e517955f4e5002&chksm=9bd7fbbcaca072aa75e2a241064a403fde1e579d57ab846cd8537a54253ceb2c8b93cc3bf38e&scene=21#wechat_redirect", "name": "001学习算法和刷题的框架思维"}
]
# path = '***/' || ''
# for val in data:
# # print(val["url"])
# # print(val["name"])
# pdf = GenPdf()
# title = val["name"].replace("/", "-")
# print(title)
# pdf.deal(val["url"], title, '')
# sql = simpleToolSql("url")
# # sql.execute("insert into wx_article (id,name,age) values (?,?,?);",[(1,'abc',15),(2,'bca',16)])
# res = sql.query("select * from wx_article;")
# print(res)
# res = sql.query("select * from wx_article where id=?;",(3,))
# print(res)
# sql.close()
# fetch the URLs that need to be generated from the db
def getListByTitle(title:str):
sql = simpleToolSql("url")
res = sql.query("select * from wx_article where title="+title+";")
print(res)
sql.close()
return res
# fetch the URLs that need to be generated from the db
def getListFromSql():
sql = simpleToolSql("url")
# res = sql.query("select * from wx_article where state=0;")
res = sql.query("select * from wx_article;")
print(res)
sql.close()
return res
# update the db
def updateUrl(id:int):
sql = simpleToolSql("url")
res = sql.execute("update wx_article set state=1 where id = ?;",(id,))
# a trailing comma is needed here (single-element tuple) https://blog.csdn.net/yimaoyingbi/article/details/104323701
print(res)
sql.close()
return
def addUrl():
sql = simpleToolSql("url")
sql.execute(
"insert into wx_article (url,folder,title,state,turn,create_at,update_at) values (?,?,?,?,?,?);",
[("http",'test',"01",0,1,"2020-12-03 09:38:25","2020-12-03 09:38:25")]
)
res = sql.query("select * from wx_article;")
print(res)
sql.close()
return
# addUrl()
updateUrl(1)
res = getListFromSql()
print(res) | en | 0.366692 | # -*- coding=utf-8 -*- #wechat_redirect", "name": "001学习算法和刷题的框架思维"} # path = '***/' || '' # for val in data: # # print(val["url"]) # # print(val["name"]) # pdf = GenPdf() # title = val["name"].replace("/", "-") # print(title) # pdf.deal(val["url"], title, '') # sql = simpleToolSql("url") # # sql.execute("insert into wx_article (id,name,age) values (?,?,?);",[(1,'abc',15),(2,'bca',16)]) # res = sql.query("select * from wx_article;") # print(res) # res = sql.query("select * from wx_article where id=?;",(3,)) # print(res) # sql.close() # 从 db 获取需要生成的url # 从 db 获取需要生成的url # res = sql.query("select * from wx_article where state=0;") # 更新 db # 需要加逗号 https://blog.csdn.net/yimaoyingbi/article/details/104323701 # addUrl() | 2.798182 | 3 |
pipeline/validators/handlers.py | ZhuoZhuoCrayon/bk-nodeman | 31 | 8776 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.dispatch import receiver
from pipeline.core.flow.event import EndEvent
from pipeline.core.flow.signals import post_new_end_event_register
from pipeline.validators import rules
@receiver(post_new_end_event_register, sender=EndEvent)
def post_new_end_event_register_handler(sender, node_type, node_cls, **kwargs):
rules.NODE_RULES[node_type] = rules.SINK_RULE
rules.FLOW_NODES_WITHOUT_STARTEVENT.append(node_type)
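# Illustrative sketch (assumption, not part of the original handler): when a new
# end-event type is registered elsewhere, e.g.
#   post_new_end_event_register.send(sender=EndEvent, node_type='MyEndEvent',
#                                    node_cls=MyEndEvent)
# this receiver applies the SINK_RULE to it and adds it to
# FLOW_NODES_WITHOUT_STARTEVENT, so validation treats it like a built-in end event.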
| # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.dispatch import receiver
from pipeline.core.flow.event import EndEvent
from pipeline.core.flow.signals import post_new_end_event_register
from pipeline.validators import rules
@receiver(post_new_end_event_register, sender=EndEvent)
def post_new_end_event_register_handler(sender, node_type, node_cls, **kwargs):
rules.NODE_RULES[node_type] = rules.SINK_RULE
rules.FLOW_NODES_WITHOUT_STARTEVENT.append(node_type)
| en | 0.863967 | # -*- coding: utf-8 -*- Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 1.57152 | 2 |
NumPy/Array Basics/Random Shuffle/tests/test_task.py | jetbrains-academy/Python-Libraries-NumPy | 0 | 8777 | import unittest
import numpy as np
from task import arr, permuted_2d, fully_random
class TestCase(unittest.TestCase):
def test_shape(self):
self.assertEqual((5, 20), arr.shape, msg="Wrong shape of the array 'arr'.")
self.assertEqual((5, 20), permuted_2d.shape, msg="Wrong shape of the array 'permuted_2d'.")
self.assertEqual((5, 20), fully_random.shape, msg="Wrong shape of the array 'fully_random'.")
def test_arr(self):
for i in arr:
# This test checks if in each row the minimum element goes first and maximum - last.
self.assertTrue(i[0] == min(i) and i[-1] == max(i), msg="'arr' should be shuffled along the 0th axis.")
def test_two_d(self):
for i in permuted_2d:
# This test checks that differences between all neighboring elements in rows of the array
# are not equal to 1 (in non-shuffled rows they would be).
self.assertFalse(all([(x - i[i.tolist().index(x) - 1]) == 1 for x in i if i.tolist().index(x) > 0]),
msg="'permuted_2d' should be shuffled along the 1st axis.")
def test_random(self):
# This test checks if elements were also randomized between the rows.
for i in fully_random:
self.assertTrue(max(i) - min(i) > 19, "'fully_random' needs to be fully shuffled.")
| import unittest
import numpy as np
from task import arr, permuted_2d, fully_random
class TestCase(unittest.TestCase):
def test_shape(self):
self.assertEqual((5, 20), arr.shape, msg="Wrong shape of the array 'arr'.")
self.assertEqual((5, 20), permuted_2d.shape, msg="Wrong shape of the array 'permuted_2d'.")
self.assertEqual((5, 20), fully_random.shape, msg="Wrong shape of the array 'fully_random'.")
def test_arr(self):
for i in arr:
# This test checks if in each row the minimum element goes first and maximum - last.
self.assertTrue(i[0] == min(i) and i[-1] == max(i), msg="'arr' should be shuffled along the 0th axis.")
def test_two_d(self):
for i in permuted_2d:
# This test checks that differences between all neighboring elements in rows of the array
# are not equal to 1 (in non-shuffled rows they would be).
self.assertFalse(all([(x - i[i.tolist().index(x) - 1]) == 1 for x in i if i.tolist().index(x) > 0]),
msg="'permuted_2d' should be shuffled along the 1st axis.")
def test_random(self):
# This test checks if elements were also randomized between the rows.
for i in fully_random:
self.assertTrue(max(i) - min(i) > 19, "'fully_random' needs to be fully shuffled.")
| en | 0.950242 | # This test checks if in each row the minimum element goes first and maximum - last. # This test checks that differences between all neighboring elements in rows of the array # are not equal to 1 (in non-shuffled rows they would be). # This test checks if elements were also randomized between the rows. | 3.429708 | 3 |
resources/lib/channelui.py | lausitzer/plugin.video.mediathekview | 0 | 8778 | # -*- coding: utf-8 -*-
"""
The channel model UI module
Copyright 2017-2018, <NAME> and <NAME>
SPDX-License-Identifier: MIT
"""
# pylint: disable=import-error
import os
import xbmcgui
import xbmcplugin
import resources.lib.mvutils as mvutils
from resources.lib.channel import Channel
class ChannelUI(Channel):
"""
The channel model view class
Args:
plugin(MediathekView): the plugin object
sortmethods(array, optional): an array of sort methods
for the directory representation. Default is
`[ xbmcplugin.SORT_METHOD_TITLE ]`
nextdir(str, optional):
"""
def __init__(self, plugin, sortmethods=None, nextdir='initial'):
super(ChannelUI, self).__init__()
self.plugin = plugin
self.handle = plugin.addon_handle
self.nextdir = nextdir
self.sortmethods = sortmethods if sortmethods is not None else [
xbmcplugin.SORT_METHOD_TITLE]
self.count = 0
def begin(self):
"""
Begin a directory containing channels
"""
for method in self.sortmethods:
xbmcplugin.addSortMethod(self.handle, method)
def add(self, altname=None):
"""
Add the current entry to the directory
Args:
altname(str, optional): alternative name for the entry
"""
resultingname = self.channel if self.count == 0 else '%s (%d)' % (
self.channel, self.count, )
list_item = xbmcgui.ListItem(
label=resultingname if altname is None else altname)
icon = os.path.join(
self.plugin.path,
'resources',
'icons',
self.channel.lower() + '-m.png'
)
list_item.setArt({
'thumb': icon,
'icon': icon
})
info_labels = {
'title': resultingname,
'sorttitle': resultingname.lower()
}
list_item.setInfo(type='video', infoLabels=info_labels)
xbmcplugin.addDirectoryItem(
handle=self.handle,
url=mvutils.build_url({
'mode': self.nextdir,
'channel': self.channelid
}),
listitem=list_item,
isFolder=True
)
def end(self):
""" Finish a directory containing channels """
xbmcplugin.endOfDirectory(self.handle)
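# Illustrative usage sketch (assumption, not part of the add-on):
#   ui = ChannelUI(plugin, nextdir='list-shows')
#   ui.begin()
#   ... populate ui.channel / ui.channelid for each entry and call ui.add() ...
#   ui.end()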
| # -*- coding: utf-8 -*-
"""
The channel model UI module
Copyright 2017-2018, <NAME> and <NAME>
SPDX-License-Identifier: MIT
"""
# pylint: disable=import-error
import os
import xbmcgui
import xbmcplugin
import resources.lib.mvutils as mvutils
from resources.lib.channel import Channel
class ChannelUI(Channel):
"""
The channel model view class
Args:
plugin(MediathekView): the plugin object
sortmethods(array, optional): an array of sort methods
for the directory representation. Default is
`[ xbmcplugin.SORT_METHOD_TITLE ]`
nextdir(str, optional):
"""
def __init__(self, plugin, sortmethods=None, nextdir='initial'):
super(ChannelUI, self).__init__()
self.plugin = plugin
self.handle = plugin.addon_handle
self.nextdir = nextdir
self.sortmethods = sortmethods if sortmethods is not None else [
xbmcplugin.SORT_METHOD_TITLE]
self.count = 0
def begin(self):
"""
Begin a directory containing channels
"""
for method in self.sortmethods:
xbmcplugin.addSortMethod(self.handle, method)
def add(self, altname=None):
"""
Add the current entry to the directory
Args:
altname(str, optional): alternative name for the entry
"""
resultingname = self.channel if self.count == 0 else '%s (%d)' % (
self.channel, self.count, )
list_item = xbmcgui.ListItem(
label=resultingname if altname is None else altname)
icon = os.path.join(
self.plugin.path,
'resources',
'icons',
self.channel.lower() + '-m.png'
)
list_item.setArt({
'thumb': icon,
'icon': icon
})
info_labels = {
'title': resultingname,
'sorttitle': resultingname.lower()
}
list_item.setInfo(type='video', infoLabels=info_labels)
xbmcplugin.addDirectoryItem(
handle=self.handle,
url=mvutils.build_url({
'mode': self.nextdir,
'channel': self.channelid
}),
listitem=list_item,
isFolder=True
)
def end(self):
""" Finish a directory containing channels """
xbmcplugin.endOfDirectory(self.handle)
| en | 0.52842 | # -*- coding: utf-8 -*- The channel model UI module Copyright 2017-2018, <NAME> and <NAME> SPDX-License-Identifier: MIT # pylint: disable=import-error The channel model view class Args: plugin(MediathekView): the plugin object sortmethods(array, optional): an array of sort methods for the directory representation. Default is `[ xbmcplugin.SORT_METHOD_TITLE ]` nextdir(str, optional): Begin a directory containing channels Add the current entry to the directory Args: altname(str, optional): alternative name for the entry Finish a directory containing channels | 2.299552 | 2 |
getconf.py | smk762/Dragonhound | 3 | 8779 | #!/usr/bin/env python3
#Credit to @Alright for the RPCs
import re
import os
import requests
import json
import platform
# define function that fetches rpc creds from .conf
def def_credentials(chain):
operating_system = platform.system()
if operating_system == 'Darwin':
ac_dir = os.environ['HOME'] + '/Library/Application Support/Komodo'
elif operating_system == 'Linux':
ac_dir = os.environ['HOME'] + '/.komodo'
elif operating_system == 'Win64':
ac_dir = "dont have windows machine now to test"
# define config file path
if chain == 'KMD':
coin_config_file = str(ac_dir + '/komodo.conf')
else:
coin_config_file = str(ac_dir + '/' + chain + '/' + chain + '.conf')
#define rpc creds
with open(coin_config_file, 'r') as f:
#print("Reading config file for credentials:", coin_config_file)
for line in f:
l = line.rstrip()
if re.search('rpcuser', l):
rpcuser = l.replace('rpcuser=', '')
elif re.search('rpcpassword', l):
rpcpassword = l.replace('rpcpassword=', '')
elif re.search('rpcport', l):
rpcport = l.replace('rpcport=', '')
return('http://' + rpcuser + ':' + rpcpassword + '@127.0.0.1:' + rpcport)
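# Illustrative example (values are assumptions): with a komodo.conf containing
# rpcuser=user, rpcpassword=secret and rpcport=7771, def_credentials('KMD')
# would return "http://user:secret@127.0.0.1:7771".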
# define function that posts json data
def post_rpc(url, payload, auth=None):
try:
r = requests.post(url, data=json.dumps(payload), auth=auth)
return(json.loads(r.text))
except Exception as e:
raise Exception("Couldn't connect to " + url + ": ", e)
# Return current -pubkey=
def getpubkey_rpc(chain):
getinfo_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "getinfo",
"params": []}
getinfo_result = post_rpc(def_credentials(chain), getinfo_payload)
return(getinfo_result['result']['pubkey'])
# return latest batontxid from all publishers
def get_latest_batontxids(chain, oracletxid):
oraclesinfo_result = oraclesinfo_rpc(chain, oracletxid)
latest_batontxids = {}
# fill "latest_batontxids" dictionary with publisher:batontxid data
for i in oraclesinfo_result['registered']:
latest_batontxids[i['publisher']] = i['batontxid']
return(latest_batontxids)
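# Illustrative result shape (not actual data): get_latest_batontxids returns a
# dict keyed by publisher address, e.g.
#   {"RPublisherAddr...": "batontxid_hex..."}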
#VANILLA RPC
def sendrawtx_rpc(chain, rawtx):
sendrawtx_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "sendrawtransaction",
"params": [rawtx]}
#rpcurl = def_credentials(chain)
return(post_rpc(def_credentials(chain), sendrawtx_payload))
def signmessage_rpc(chain, address, message):
signmessage_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "signmessage",
"params": [
address,
message
]
}
signmessage_result = post_rpc(def_credentials(chain), signmessage_payload)
return(signmessage_result['result'])
def verifymessage_rpc(chain, address, signature, message):
verifymessage_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "verifymessage",
"params": [
address,
signature,
message
]
}
verifymessage_result = post_rpc(def_credentials(chain), verifymessage_payload)
return(verifymessage_result['result'])
def kvsearch_rpc(chain, key):
kvsearch_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "kvsearch",
"params": [
key
]
}
kvsearch_result = post_rpc(def_credentials(chain), kvsearch_payload)
return(kvsearch_result['result'])
def kvupdate_rpc(chain, key, value, days, password):
# create dynamic kvupdate payload
kvupdate_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "kvupdate",
"params": [
key,
value,
str(days),
password]}
# make kvupdate rpc call
kvupdate_result = post_rpc(def_credentials(chain), kvupdate_payload)
return(kvupdate_result)
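# Illustrative call (argument values are assumptions):
#   kvupdate_rpc('KMD', 'mykey', 'hello world', 1, 'mypassphrase')
# Note that the day count is cast to a string before being placed in the payload.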
def oraclesdata_rpc(chain, oracletxid, hexstr):
oraclesdata_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclesdata",
"params": [
oracletxid,
hexstr]}
oraclesdata_result = post_rpc(def_credentials(chain), oraclesdata_payload)
return(oraclesdata_result['result'])
def oraclescreate_rpc(chain, name, description, oracle_type):
oraclescreate_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclescreate",
"params": [
name,
description,
oracle_type]}
oraclescreate_result = post_rpc(def_credentials(chain), oraclescreate_payload)
return(oraclescreate_result['result'])
def oraclesinfo_rpc(chain, oracletxid):
oraclesinfo_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclesinfo",
"params": [oracletxid]}
oraclesinfo_result = post_rpc(def_credentials(chain), oraclesinfo_payload)
return(oraclesinfo_result['result'])
def oracleslist_rpc(chain):
oracleslist_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oracleslist",
"params": []}
oracleslist_result = post_rpc(def_credentials(chain), oracleslist_payload)
return(oracleslist_result['result'])
def oraclessubscribe_rpc(chain, oracletxid, publisher, amount):
oraclessubscribe_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclessubscribe",
"params": [oracletxid, publisher, amount]}
oraclessubscribe_result = post_rpc(def_credentials(chain), oraclessubscribe_payload)
return(oraclessubscribe_result['result'])
def oraclesregister_rpc(chain, oracletxid, datafee):
oraclesregister_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclesregister",
"params": [
oracletxid,
str(datafee)]}
oraclesregister_result = post_rpc(def_credentials(chain), oraclesregister_payload)
return(oraclesregister_result['result'])
def oraclessamples_rpc(chain, oracletxid, batonutxo, num):
oraclessamples_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclessamples",
"params": [
oracletxid,
batonutxo,
str(num)]}
oraclessamples_result = post_rpc(def_credentials(chain), oraclessamples_payload)
return(oraclessamples_result['result'])
def getlastsegidstakes_rpc(chain, depth):
getlastsegidstakes_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "getlastsegidstakes",
"params": [depth]}
getlastsegidstakes_result = post_rpc(def_credentials(chain), getlastsegidstakes_payload)
return(getlastsegidstakes_result['result'])
| #!/usr/bin/env python3
#Credit to @Alright for the RPCs
import re
import os
import requests
import json
import platform
# define function that fetches rpc creds from .conf
def def_credentials(chain):
operating_system = platform.system()
if operating_system == 'Darwin':
ac_dir = os.environ['HOME'] + '/Library/Application Support/Komodo'
elif operating_system == 'Linux':
ac_dir = os.environ['HOME'] + '/.komodo'
elif operating_system == 'Win64':
ac_dir = "dont have windows machine now to test"
# define config file path
if chain == 'KMD':
coin_config_file = str(ac_dir + '/komodo.conf')
else:
coin_config_file = str(ac_dir + '/' + chain + '/' + chain + '.conf')
#define rpc creds
with open(coin_config_file, 'r') as f:
#print("Reading config file for credentials:", coin_config_file)
for line in f:
l = line.rstrip()
if re.search('rpcuser', l):
rpcuser = l.replace('rpcuser=', '')
elif re.search('rpcpassword', l):
rpcpassword = l.replace('rpcpassword=', '')
elif re.search('rpcport', l):
rpcport = l.replace('rpcport=', '')
return('http://' + rpcuser + ':' + rpcpassword + '@127.0.0.1:' + rpcport)
# define function that posts json data
def post_rpc(url, payload, auth=None):
try:
r = requests.post(url, data=json.dumps(payload), auth=auth)
return(json.loads(r.text))
except Exception as e:
raise Exception("Couldn't connect to " + url + ": ", e)
# Return current -pubkey=
def getpubkey_rpc(chain):
getinfo_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "getinfo",
"params": []}
getinfo_result = post_rpc(def_credentials(chain), getinfo_payload)
return(getinfo_result['result']['pubkey'])
# return latest batontxid from all publishers
def get_latest_batontxids(chain, oracletxid):
oraclesinfo_result = oraclesinfo_rpc(chain, oracletxid)
latest_batontxids = {}
# fill "latest_batontxids" dictionary with publisher:batontxid data
for i in oraclesinfo_result['registered']:
latest_batontxids[i['publisher']] = i['batontxid']
return(latest_batontxids)
#VANILLA RPC
def sendrawtx_rpc(chain, rawtx):
sendrawtx_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "sendrawtransaction",
"params": [rawtx]}
#rpcurl = def_credentials(chain)
return(post_rpc(def_credentials(chain), sendrawtx_payload))
def signmessage_rpc(chain, address, message):
signmessage_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "signmessage",
"params": [
address,
message
]
}
signmessage_result = post_rpc(def_credentials(chain), signmessage_payload)
return(signmessage_result['result'])
def verifymessage_rpc(chain, address, signature, message):
verifymessage_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "verifymessage",
"params": [
address,
signature,
message
]
}
verifymessage_result = post_rpc(def_credentials(chain), verifymessage_payload)
return(verifymessage_result['result'])
def kvsearch_rpc(chain, key):
kvsearch_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "kvsearch",
"params": [
key
]
}
kvsearch_result = post_rpc(def_credentials(chain), kvsearch_payload)
return(kvsearch_result['result'])
def kvupdate_rpc(chain, key, value, days, password):
# create dynamic kvupdate payload
kvupdate_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "kvupdate",
"params": [
key,
value,
str(days),
password]}
# make kvupdate rpc call
kvupdate_result = post_rpc(def_credentials(chain), kvupdate_payload)
return(kvupdate_result)
def oraclesdata_rpc(chain, oracletxid, hexstr):
oraclesdata_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclesdata",
"params": [
oracletxid,
hexstr]}
oraclesdata_result = post_rpc(def_credentials(chain), oraclesdata_payload)
return(oraclesdata_result['result'])
def oraclescreate_rpc(chain, name, description, oracle_type):
oraclescreate_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclescreate",
"params": [
name,
description,
oracle_type]}
oraclescreate_result = post_rpc(def_credentials(chain), oraclescreate_payload)
return(oraclescreate_result['result'])
def oraclesinfo_rpc(chain, oracletxid):
oraclesinfo_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclesinfo",
"params": [oracletxid]}
oraclesinfo_result = post_rpc(def_credentials(chain), oraclesinfo_payload)
return(oraclesinfo_result['result'])
def oracleslist_rpc(chain):
oracleslist_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oracleslist",
"params": []}
oracleslist_result = post_rpc(def_credentials(chain), oracleslist_payload)
return(oracleslist_result['result'])
def oraclessubscribe_rpc(chain, oracletxid, publisher, amount):
oraclessubscribe_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclessubscribe",
"params": [oracletxid, publisher, amount]}
oraclessubscribe_result = post_rpc(def_credentials(chain), oraclessubscribe_payload)
return(oraclessubscribe_result['result'])
def oraclesregister_rpc(chain, oracletxid, datafee):
oraclesregister_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclesregister",
"params": [
oracletxid,
str(datafee)]}
oraclesregister_result = post_rpc(def_credentials(chain), oraclesregister_payload)
return(oraclesregister_result['result'])
def oraclessamples_rpc(chain, oracletxid, batonutxo, num):
oraclessamples_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "oraclessamples",
"params": [
oracletxid,
batonutxo,
str(num)]}
oraclessamples_result = post_rpc(def_credentials(chain), oraclessamples_payload)
return(oraclessamples_result['result'])
def getlastsegidstakes_rpc(chain, depth):
getlastsegidstakes_payload = {
"jsonrpc": "1.0",
"id": "python",
"method": "getlastsegidstakes",
"params": [depth]}
getlastsegidstakes_result = post_rpc(def_credentials(chain), getlastsegidstakes_payload)
return(getlastsegidstakes_result['result'])
| en | 0.441511 | #!/usr/bin/env python3 #Credit to @Alright for the RPCs # define function that fetchs rpc creds from .conf # define config file path #define rpc creds #print("Reading config file for credentials:", coin_config_file) # define function that posts json data # Return current -pubkey= # return latest batontxid from all publishers # fill "latest_batontxids" dictionary with publisher:batontxid data #VANILLA RPC #rpcurl = def_credentials(chain) # create dynamic oraclessamples payload # make kvupdate rpc call | 2.527189 | 3 |
cwr/parser/decoder/dictionary.py | orenyodfat/CWR-DataApi | 37 | 8780 | # -*- coding: utf-8 -*-
from cwr.acknowledgement import AcknowledgementRecord, MessageRecord
from cwr.agreement import AgreementRecord, AgreementTerritoryRecord, \
InterestedPartyForAgreementRecord
from cwr.group import Group, GroupHeader, GroupTrailer
from cwr.info import AdditionalRelatedInfoRecord
from cwr.parser.decoder.common import Decoder
from cwr.interested_party import IPTerritoryOfControlRecord, Publisher, \
PublisherRecord, Writer, PublisherForWriterRecord, WriterRecord
from cwr.non_roman_alphabet import NonRomanAlphabetAgreementPartyRecord, \
NonRomanAlphabetOtherWriterRecord, NonRomanAlphabetPerformanceDataRecord, \
NonRomanAlphabetPublisherNameRecord, NonRomanAlphabetTitleRecord, \
NonRomanAlphabetWorkRecord, NonRomanAlphabetWriterNameRecord
from cwr.transmission import Transmission, TransmissionTrailer, \
TransmissionHeader
from cwr.work import RecordingDetailRecord, ComponentRecord, \
AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, \
InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, \
WorkRecord
from cwr.file import CWRFile, FileTag
from cwr.other import AVIKey, VISAN
from cwr.table_value import MediaTypeValue, TableValue, InstrumentValue
"""
Classes for transforming dictionaries into instances of the CWR model.
There is a decoder for each of the model classes, and all of them expect a
dictionary having at least one key for each field, having the same name as the
field, which will refer to a valid value.
As said, the values on the dictionary should be valid values, for example if
an integer is expected, then the dictionary contains an integer. The values
contained in the dictionary entries should not need to be parsed.
These decoders are useful for handling JSON transmissions or Mongo databases.
"""
__author__ = '<NAME>'
__license__ = 'MIT'
__status__ = 'Development'
class TransactionRecordDictionaryDecoder(Decoder):
def __init__(self):
super(TransactionRecordDictionaryDecoder, self).__init__()
self._decoders = {}
self._decoders['ACK'] = AcknowledgementDictionaryDecoder()
self._decoders['AGR'] = AgreementDictionaryDecoder()
self._decoders['TER'] = AgreementTerritoryDictionaryDecoder()
self._decoders['ARI'] = AdditionalRelatedInformationDictionaryDecoder()
self._decoders['ALT'] = AlternateTitleDictionaryDecoder()
self._decoders['EWT'] = AuthoredWorkDictionaryDecoder()
self._decoders['VER'] = AuthoredWorkDictionaryDecoder()
self._decoders['COM'] = ComponentDictionaryDecoder()
self._decoders['IPA'] = InterestedPartyForAgreementDictionaryDecoder()
self._decoders['SPT'] = IPTerritoryOfControlDictionaryDecoder()
self._decoders['SWT'] = IPTerritoryOfControlDictionaryDecoder()
self._decoders['IND'] = InstrumentationDetailDictionaryDecoder()
self._decoders['INS'] = InstrumentationSummaryDictionaryDecoder()
self._decoders['MSG'] = MessageDictionaryDecoder()
self._decoders['PER'] = PerformingArtistDictionaryDecoder()
self._decoders['PWR'] = PublisherForWriterDictionaryDecoder()
self._decoders['REC'] = RecordingDetailDictionaryDecoder()
self._decoders['EXC'] = WorkDictionaryDecoder()
self._decoders['ISW'] = WorkDictionaryDecoder()
self._decoders['NWR'] = WorkDictionaryDecoder()
self._decoders['REV'] = WorkDictionaryDecoder()
self._decoders['ORN'] = WorkOriginDictionaryDecoder()
self._decoders['SWR'] = WriterRecordDictionaryDecoder()
self._decoders['OWR'] = WriterRecordDictionaryDecoder()
self._decoders[
'NPA'] = NonRomanAlphabetAgreementPartyDictionaryDecoder()
self._decoders['NOW'] = NonRomanAlphabetOtherWriterDictionaryDecoder()
self._decoders[
'NPR'] = NonRomanAlphabetPerformanceDataDictionaryDecoder()
self._decoders['NPN'] = NonRomanAlphabetPublisherNameDictionaryDecoder()
self._decoders['NAT'] = NonRomanAlphabetTitleDictionaryDecoder()
self._decoders['NET'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NCT'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NVT'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NWN'] = NonRomanAlphabetWriterNameDictionaryDecoder()
self._decoders['SPU'] = PublisherRecordDictionaryDecoder()
self._decoders['OPU'] = PublisherRecordDictionaryDecoder()
def decode(self, data):
return self._decoders[data['record_type']].decode(data)
class AcknowledgementDictionaryDecoder(Decoder):
def __init__(self):
super(AcknowledgementDictionaryDecoder, self).__init__()
def decode(self, data):
return AcknowledgementRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
original_group_id=data[
'original_group_id'],
original_transaction_sequence_n=data[
'original_transaction_sequence_n'],
original_transaction_type=data[
'original_transaction_type'],
transaction_status=data[
'transaction_status'],
creation_date_time=data[
'creation_date_time'],
processing_date=data['processing_date'],
creation_title=data['creation_title'],
submitter_creation_n=data[
'submitter_creation_n'],
recipient_creation_n=data[
'recipient_creation_n'])
class AgreementDictionaryDecoder(Decoder):
def __init__(self):
super(AgreementDictionaryDecoder, self).__init__()
def decode(self, data):
return AgreementRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
submitter_agreement_n=data[
'submitter_agreement_n'],
agreement_type=data['agreement_type'],
agreement_start_date=data[
'agreement_start_date'],
prior_royalty_status=data[
'prior_royalty_status'],
post_term_collection_status=data[
'post_term_collection_status'],
number_of_works=data['number_of_works'],
society_assigned_agreement_n=data[
'society_assigned_agreement_n'],
international_standard_code=data[
'international_standard_code'],
sales_manufacture_clause=data[
'sales_manufacture_clause'],
agreement_end_date=data['agreement_end_date'],
date_of_signature=data['date_of_signature'],
retention_end_date=data['retention_end_date'],
prior_royalty_start_date=data[
'prior_royalty_start_date'],
post_term_collection_end_date=data[
'post_term_collection_end_date'],
shares_change=data['shares_change'],
advance_given=data['advance_given'])
class AgreementTerritoryDictionaryDecoder(Decoder):
def __init__(self):
super(AgreementTerritoryDictionaryDecoder, self).__init__()
def decode(self, data):
return AgreementTerritoryRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
tis_numeric_code=data[
'tis_numeric_code'],
inclusion_exclusion_indicator=data[
'inclusion_exclusion_indicator'])
class AdditionalRelatedInformationDictionaryDecoder(Decoder):
def __init__(self):
super(AdditionalRelatedInformationDictionaryDecoder, self).__init__()
def decode(self, data):
return AdditionalRelatedInfoRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
society_n=data['society_n'],
type_of_right=data['type_of_right'],
work_n=data['work_n'],
subject_code=data['subject_code'],
note=data['note'])
class AlternateTitleDictionaryDecoder(Decoder):
def __init__(self):
super(AlternateTitleDictionaryDecoder, self).__init__()
def decode(self, data):
return AlternateTitleRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
alternate_title=data['alternate_title'],
title_type=data['title_type'],
language_code=data['language_code'])
class AuthoredWorkDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(AuthoredWorkDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_1 = self._ipi_base_decoder.decode(data[
'writer_1_ipi_base_n'])
ipi_base_2 = self._ipi_base_decoder.decode(data[
'writer_2_ipi_base_n'])
return AuthoredWorkRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
title=data['title'],
submitter_work_n=data['submitter_work_n'],
writer_1_first_name=data[
'writer_1_first_name'],
writer_1_last_name=data['writer_1_last_name'],
writer_2_first_name=data[
'writer_2_first_name'],
writer_2_last_name=data['writer_2_last_name'],
writer_1_ipi_base_n=ipi_base_1,
writer_1_ipi_name_n=data[
'writer_1_ipi_name_n'],
writer_2_ipi_base_n=ipi_base_2,
writer_2_ipi_name_n=data[
'writer_2_ipi_name_n'],
source=data['source'],
language_code=data['language_code'],
iswc=data['iswc'])
class ComponentDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(ComponentDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_1 = self._ipi_base_decoder.decode(data['writer_1_ipi_base_n'])
ipi_base_2 = self._ipi_base_decoder.decode(data['writer_2_ipi_base_n'])
return ComponentRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
title=data['title'],
submitter_work_n=data['submitter_work_n'],
writer_1_last_name=data['writer_1_last_name'],
writer_1_first_name=data['writer_1_first_name'],
writer_2_last_name=data['writer_2_last_name'],
writer_2_first_name=data['writer_2_first_name'],
writer_1_ipi_base_n=ipi_base_1,
writer_1_ipi_name_n=data['writer_1_ipi_name_n'],
writer_2_ipi_base_n=ipi_base_2,
writer_2_ipi_name_n=data['writer_2_ipi_name_n'],
iswc=data['iswc'],
duration=data['duration'])
class GroupHeaderDictionaryDecoder(Decoder):
def __init__(self):
super(GroupHeaderDictionaryDecoder, self).__init__()
def decode(self, data):
return GroupHeader(record_type=data['record_type'],
group_id=data['group_id'],
transaction_type=data['transaction_type'],
version_number=data['version_number'],
batch_request_id=data['batch_request_id'])
class GroupTrailerDictionaryDecoder(Decoder):
def __init__(self):
super(GroupTrailerDictionaryDecoder, self).__init__()
def decode(self, data):
total_monetary_value = None
if 'total_monetary_value' in data:
total_monetary_value = data['total_monetary_value']
currency_indicator = None
if 'currency_indicator' in data:
currency_indicator = data['currency_indicator']
return GroupTrailer(record_type=data['record_type'],
group_id=data['group_id'],
transaction_count=data['transaction_count'],
record_count=data['record_count'],
currency_indicator=currency_indicator,
total_monetary_value=total_monetary_value,
)
class InterestedPartyForAgreementDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(InterestedPartyForAgreementDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base = self._ipi_base_decoder.decode(data['ipi_base_n'])
return InterestedPartyForAgreementRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
ip_n=data['ip_n'],
ip_last_name=data['ip_last_name'],
agreement_role_code=data['agreement_role_code'],
ip_writer_first_name=data['ip_writer_first_name'],
ipi_name_n=data['ipi_name_n'], ipi_base_n=ipi_base,
pr_society=data['pr_society'], pr_share=data['pr_share'],
mr_society=data['mr_society'], mr_share=data['mr_share'],
sr_society=data['sr_society'], sr_share=data['sr_share'])
class IPTerritoryOfControlDictionaryDecoder(Decoder):
def __init__(self):
super(IPTerritoryOfControlDictionaryDecoder, self).__init__()
def decode(self, data):
record = IPTerritoryOfControlRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
ip_n=data['ip_n'],
inclusion_exclusion_indicator=data[
'inclusion_exclusion_indicator'],
tis_numeric_code=data[
'tis_numeric_code'],
sequence_n=data['sequence_n'],
pr_collection_share=data[
'pr_collection_share'],
mr_collection_share=data[
'mr_collection_share'],
shares_change=data['shares_change'])
if 'sr_collection_share' in data:
record.sr_collection_share = data['sr_collection_share']
return record
class InstrumentationDetailDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentationDetailDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentationDetailRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
instrument_code=data[
'instrument_code'],
number_players=data[
'number_players'])
class InstrumentationSummaryDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentationSummaryDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentationSummaryRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
number_voices=data['number_voices'],
standard_instrumentation_type=data['standard_instrumentation_type'],
instrumentation_description=data['instrumentation_description'])
class MessageDictionaryDecoder(Decoder):
def __init__(self):
super(MessageDictionaryDecoder, self).__init__()
def decode(self, data):
return MessageRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
message_type=data['message_type'],
message_text=data['message_text'],
original_record_sequence_n=data[
'original_record_sequence_n'],
message_record_type=data['message_record_type'],
message_level=data['message_level'],
validation_n=data['validation_n'])
class PerformingArtistDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(PerformingArtistDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base = None
if 'performing_artist_ipi_base_n' in data:
ipi_base = self._ipi_base_decoder.decode(data['performing_artist_ipi_base_n'])
performing_artist_first_name = None
if 'performing_artist_first_name' in data:
performing_artist_first_name = data['performing_artist_first_name']
performing_artist_ipi_name_n = None
if 'performing_artist_ipi_name_n' in data:
performing_artist_ipi_name_n = data['performing_artist_ipi_name_n']
return PerformingArtistRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
performing_artist_last_name=data[
'performing_artist_last_name'],
performing_artist_first_name=performing_artist_first_name,
performing_artist_ipi_name_n=performing_artist_ipi_name_n,
performing_artist_ipi_base_n=ipi_base)
class PublisherForWriterDictionaryDecoder(Decoder):
def __init__(self):
super(PublisherForWriterDictionaryDecoder, self).__init__()
def decode(self, data):
publisher_name = None
if 'publisher_name' in data:
publisher_name = data['publisher_name']
return PublisherForWriterRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
publisher_ip_n=data['publisher_ip_n'],
publisher_name=publisher_name,
writer_ip_n=data['writer_ip_n'],
submitter_agreement_n=data[
'submitter_agreement_n'],
society_assigned_agreement_n=data[
'society_assigned_agreement_n'])
class RecordingDetailDictionaryDecoder(Decoder):
def __init__(self):
super(RecordingDetailDictionaryDecoder, self).__init__()
def decode(self, data):
media_type = None
if 'media_type' in data:
media_type = data['media_type']
return RecordingDetailRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
first_release_date=data[
'first_release_date'],
first_release_duration=data[
'first_release_duration'],
first_album_title=data[
'first_album_title'],
first_album_label=data[
'first_album_label'],
first_release_catalog_n=data[
'first_release_catalog_n'],
ean=data['ean'],
isrc=data['isrc'],
recording_format=data['recording_format'],
recording_technique=data[
'recording_technique'],
media_type=media_type)
class FileDictionaryDecoder(Decoder):
def __init__(self):
super(FileDictionaryDecoder, self).__init__()
self._tag_decoder = FileTagDictionaryDecoder()
self._transmission_decoder = TransmissionDictionaryDecoder()
def decode(self, data):
tag = data['tag']
if isinstance(tag, dict):
tag = self._tag_decoder.decode(tag)
transmission = data['transmission']
if isinstance(transmission, dict):
transmission = self._transmission_decoder.decode(transmission)
return CWRFile(tag, transmission)
class TransmissionDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionDictionaryDecoder, self).__init__()
self._header_decoder = TransmissionHeaderDictionaryDecoder()
self._trailer_decoder = TransmissionTrailerDictionaryDecoder()
self._group_decoder = GroupDictionaryDecoder()
def decode(self, data):
header = data['header']
if isinstance(header, dict):
header = self._header_decoder.decode(header)
trailer = data['trailer']
if isinstance(trailer, dict):
trailer = self._trailer_decoder.decode(trailer)
groups = []
if len(data['groups']) > 0:
if isinstance(data['groups'][0], dict):
for group in data['groups']:
groups.append(self._group_decoder.decode(group))
else:
groups = data['groups']
return Transmission(header, trailer, groups)
class GroupDictionaryDecoder(Decoder):
def __init__(self):
super(GroupDictionaryDecoder, self).__init__()
self._header_decoder = GroupHeaderDictionaryDecoder()
self._trailer_decoder = GroupTrailerDictionaryDecoder()
self._transaction_decoder = TransactionRecordDictionaryDecoder()
def decode(self, data):
header = data['group_header']
if isinstance(header, dict):
header = self._header_decoder.decode(header)
trailer = data['group_trailer']
if isinstance(trailer, dict):
trailer = self._trailer_decoder.decode(trailer)
transactions = []
if len(data['transactions']) > 0:
if isinstance(data['transactions'][0][0], dict):
for transaction in data['transactions']:
transaction_records = []
for record in transaction:
transaction_records.append(
self._transaction_decoder.decode(record))
transactions.append(transaction_records)
else:
transactions = data['transactions']
return Group(header, trailer, transactions)
class TransmissionHeaderDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionHeaderDictionaryDecoder, self).__init__()
def decode(self, data):
header = TransmissionHeader(record_type=data['record_type'],
sender_id=data['sender_id'],
sender_name=data['sender_name'],
sender_type=data['sender_type'],
creation_date_time=data[
'creation_date_time'],
transmission_date=data['transmission_date'],
edi_standard=data['edi_standard'])
if 'character_set' in data:
header.character_set = data['character_set']
return header
class TransmissionTrailerDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionTrailerDictionaryDecoder, self).__init__()
def decode(self, data):
return TransmissionTrailer(record_type=data['record_type'],
group_count=data['group_count'],
transaction_count=data['transaction_count'],
record_count=data['record_count'])
class WorkDictionaryDecoder(Decoder):
def __init__(self):
super(WorkDictionaryDecoder, self).__init__()
def decode(self, data):
catalogue_number = None
if 'catalogue_number' in data:
catalogue_number = data['catalogue_number']
exceptional_clause = None
if 'exceptional_clause' in data:
exceptional_clause = data['exceptional_clause']
opus_number = None
if 'opus_number' in data:
opus_number = data['opus_number']
priority_flag = None
if 'priority_flag' in data:
priority_flag = data['priority_flag']
return WorkRecord(record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
submitter_work_n=data['submitter_work_n'],
title=data['title'],
version_type=data['version_type'],
musical_work_distribution_category=data[
'musical_work_distribution_category'],
date_publication_printed_edition=data[
'date_publication_printed_edition'],
text_music_relationship=data[
'text_music_relationship'],
language_code=data['language_code'],
copyright_number=data['copyright_number'],
copyright_date=data['copyright_date'],
music_arrangement=data['music_arrangement'],
lyric_adaptation=data['lyric_adaptation'],
excerpt_type=data['excerpt_type'],
composite_type=data['composite_type'],
composite_component_count=data[
'composite_component_count'],
iswc=data['iswc'],
work_type=data['work_type'],
duration=data['duration'],
catalogue_number=catalogue_number,
opus_number=opus_number,
contact_id=data['contact_id'],
contact_name=data['contact_name'],
recorded_indicator=data['recorded_indicator'],
priority_flag=priority_flag,
exceptional_clause=exceptional_clause,
grand_rights_indicator=data['grand_rights_indicator'])
class WorkOriginDictionaryDecoder(Decoder):
def __init__(self):
super(WorkOriginDictionaryDecoder, self).__init__()
def decode(self, data):
return WorkOriginRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
intended_purpose=data['intended_purpose'],
production_title=data['production_title'],
cd_identifier=data['cd_identifier'],
cut_number=data['cut_number'],
library=data['library'],
bltvr=data['bltvr'],
visan=data['visan'],
production_n=data['production_n'],
episode_title=data['episode_title'],
episode_n=data['episode_n'],
year_production=data['year_production'],
audio_visual_key=data['audio_visual_key'])
class WriterDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(WriterDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_n = self._ipi_base_decoder.decode(data['ipi_base_n'])
return Writer(ip_n=data['ip_n'],
personal_number=data['personal_number'],
ipi_base_n=ipi_base_n,
writer_first_name=data['writer_first_name'],
writer_last_name=data['writer_last_name'],
tax_id=data['tax_id'],
ipi_name_n=data['ipi_name_n'])
class WriterRecordDictionaryDecoder(Decoder):
def __init__(self):
super(WriterRecordDictionaryDecoder, self).__init__()
self._writer_decoder = WriterDictionaryDecoder()
def decode(self, data):
writer = self._writer_decoder.decode(data['writer'])
usa_license = None
if 'usa_license' in data:
usa_license = data['usa_license']
return WriterRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
writer=writer,
writer_designation=data['writer_designation'],
work_for_hire=data['work_for_hire'],
writer_unknown=data['writer_unknown'],
reversionary=data['reversionary'],
first_recording_refusal=data[
'first_recording_refusal'],
usa_license=usa_license,
pr_society=data['pr_society'],
pr_ownership_share=data['pr_ownership_share'],
mr_society=data['mr_society'],
mr_ownership_share=data['mr_ownership_share'],
sr_society=data['sr_society'],
sr_ownership_share=data['sr_ownership_share'])
class NonRomanAlphabetAgreementPartyDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetAgreementPartyDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetAgreementPartyRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
ip_name=data['ip_name'],
ip_writer_name=data['ip_writer_name'],
ip_n=data['ip_n'],
language_code=data['language_code'])
class NonRomanAlphabetOtherWriterDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetOtherWriterDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetOtherWriterRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
writer_first_name=data['writer_first_name'],
writer_name=data['writer_name'],
position=data['position'],
language_code=data['language_code'])
class NonRomanAlphabetPerformanceDataDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(NonRomanAlphabetPerformanceDataDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base = self._ipi_base_decoder.decode(
data['performing_artist_ipi_base_n'])
return NonRomanAlphabetPerformanceDataRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
performing_artist_first_name=data['performing_artist_first_name'],
performing_artist_name=data['performing_artist_name'],
performing_artist_ipi_name_n=data['performing_artist_ipi_name_n'],
performing_artist_ipi_base_n=ipi_base,
language_code=data['language_code'],
performance_language=data['performance_language'],
performance_dialect=data['performance_dialect'])
class NonRomanAlphabetPublisherNameDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetPublisherNameDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetPublisherNameRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
publisher_sequence_n=data['publisher_sequence_n'],
ip_n=data['ip_n'],
publisher_name=data['publisher_name'],
language_code=data['language_code'])
class NonRomanAlphabetTitleDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetTitleDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetTitleRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
title=data['title'],
title_type=data['title_type'],
language_code=data['language_code'])
class NonRomanAlphabetWorkDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetWorkDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetWorkRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
title=data['title'],
language_code=data['language_code'])
class NonRomanAlphabetWriterNameDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetWriterNameDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetWriterNameRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
writer_first_name=data[
'writer_first_name'],
writer_last_name=data[
'writer_last_name'],
ip_n=data['ip_n'],
language_code=data[
'language_code'])
class PublisherDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(PublisherDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
if 'ipi_base_n' in data:
ipi_base = self._ipi_base_decoder.decode(data['ipi_base_n'])
else:
ipi_base = None
return Publisher(ip_n=data['ip_n'],
publisher_name=data['publisher_name'],
ipi_name_n=data['ipi_name_n'],
ipi_base_n=ipi_base,
tax_id=data['tax_id'])
class PublisherRecordDictionaryDecoder(Decoder):
def __init__(self):
super(PublisherRecordDictionaryDecoder, self).__init__()
self._publisher_decoder = PublisherDictionaryDecoder()
def decode(self, data):
publisher = self._publisher_decoder.decode(data['publisher'])
special_agreements = None
if 'special_agreements' in data:
special_agreements = data['special_agreements']
first_recording_refusal = None
if 'first_recording_refusal' in data:
first_recording_refusal = data['first_recording_refusal']
agreement_type = None
if 'agreement_type' in data:
agreement_type = data['agreement_type']
usa_license = None
if 'usa_license' in data:
usa_license = data['usa_license']
international_standard_code = None
if 'international_standard_code' in data:
international_standard_code = data['international_standard_code']
society_assigned_agreement_n = None
if 'society_assigned_agreement_n' in data:
society_assigned_agreement_n = data['society_assigned_agreement_n']
return PublisherRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
publisher=publisher,
publisher_sequence_n=data['publisher_sequence_n'],
submitter_agreement_n=data['submitter_agreement_n'],
publisher_type=data['publisher_type'],
publisher_unknown=data['publisher_unknown'],
pr_society=data['pr_society'],
pr_ownership_share=data['pr_ownership_share'],
mr_society=data['mr_society'],
mr_ownership_share=data['mr_ownership_share'],
sr_society=data['sr_society'],
sr_ownership_share=data['sr_ownership_share'],
special_agreements=special_agreements,
first_recording_refusal=first_recording_refusal,
international_standard_code=international_standard_code,
society_assigned_agreement_n=society_assigned_agreement_n,
agreement_type=agreement_type,
usa_license=usa_license)
class TableValueDictionaryDecoder(Decoder):
def __init__(self):
super(TableValueDictionaryDecoder, self).__init__()
def decode(self, data):
return TableValue(code=data['code'],
name=data['name'],
description=data['description'])
class MediaTypeValueDictionaryDecoder(Decoder):
def __init__(self):
super(MediaTypeValueDictionaryDecoder, self).__init__()
def decode(self, data):
return MediaTypeValue(code=data['code'],
name=data['name'],
media_type=data['media_type'],
duration_max=data['duration_max'],
works_max=data['works_max'],
fragments_max=data['fragments_max'])
class InstrumentValueDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentValueDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentValue(code=data['code'],
name=data['name'],
family=data['family'],
description=data['description'])
class FileTagDictionaryDecoder(Decoder):
def __init__(self):
super(FileTagDictionaryDecoder, self).__init__()
def decode(self, data):
return FileTag(data['year'],
data['sequence_n'],
data['sender'],
data['receiver'],
data['version'])
class AVIKeyDictionaryDecoder(Decoder):
def __init__(self):
super(AVIKeyDictionaryDecoder, self).__init__()
def decode(self, data):
return AVIKey(data['society_code'],
data['av_number'])
class IPIBaseDictionaryDecoder(Decoder):
def __init__(self):
super(IPIBaseDictionaryDecoder, self).__init__()
def decode(self, data):
if data:
result = data
else:
result = None
return result
class ISWCDictionaryDecoder(Decoder):
def __init__(self):
super(ISWCDictionaryDecoder, self).__init__()
def decode(self, data):
if data:
result = data
else:
result = None
return result
class VISANDictionaryDecoder(Decoder):
def __init__(self):
super(VISANDictionaryDecoder, self).__init__()
def decode(self, data):
return data
| # -*- coding: utf-8 -*-
from cwr.acknowledgement import AcknowledgementRecord, MessageRecord
from cwr.agreement import AgreementRecord, AgreementTerritoryRecord, \
InterestedPartyForAgreementRecord
from cwr.group import Group, GroupHeader, GroupTrailer
from cwr.info import AdditionalRelatedInfoRecord
from cwr.parser.decoder.common import Decoder
from cwr.interested_party import IPTerritoryOfControlRecord, Publisher, \
PublisherRecord, Writer, PublisherForWriterRecord, WriterRecord
from cwr.non_roman_alphabet import NonRomanAlphabetAgreementPartyRecord, \
NonRomanAlphabetOtherWriterRecord, NonRomanAlphabetPerformanceDataRecord, \
NonRomanAlphabetPublisherNameRecord, NonRomanAlphabetTitleRecord, \
NonRomanAlphabetWorkRecord, NonRomanAlphabetWriterNameRecord
from cwr.transmission import Transmission, TransmissionTrailer, \
TransmissionHeader
from cwr.work import RecordingDetailRecord, ComponentRecord, \
AlternateTitleRecord, AuthoredWorkRecord, InstrumentationDetailRecord, \
InstrumentationSummaryRecord, PerformingArtistRecord, WorkOriginRecord, \
WorkRecord
from cwr.file import CWRFile, FileTag
from cwr.other import AVIKey, VISAN
from cwr.table_value import MediaTypeValue, TableValue, InstrumentValue
"""
Classes for transforming dictionaries into instances of the CWR model.
There is a decoder for each of the model classes. Each decoder expects a
dictionary with at least one key per field, named after that field and
referring to a valid value.
The values in the dictionary should already be valid: for example, if an
integer is expected, then the dictionary should contain an integer. The values
in the dictionary entries should not need any further parsing.
These decoders are useful for handling JSON transmissions or Mongo databases.
"""
__author__ = '<NAME>'
__license__ = 'MIT'
__status__ = 'Development'
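# A minimal usage sketch (the field values below are made up; any record
# dictionary that carries the keys expected by its decoder works the same way):
#
#     decoder = TransactionRecordDictionaryDecoder()
#     record = decoder.decode({'record_type': 'ALT',
#                              'transaction_sequence_n': 0,
#                              'record_sequence_n': 1,
#                              'alternate_title': 'An alternate title',
#                              'title_type': 'AT',
#                              'language_code': 'EN'})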
class TransactionRecordDictionaryDecoder(Decoder):
def __init__(self):
super(TransactionRecordDictionaryDecoder, self).__init__()
self._decoders = {}
self._decoders['ACK'] = AcknowledgementDictionaryDecoder()
self._decoders['AGR'] = AgreementDictionaryDecoder()
self._decoders['TER'] = AgreementTerritoryDictionaryDecoder()
self._decoders['ARI'] = AdditionalRelatedInformationDictionaryDecoder()
self._decoders['ALT'] = AlternateTitleDictionaryDecoder()
self._decoders['EWT'] = AuthoredWorkDictionaryDecoder()
self._decoders['VER'] = AuthoredWorkDictionaryDecoder()
self._decoders['COM'] = ComponentDictionaryDecoder()
self._decoders['IPA'] = InterestedPartyForAgreementDictionaryDecoder()
self._decoders['SPT'] = IPTerritoryOfControlDictionaryDecoder()
self._decoders['SWT'] = IPTerritoryOfControlDictionaryDecoder()
self._decoders['IND'] = InstrumentationDetailDictionaryDecoder()
self._decoders['INS'] = InstrumentationSummaryDictionaryDecoder()
self._decoders['MSG'] = MessageDictionaryDecoder()
self._decoders['PER'] = PerformingArtistDictionaryDecoder()
self._decoders['PWR'] = PublisherForWriterDictionaryDecoder()
self._decoders['REC'] = RecordingDetailDictionaryDecoder()
self._decoders['EXC'] = WorkDictionaryDecoder()
self._decoders['ISW'] = WorkDictionaryDecoder()
self._decoders['NWR'] = WorkDictionaryDecoder()
self._decoders['REV'] = WorkDictionaryDecoder()
self._decoders['ORN'] = WorkOriginDictionaryDecoder()
self._decoders['SWR'] = WriterRecordDictionaryDecoder()
self._decoders['OWR'] = WriterRecordDictionaryDecoder()
self._decoders[
'NPA'] = NonRomanAlphabetAgreementPartyDictionaryDecoder()
self._decoders['NOW'] = NonRomanAlphabetOtherWriterDictionaryDecoder()
self._decoders[
'NPR'] = NonRomanAlphabetPerformanceDataDictionaryDecoder()
self._decoders['NPN'] = NonRomanAlphabetPublisherNameDictionaryDecoder()
self._decoders['NAT'] = NonRomanAlphabetTitleDictionaryDecoder()
self._decoders['NET'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NCT'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NVT'] = NonRomanAlphabetWorkDictionaryDecoder()
self._decoders['NWN'] = NonRomanAlphabetWriterNameDictionaryDecoder()
self._decoders['SPU'] = PublisherRecordDictionaryDecoder()
self._decoders['OPU'] = PublisherRecordDictionaryDecoder()
def decode(self, data):
return self._decoders[data['record_type']].decode(data)
class AcknowledgementDictionaryDecoder(Decoder):
def __init__(self):
super(AcknowledgementDictionaryDecoder, self).__init__()
def decode(self, data):
return AcknowledgementRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
original_group_id=data[
'original_group_id'],
original_transaction_sequence_n=data[
'original_transaction_sequence_n'],
original_transaction_type=data[
'original_transaction_type'],
transaction_status=data[
'transaction_status'],
creation_date_time=data[
'creation_date_time'],
processing_date=data['processing_date'],
creation_title=data['creation_title'],
submitter_creation_n=data[
'submitter_creation_n'],
recipient_creation_n=data[
'recipient_creation_n'])
class AgreementDictionaryDecoder(Decoder):
def __init__(self):
super(AgreementDictionaryDecoder, self).__init__()
def decode(self, data):
return AgreementRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
submitter_agreement_n=data[
'submitter_agreement_n'],
agreement_type=data['agreement_type'],
agreement_start_date=data[
'agreement_start_date'],
prior_royalty_status=data[
'prior_royalty_status'],
post_term_collection_status=data[
'post_term_collection_status'],
number_of_works=data['number_of_works'],
society_assigned_agreement_n=data[
'society_assigned_agreement_n'],
international_standard_code=data[
'international_standard_code'],
sales_manufacture_clause=data[
'sales_manufacture_clause'],
agreement_end_date=data['agreement_end_date'],
date_of_signature=data['date_of_signature'],
retention_end_date=data['retention_end_date'],
prior_royalty_start_date=data[
'prior_royalty_start_date'],
post_term_collection_end_date=data[
'post_term_collection_end_date'],
shares_change=data['shares_change'],
advance_given=data['advance_given'])
class AgreementTerritoryDictionaryDecoder(Decoder):
def __init__(self):
super(AgreementTerritoryDictionaryDecoder, self).__init__()
def decode(self, data):
return AgreementTerritoryRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
tis_numeric_code=data[
'tis_numeric_code'],
inclusion_exclusion_indicator=data[
'inclusion_exclusion_indicator'])
class AdditionalRelatedInformationDictionaryDecoder(Decoder):
def __init__(self):
super(AdditionalRelatedInformationDictionaryDecoder, self).__init__()
def decode(self, data):
return AdditionalRelatedInfoRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
society_n=data['society_n'],
type_of_right=data['type_of_right'],
work_n=data['work_n'],
subject_code=data['subject_code'],
note=data['note'])
class AlternateTitleDictionaryDecoder(Decoder):
def __init__(self):
super(AlternateTitleDictionaryDecoder, self).__init__()
def decode(self, data):
return AlternateTitleRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
alternate_title=data['alternate_title'],
title_type=data['title_type'],
language_code=data['language_code'])
class AuthoredWorkDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(AuthoredWorkDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_1 = self._ipi_base_decoder.decode(data[
'writer_1_ipi_base_n'])
ipi_base_2 = self._ipi_base_decoder.decode(data[
'writer_2_ipi_base_n'])
return AuthoredWorkRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
title=data['title'],
submitter_work_n=data['submitter_work_n'],
writer_1_first_name=data[
'writer_1_first_name'],
writer_1_last_name=data['writer_1_last_name'],
writer_2_first_name=data[
'writer_2_first_name'],
writer_2_last_name=data['writer_2_last_name'],
writer_1_ipi_base_n=ipi_base_1,
writer_1_ipi_name_n=data[
'writer_1_ipi_name_n'],
writer_2_ipi_base_n=ipi_base_2,
writer_2_ipi_name_n=data[
'writer_2_ipi_name_n'],
source=data['source'],
language_code=data['language_code'],
iswc=data['iswc'])
class ComponentDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(ComponentDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_1 = self._ipi_base_decoder.decode(data['writer_1_ipi_base_n'])
ipi_base_2 = self._ipi_base_decoder.decode(data['writer_2_ipi_base_n'])
return ComponentRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
title=data['title'],
submitter_work_n=data['submitter_work_n'],
writer_1_last_name=data['writer_1_last_name'],
writer_1_first_name=data['writer_1_first_name'],
writer_2_last_name=data['writer_2_last_name'],
writer_2_first_name=data['writer_2_first_name'],
writer_1_ipi_base_n=ipi_base_1,
writer_1_ipi_name_n=data['writer_1_ipi_name_n'],
writer_2_ipi_base_n=ipi_base_2,
writer_2_ipi_name_n=data['writer_2_ipi_name_n'],
iswc=data['iswc'],
duration=data['duration'])
class GroupHeaderDictionaryDecoder(Decoder):
def __init__(self):
super(GroupHeaderDictionaryDecoder, self).__init__()
def decode(self, data):
return GroupHeader(record_type=data['record_type'],
group_id=data['group_id'],
transaction_type=data['transaction_type'],
version_number=data['version_number'],
batch_request_id=data['batch_request_id'])
class GroupTrailerDictionaryDecoder(Decoder):
def __init__(self):
super(GroupTrailerDictionaryDecoder, self).__init__()
def decode(self, data):
total_monetary_value = None
if 'total_monetary_value' in data:
total_monetary_value = data['total_monetary_value']
currency_indicator = None
if 'currency_indicator' in data:
currency_indicator = data['currency_indicator']
return GroupTrailer(record_type=data['record_type'],
group_id=data['group_id'],
transaction_count=data['transaction_count'],
record_count=data['record_count'],
currency_indicator=currency_indicator,
total_monetary_value=total_monetary_value,
)
class InterestedPartyForAgreementDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(InterestedPartyForAgreementDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base = self._ipi_base_decoder.decode(data['ipi_base_n'])
return InterestedPartyForAgreementRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
ip_n=data['ip_n'],
ip_last_name=data['ip_last_name'],
agreement_role_code=data['agreement_role_code'],
ip_writer_first_name=data['ip_writer_first_name'],
ipi_name_n=data['ipi_name_n'], ipi_base_n=ipi_base,
pr_society=data['pr_society'], pr_share=data['pr_share'],
mr_society=data['mr_society'], mr_share=data['mr_share'],
sr_society=data['sr_society'], sr_share=data['sr_share'])
class IPTerritoryOfControlDictionaryDecoder(Decoder):
def __init__(self):
super(IPTerritoryOfControlDictionaryDecoder, self).__init__()
def decode(self, data):
record = IPTerritoryOfControlRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
ip_n=data['ip_n'],
inclusion_exclusion_indicator=data[
'inclusion_exclusion_indicator'],
tis_numeric_code=data[
'tis_numeric_code'],
sequence_n=data['sequence_n'],
pr_collection_share=data[
'pr_collection_share'],
mr_collection_share=data[
'mr_collection_share'],
shares_change=data['shares_change'])
if 'sr_collection_share' in data:
record.sr_collection_share = data['sr_collection_share']
return record
class InstrumentationDetailDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentationDetailDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentationDetailRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
instrument_code=data[
'instrument_code'],
number_players=data[
'number_players'])
class InstrumentationSummaryDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentationSummaryDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentationSummaryRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
number_voices=data['number_voices'],
standard_instrumentation_type=data['standard_instrumentation_type'],
instrumentation_description=data['instrumentation_description'])
class MessageDictionaryDecoder(Decoder):
def __init__(self):
super(MessageDictionaryDecoder, self).__init__()
def decode(self, data):
return MessageRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
message_type=data['message_type'],
message_text=data['message_text'],
original_record_sequence_n=data[
'original_record_sequence_n'],
message_record_type=data['message_record_type'],
message_level=data['message_level'],
validation_n=data['validation_n'])
class PerformingArtistDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(PerformingArtistDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base = None
if 'performing_artist_ipi_base_n' in data:
ipi_base = self._ipi_base_decoder.decode(data['performing_artist_ipi_base_n'])
performing_artist_first_name = None
if 'performing_artist_first_name' in data:
performing_artist_first_name = data['performing_artist_first_name']
performing_artist_ipi_name_n = None
if 'performing_artist_ipi_name_n' in data:
performing_artist_ipi_name_n = data['performing_artist_ipi_name_n']
return PerformingArtistRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
performing_artist_last_name=data[
'performing_artist_last_name'],
performing_artist_first_name=performing_artist_first_name,
performing_artist_ipi_name_n=performing_artist_ipi_name_n,
performing_artist_ipi_base_n=ipi_base)
class PublisherForWriterDictionaryDecoder(Decoder):
def __init__(self):
super(PublisherForWriterDictionaryDecoder, self).__init__()
def decode(self, data):
publisher_name = None
if 'publisher_name' in data:
publisher_name = data['publisher_name']
return PublisherForWriterRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
publisher_ip_n=data['publisher_ip_n'],
publisher_name=publisher_name,
writer_ip_n=data['writer_ip_n'],
submitter_agreement_n=data[
'submitter_agreement_n'],
society_assigned_agreement_n=data[
'society_assigned_agreement_n'])
class RecordingDetailDictionaryDecoder(Decoder):
def __init__(self):
super(RecordingDetailDictionaryDecoder, self).__init__()
def decode(self, data):
media_type = None
if 'media_type' in data:
media_type = data['media_type']
return RecordingDetailRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
first_release_date=data[
'first_release_date'],
first_release_duration=data[
'first_release_duration'],
first_album_title=data[
'first_album_title'],
first_album_label=data[
'first_album_label'],
first_release_catalog_n=data[
'first_release_catalog_n'],
ean=data['ean'],
isrc=data['isrc'],
recording_format=data['recording_format'],
recording_technique=data[
'recording_technique'],
media_type=media_type)
class FileDictionaryDecoder(Decoder):
def __init__(self):
super(FileDictionaryDecoder, self).__init__()
self._tag_decoder = FileTagDictionaryDecoder()
self._transmission_decoder = TransmissionDictionaryDecoder()
def decode(self, data):
tag = data['tag']
if isinstance(tag, dict):
tag = self._tag_decoder.decode(tag)
transmission = data['transmission']
if isinstance(transmission, dict):
transmission = self._transmission_decoder.decode(transmission)
return CWRFile(tag, transmission)
class TransmissionDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionDictionaryDecoder, self).__init__()
self._header_decoder = TransmissionHeaderDictionaryDecoder()
self._trailer_decoder = TransmissionTrailerDictionaryDecoder()
self._group_decoder = GroupDictionaryDecoder()
def decode(self, data):
header = data['header']
if isinstance(header, dict):
header = self._header_decoder.decode(header)
trailer = data['trailer']
if isinstance(trailer, dict):
trailer = self._trailer_decoder.decode(trailer)
groups = []
if len(data['groups']) > 0:
if isinstance(data['groups'][0], dict):
for group in data['groups']:
groups.append(self._group_decoder.decode(group))
else:
groups = data['groups']
return Transmission(header, trailer, groups)
class GroupDictionaryDecoder(Decoder):
def __init__(self):
super(GroupDictionaryDecoder, self).__init__()
self._header_decoder = GroupHeaderDictionaryDecoder()
self._trailer_decoder = GroupTrailerDictionaryDecoder()
self._transaction_decoder = TransactionRecordDictionaryDecoder()
def decode(self, data):
header = data['group_header']
if isinstance(header, dict):
header = self._header_decoder.decode(header)
trailer = data['group_trailer']
if isinstance(trailer, dict):
trailer = self._trailer_decoder.decode(trailer)
transactions = []
if len(data['transactions']) > 0:
if isinstance(data['transactions'][0][0], dict):
for transaction in data['transactions']:
transaction_records = []
for record in transaction:
transaction_records.append(
self._transaction_decoder.decode(record))
transactions.append(transaction_records)
else:
transactions = data['transactions']
return Group(header, trailer, transactions)
class TransmissionHeaderDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionHeaderDictionaryDecoder, self).__init__()
def decode(self, data):
header = TransmissionHeader(record_type=data['record_type'],
sender_id=data['sender_id'],
sender_name=data['sender_name'],
sender_type=data['sender_type'],
creation_date_time=data[
'creation_date_time'],
transmission_date=data['transmission_date'],
edi_standard=data['edi_standard'])
if 'character_set' in data:
header.character_set = data['character_set']
return header
class TransmissionTrailerDictionaryDecoder(Decoder):
def __init__(self):
super(TransmissionTrailerDictionaryDecoder, self).__init__()
def decode(self, data):
return TransmissionTrailer(record_type=data['record_type'],
group_count=data['group_count'],
transaction_count=data['transaction_count'],
record_count=data['record_count'])
class WorkDictionaryDecoder(Decoder):
def __init__(self):
super(WorkDictionaryDecoder, self).__init__()
def decode(self, data):
catalogue_number = None
if 'catalogue_number' in data:
catalogue_number = data['catalogue_number']
exceptional_clause = None
if 'exceptional_clause' in data:
exceptional_clause = data['exceptional_clause']
opus_number = None
if 'opus_number' in data:
opus_number = data['opus_number']
priority_flag = None
if 'priority_flag' in data:
priority_flag = data['priority_flag']
return WorkRecord(record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
submitter_work_n=data['submitter_work_n'],
title=data['title'],
version_type=data['version_type'],
musical_work_distribution_category=data[
'musical_work_distribution_category'],
date_publication_printed_edition=data[
'date_publication_printed_edition'],
text_music_relationship=data[
'text_music_relationship'],
language_code=data['language_code'],
copyright_number=data['copyright_number'],
copyright_date=data['copyright_date'],
music_arrangement=data['music_arrangement'],
lyric_adaptation=data['lyric_adaptation'],
excerpt_type=data['excerpt_type'],
composite_type=data['composite_type'],
composite_component_count=data[
'composite_component_count'],
iswc=data['iswc'],
work_type=data['work_type'],
duration=data['duration'],
catalogue_number=catalogue_number,
opus_number=opus_number,
contact_id=data['contact_id'],
contact_name=data['contact_name'],
recorded_indicator=data['recorded_indicator'],
priority_flag=priority_flag,
exceptional_clause=exceptional_clause,
grand_rights_indicator=data['grand_rights_indicator'])
class WorkOriginDictionaryDecoder(Decoder):
def __init__(self):
super(WorkOriginDictionaryDecoder, self).__init__()
def decode(self, data):
return WorkOriginRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
intended_purpose=data['intended_purpose'],
production_title=data['production_title'],
cd_identifier=data['cd_identifier'],
cut_number=data['cut_number'],
library=data['library'],
bltvr=data['bltvr'],
visan=data['visan'],
production_n=data['production_n'],
episode_title=data['episode_title'],
episode_n=data['episode_n'],
year_production=data['year_production'],
audio_visual_key=data['audio_visual_key'])
class WriterDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(WriterDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base_n = self._ipi_base_decoder.decode(data['ipi_base_n'])
return Writer(ip_n=data['ip_n'],
personal_number=data['personal_number'],
ipi_base_n=ipi_base_n,
writer_first_name=data['writer_first_name'],
writer_last_name=data['writer_last_name'],
tax_id=data['tax_id'],
ipi_name_n=data['ipi_name_n'])
class WriterRecordDictionaryDecoder(Decoder):
def __init__(self):
super(WriterRecordDictionaryDecoder, self).__init__()
self._writer_decoder = WriterDictionaryDecoder()
def decode(self, data):
writer = self._writer_decoder.decode(data['writer'])
usa_license = None
if 'usa_license' in data:
usa_license = data['usa_license']
return WriterRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
writer=writer,
writer_designation=data['writer_designation'],
work_for_hire=data['work_for_hire'],
writer_unknown=data['writer_unknown'],
reversionary=data['reversionary'],
first_recording_refusal=data[
'first_recording_refusal'],
usa_license=usa_license,
pr_society=data['pr_society'],
pr_ownership_share=data['pr_ownership_share'],
mr_society=data['mr_society'],
mr_ownership_share=data['mr_ownership_share'],
sr_society=data['sr_society'],
sr_ownership_share=data['sr_ownership_share'])
class NonRomanAlphabetAgreementPartyDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetAgreementPartyDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetAgreementPartyRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
ip_name=data['ip_name'],
ip_writer_name=data['ip_writer_name'],
ip_n=data['ip_n'],
language_code=data['language_code'])
class NonRomanAlphabetOtherWriterDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetOtherWriterDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetOtherWriterRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
writer_first_name=data['writer_first_name'],
writer_name=data['writer_name'],
position=data['position'],
language_code=data['language_code'])
class NonRomanAlphabetPerformanceDataDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(NonRomanAlphabetPerformanceDataDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
ipi_base = self._ipi_base_decoder.decode(
data['performing_artist_ipi_base_n'])
return NonRomanAlphabetPerformanceDataRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
performing_artist_first_name=data['performing_artist_first_name'],
performing_artist_name=data['performing_artist_name'],
performing_artist_ipi_name_n=data['performing_artist_ipi_name_n'],
performing_artist_ipi_base_n=ipi_base,
language_code=data['language_code'],
performance_language=data['performance_language'],
performance_dialect=data['performance_dialect'])
class NonRomanAlphabetPublisherNameDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetPublisherNameDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetPublisherNameRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
publisher_sequence_n=data['publisher_sequence_n'],
ip_n=data['ip_n'],
publisher_name=data['publisher_name'],
language_code=data['language_code'])
class NonRomanAlphabetTitleDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetTitleDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetTitleRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
title=data['title'],
title_type=data['title_type'],
language_code=data['language_code'])
class NonRomanAlphabetWorkDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetWorkDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetWorkRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
title=data['title'],
language_code=data['language_code'])
class NonRomanAlphabetWriterNameDictionaryDecoder(Decoder):
def __init__(self):
super(NonRomanAlphabetWriterNameDictionaryDecoder, self).__init__()
def decode(self, data):
return NonRomanAlphabetWriterNameRecord(record_type=data['record_type'],
transaction_sequence_n=data[
'transaction_sequence_n'],
record_sequence_n=data[
'record_sequence_n'],
writer_first_name=data[
'writer_first_name'],
writer_last_name=data[
'writer_last_name'],
ip_n=data['ip_n'],
language_code=data[
'language_code'])
class PublisherDictionaryDecoder(Decoder):
def __init__(self, ipi_base_decoder=None):
super(PublisherDictionaryDecoder, self).__init__()
if ipi_base_decoder:
self._ipi_base_decoder = ipi_base_decoder
else:
self._ipi_base_decoder = IPIBaseDictionaryDecoder()
def decode(self, data):
if 'ipi_base_n' in data:
ipi_base = self._ipi_base_decoder.decode(data['ipi_base_n'])
else:
ipi_base = None
return Publisher(ip_n=data['ip_n'],
publisher_name=data['publisher_name'],
ipi_name_n=data['ipi_name_n'],
ipi_base_n=ipi_base,
tax_id=data['tax_id'])
class PublisherRecordDictionaryDecoder(Decoder):
def __init__(self):
super(PublisherRecordDictionaryDecoder, self).__init__()
self._publisher_decoder = PublisherDictionaryDecoder()
def decode(self, data):
publisher = self._publisher_decoder.decode(data['publisher'])
special_agreements = None
if 'special_agreements' in data:
special_agreements = data['special_agreements']
first_recording_refusal = None
if 'first_recording_refusal' in data:
first_recording_refusal = data['first_recording_refusal']
agreement_type = None
if 'agreement_type' in data:
agreement_type = data['agreement_type']
usa_license = None
if 'usa_license' in data:
usa_license = data['usa_license']
international_standard_code = None
if 'international_standard_code' in data:
international_standard_code = data['international_standard_code']
society_assigned_agreement_n = None
if 'society_assigned_agreement_n' in data:
society_assigned_agreement_n = data['society_assigned_agreement_n']
return PublisherRecord(
record_type=data['record_type'],
transaction_sequence_n=data['transaction_sequence_n'],
record_sequence_n=data['record_sequence_n'],
publisher=publisher,
publisher_sequence_n=data['publisher_sequence_n'],
submitter_agreement_n=data['submitter_agreement_n'],
publisher_type=data['publisher_type'],
publisher_unknown=data['publisher_unknown'],
pr_society=data['pr_society'],
pr_ownership_share=data['pr_ownership_share'],
mr_society=data['mr_society'],
mr_ownership_share=data['mr_ownership_share'],
sr_society=data['sr_society'],
sr_ownership_share=data['sr_ownership_share'],
special_agreements=special_agreements,
first_recording_refusal=first_recording_refusal,
international_standard_code=international_standard_code,
society_assigned_agreement_n=society_assigned_agreement_n,
agreement_type=agreement_type,
usa_license=usa_license)
class TableValueDictionaryDecoder(Decoder):
def __init__(self):
super(TableValueDictionaryDecoder, self).__init__()
def decode(self, data):
return TableValue(code=data['code'],
name=data['name'],
description=data['description'])
class MediaTypeValueDictionaryDecoder(Decoder):
def __init__(self):
super(MediaTypeValueDictionaryDecoder, self).__init__()
def decode(self, data):
return MediaTypeValue(code=data['code'],
name=data['name'],
media_type=data['media_type'],
duration_max=data['duration_max'],
works_max=data['works_max'],
fragments_max=data['fragments_max'])
class InstrumentValueDictionaryDecoder(Decoder):
def __init__(self):
super(InstrumentValueDictionaryDecoder, self).__init__()
def decode(self, data):
return InstrumentValue(code=data['code'],
name=data['name'],
family=data['family'],
description=data['description'])
class FileTagDictionaryDecoder(Decoder):
def __init__(self):
super(FileTagDictionaryDecoder, self).__init__()
def decode(self, data):
return FileTag(data['year'],
data['sequence_n'],
data['sender'],
data['receiver'],
data['version'])
class AVIKeyDictionaryDecoder(Decoder):
def __init__(self):
super(AVIKeyDictionaryDecoder, self).__init__()
def decode(self, data):
return AVIKey(data['society_code'],
data['av_number'])
class IPIBaseDictionaryDecoder(Decoder):
def __init__(self):
super(IPIBaseDictionaryDecoder, self).__init__()
def decode(self, data):
if data:
result = data
else:
result = None
return result
class ISWCDictionaryDecoder(Decoder):
def __init__(self):
super(ISWCDictionaryDecoder, self).__init__()
def decode(self, data):
if data:
result = data
else:
result = None
return result
class VISANDictionaryDecoder(Decoder):
def __init__(self):
super(VISANDictionaryDecoder, self).__init__()
def decode(self, data):
return data
| en | 0.864843 | # -*- coding: utf-8 -*- Classes for transforming dictionaries into instances of the CWR model. There is a decoder for each of the model classes, and all of them expect a dictionary having at least one key for each field, having the same name as the field, which will refer to a valid value. As said, the values on the dictionary should be valid values, for example if an integer is expected, then the dictionary contains an integer. The values contained in the dictionary entries should not need to be parsed. These decoders are useful for handling JSON transmissions or Mongo databases. | 1.797038 | 2 |
prebuilt/twrp_fonts.py | imranpopz/android_bootable_recovery-1 | 95 | 8781 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf8 -*-
import codecs,os,gzip,ctypes,ctypes.util,sys
from struct import *
from PIL import Image, ImageDraw, ImageFont
# ====== Python script to convert TrueTypeFonts to TWRP's .dat format ======
# This script was originally made by https://github.com/suky for his chinese version of TWRP
# and then translated to English by feilplane at #twrp of irc.freenode.net.
# However, it was not compatible with vanilla TWRP, so https://github.com/Tasssadar rewrote
# most of it and it now has very little in common with the original script.
class Reference():
def __init__(self, val):
self.__value = val
def get(self):
return self.__value
def set(self, val):
self.__value = val
quiet = Reference(False)
def log(text):
if not quiet.get():
sys.stdout.write(text)
def write_data(f, width, height, offsets, data):
f.write(pack("<I", width))
f.write(pack("<I", height))
for off in offsets:
f.write(pack("<I", off))
f.write(data)
if __name__ == "__main__":
fontsize = Reference(20)
out_fname = Reference("font.dat")
voffset = Reference(None)
padding = Reference(0)
font_fname = Reference(None)
preview = Reference(None)
arg_parser = [
["-s", "--size=", fontsize, int],
["-o", "--output=", out_fname, str],
["-p", "--preview=", preview, str],
[None, "--padding=", padding, int],
["-q", "--quiet", quiet, None],
[None, "--voffset=", voffset, int]
]
argv = sys.argv
argc = len(argv)
i = 1
while i < argc:
arg = argv[i]
arg_next = argv[i+1] if i+1 < argc else None
if arg == "--help" or arg == "-h":
print ("This script converts TrueTypeFonts to .dat file for TWRP recovery.\n\n"
"Usage: %s [SWITCHES] [TRUETYPE FILE]\n\n"
" -h, --help - print help\n"
" -o, --output=[FILE] - output file or '-' for stdout (default: font.dat)\n"
" -p, --preview=[FILE] - generate font preview to png file\n"
" --padding=[PIXELS] - horizontal padding around each character (default: 0)\n"
" -q, --quiet - Do not print any output\n"
" -s, --size=[SIZE IN PIXELS] - specify font size in points (default: 20)\n"
" --voffset=[PIXELS] - vertical offset (default: font size*0.25)\n\n"
"Example:\n"
                " %s -s 40 -o ComicSans_40.dat -p preview.png ComicSans.ttf\n" % (
                    sys.argv[0], sys.argv[0]
                ))
exit(0)
found = False
for p in arg_parser:
if p[0] and arg == p[0] and (arg_next or not p[3]):
if p[3]:
p[2].set(p[3](arg_next))
else:
p[2].set(True)
i += 1
found = True
break
elif p[1] and arg.startswith(p[1]):
if p[3]:
p[2].set(p[3](arg[len(p[1]):]))
else:
p[2].set(True)
found = True
break
if not found:
font_fname.set(arg)
i += 1
if not voffset.get():
voffset.set(int(fontsize.get()*0.25))
if out_fname.get() == "-":
quiet.set(True)
log("Loading font %s...\n" % font_fname.get())
font = ImageFont.truetype(font_fname.get(), fontsize.get(), 0, "utf-32be")
cwidth = 0
cheight = font.getsize('A')[1]
offsets = []
renders = []
data = bytes()
# temp Image and ImageDraw to get access to textsize
res = Image.new('L', (1, 1), 0)
res_draw = ImageDraw.Draw(res)
# Measure each character and render it to separate Image
log("Rendering characters...\n")
for i in range(32, 128):
w, h = res_draw.textsize(chr(i), font)
w += padding.get()*2
offsets.append(cwidth)
cwidth += w
if h > cheight:
cheight = h
ichr = Image.new('L', (w, cheight*2))
ichr_draw = ImageDraw.Draw(ichr)
ichr_draw.text((padding.get(), 0), chr(i), 255, font)
renders.append(ichr)
# Twice the height to account for under-the-baseline characters
cheight *= 2
# Create the result bitmap
log("Creating result bitmap...\n")
res = Image.new('L', (cwidth, cheight), 0)
res_draw = ImageDraw.Draw(res)
# Paste all characters into result bitmap
for i in range(len(renders)):
res.paste(renders[i], (offsets[i], 0))
# uncomment to draw lines separating each character (for debug)
#res_draw.rectangle([offsets[i], 0, offsets[i], cheight], outline="blue")
# crop the blank areas on top and bottom
(_, start_y, _, end_y) = res.getbbox()
res = res.crop((0, start_y, cwidth, end_y))
cheight = (end_y - start_y) + voffset.get()
new_res = Image.new('L', (cwidth, cheight))
new_res.paste(res, (0, voffset.get()))
res = new_res
# save the preview
if preview.get():
log("Saving preview to %s...\n" % preview.get())
res.save(preview.get())
# Pack the data.
# The "data" is a B/W bitmap with all 96 characters next to each other
# on one line. It is as wide as all the characters combined and as
# high as the tallest character, plus padding.
# Each byte contains info about eight pixels, starting from
# highest to lowest bit:
# bits: | 7 6 5 4 3 2 1 0 | 15 14 13 12 11 10 9 8 | ...
# pixels: | 0 1 2 3 4 5 6 7 | 8 9 10 11 12 13 14 15 | ...
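    # Worked example (made-up pixel row): if only pixels 0 and 7 of a byte are
    # set, the packed byte is 0b10000001 (0x81), since pixel 0 lands in bit 7
    # and pixel 7 lands in bit 0.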
log("Packing data...\n")
bit = 0
bit_itr = 0
for c in res.tostring():
# FIXME: How to handle antialiasing?
# if c != '\x00':
# In Python3, c is int, in Python2, c is string. Because of reasons.
try:
fill = (ord(c) >= 127)
except TypeError:
fill = (c >= 127)
if fill:
bit |= (1 << (7-bit_itr))
bit_itr += 1
if bit_itr >= 8:
data += pack("<B", bit)
bit_itr = 0
bit = 0
# Write them to the file.
# Format:
# 000: width
# 004: height
    # 008: offsets of each character (96*uint32)
# 392: data as described above
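    # Reading the file back (a sketch based on the layout above, not used by
    # this script):
    #   with open("font.dat", "rb") as fin:
    #       width, height = unpack("<2I", fin.read(8))
    #       offsets = unpack("<96I", fin.read(96 * 4))
    #       bitmap = fin.read()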
log("Writing to %s...\n" % out_fname.get())
if out_fname.get() == "-":
write_data(sys.stdout, cwidth, cheight, offsets, data)
else:
with open(out_fname.get(), 'wb') as f:
write_data(f, cwidth, cheight, offsets, data)
exit(0)
| #!/usr/bin/env python
# -*- coding: utf8 -*-
import codecs,os,gzip,ctypes,ctypes.util,sys
from struct import *
from PIL import Image, ImageDraw, ImageFont
# ====== Python script to convert TrueTypeFonts to TWRP's .dat format ======
# This script was originally made by https://github.com/suky for his chinese version of TWRP
# and then translated to English by feilplane at #twrp of irc.freenode.net.
# However, it was not compatible with vanilla TWRP, so https://github.com/Tasssadar rewrote
# most of it and it now has very little in common with the original script.
class Reference():
def __init__(self, val):
self.__value = val
def get(self):
return self.__value
def set(self, val):
self.__value = val
quiet = Reference(False)
def log(text):
if not quiet.get():
sys.stdout.write(text)
def write_data(f, width, height, offsets, data):
f.write(pack("<I", width))
f.write(pack("<I", height))
for off in offsets:
f.write(pack("<I", off))
f.write(data)
if __name__ == "__main__":
fontsize = Reference(20)
out_fname = Reference("font.dat")
voffset = Reference(None)
padding = Reference(0)
font_fname = Reference(None)
preview = Reference(None)
arg_parser = [
["-s", "--size=", fontsize, int],
["-o", "--output=", out_fname, str],
["-p", "--preview=", preview, str],
[None, "--padding=", padding, int],
["-q", "--quiet", quiet, None],
[None, "--voffset=", voffset, int]
]
argv = sys.argv
argc = len(argv)
i = 1
while i < argc:
arg = argv[i]
arg_next = argv[i+1] if i+1 < argc else None
if arg == "--help" or arg == "-h":
print ("This script converts TrueTypeFonts to .dat file for TWRP recovery.\n\n"
"Usage: %s [SWITCHES] [TRUETYPE FILE]\n\n"
" -h, --help - print help\n"
" -o, --output=[FILE] - output file or '-' for stdout (default: font.dat)\n"
" -p, --preview=[FILE] - generate font preview to png file\n"
" --padding=[PIXELS] - horizontal padding around each character (default: 0)\n"
" -q, --quiet - Do not print any output\n"
" -s, --size=[SIZE IN PIXELS] - specify font size in points (default: 20)\n"
" --voffset=[PIXELS] - vertical offset (default: font size*0.25)\n\n"
"Example:\n"
                " %s -s 40 -o ComicSans_40.dat -p preview.png ComicSans.ttf\n" % (
                    sys.argv[0], sys.argv[0]
                ))
exit(0)
found = False
for p in arg_parser:
if p[0] and arg == p[0] and (arg_next or not p[3]):
if p[3]:
p[2].set(p[3](arg_next))
else:
p[2].set(True)
i += 1
found = True
break
elif p[1] and arg.startswith(p[1]):
if p[3]:
p[2].set(p[3](arg[len(p[1]):]))
else:
p[2].set(True)
found = True
break
if not found:
font_fname.set(arg)
i += 1
if not voffset.get():
voffset.set(int(fontsize.get()*0.25))
if out_fname.get() == "-":
quiet.set(True)
log("Loading font %s...\n" % font_fname.get())
font = ImageFont.truetype(font_fname.get(), fontsize.get(), 0, "utf-32be")
cwidth = 0
cheight = font.getsize('A')[1]
offsets = []
renders = []
data = bytes()
# temp Image and ImageDraw to get access to textsize
res = Image.new('L', (1, 1), 0)
res_draw = ImageDraw.Draw(res)
# Measure each character and render it to separate Image
log("Rendering characters...\n")
for i in range(32, 128):
w, h = res_draw.textsize(chr(i), font)
w += padding.get()*2
offsets.append(cwidth)
cwidth += w
if h > cheight:
cheight = h
ichr = Image.new('L', (w, cheight*2))
ichr_draw = ImageDraw.Draw(ichr)
ichr_draw.text((padding.get(), 0), chr(i), 255, font)
renders.append(ichr)
# Twice the height to account for under-the-baseline characters
cheight *= 2
# Create the result bitmap
log("Creating result bitmap...\n")
res = Image.new('L', (cwidth, cheight), 0)
res_draw = ImageDraw.Draw(res)
# Paste all characters into result bitmap
for i in range(len(renders)):
res.paste(renders[i], (offsets[i], 0))
# uncomment to draw lines separating each character (for debug)
#res_draw.rectangle([offsets[i], 0, offsets[i], cheight], outline="blue")
# crop the blank areas on top and bottom
(_, start_y, _, end_y) = res.getbbox()
res = res.crop((0, start_y, cwidth, end_y))
cheight = (end_y - start_y) + voffset.get()
new_res = Image.new('L', (cwidth, cheight))
new_res.paste(res, (0, voffset.get()))
res = new_res
# save the preview
if preview.get():
log("Saving preview to %s...\n" % preview.get())
res.save(preview.get())
# Pack the data.
# The "data" is a B/W bitmap with all 96 characters next to each other
# on one line. It is as wide as all the characters combined and as
# high as the tallest character, plus padding.
# Each byte contains info about eight pixels, starting from
# highest to lowest bit:
# bits: | 7 6 5 4 3 2 1 0 | 15 14 13 12 11 10 9 8 | ...
# pixels: | 0 1 2 3 4 5 6 7 | 8 9 10 11 12 13 14 15 | ...
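    # Worked example (made-up pixel row): if only pixels 0 and 7 of a byte are
    # set, the packed byte is 0b10000001 (0x81), since pixel 0 lands in bit 7
    # and pixel 7 lands in bit 0.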
log("Packing data...\n")
bit = 0
bit_itr = 0
for c in res.tostring():
# FIXME: How to handle antialiasing?
# if c != '\x00':
# In Python3, c is int, in Python2, c is string. Because of reasons.
try:
fill = (ord(c) >= 127)
except TypeError:
fill = (c >= 127)
if fill:
bit |= (1 << (7-bit_itr))
bit_itr += 1
if bit_itr >= 8:
data += pack("<B", bit)
bit_itr = 0
bit = 0
# Write them to the file.
# Format:
# 000: width
# 004: height
    # 008: offsets of each character (96*uint32)
# 392: data as described above
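    # Reading the file back (a sketch based on the layout above, not used by
    # this script):
    #   with open("font.dat", "rb") as fin:
    #       width, height = unpack("<2I", fin.read(8))
    #       offsets = unpack("<96I", fin.read(96 * 4))
    #       bitmap = fin.read()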
log("Writing to %s...\n" % out_fname.get())
if out_fname.get() == "-":
write_data(sys.stdout, cwidth, cheight, offsets, data)
else:
with open(out_fname.get(), 'wb') as f:
write_data(f, cwidth, cheight, offsets, data)
exit(0) | en | 0.891531 | #!/usr/bin/env python # -*- coding: utf8 -*- # ====== Python script to convert TrueTypeFonts to TWRP's .dat format ====== # This script was originally made by https://github.com/suky for his chinese version of TWRP # and then translated to English by feilplane at #twrp of irc.freenode.net. # However, it was not compatible with vanilla TWRP, so https://github.com/Tasssadar rewrote # most of it and it now has very little in common with the original script. # temp Image and ImageDraw to get access to textsize # Measure each character and render it to separate Image # Twice the height to account for under-the-baseline characters # Create the result bitmap # Paste all characters into result bitmap # uncomment to draw lines separating each character (for debug) #res_draw.rectangle([offsets[i], 0, offsets[i], cheight], outline="blue") # crop the blank areas on top and bottom # save the preview # Pack the data. # The "data" is a B/W bitmap with all 96 characters next to each other # on one line. It is as wide as all the characters combined and as # high as the tallest character, plus padding. # Each byte contains info about eight pixels, starting from # highest to lowest bit: # bits: | 7 6 5 4 3 2 1 0 | 15 14 13 12 11 10 9 8 | ... # pixels: | 0 1 2 3 4 5 6 7 | 8 9 10 11 12 13 14 15 | ... # FIXME: How to handle antialiasing? # if c != '\x00': # In Python3, c is int, in Python2, c is string. Because of reasons. # Write them to the file. # Format: # 000: width # 004: height # 008: offsets of each characters (96*uint32) # 392: data as described above | 2.422889 | 2 |
open/users/serializers.py | lawrendran/open | 105 | 8782 | import pytz
from rest_auth.serializers import TokenSerializer
from rest_framework.authtoken.models import Token
from rest_framework.exceptions import ValidationError
from rest_framework.fields import (
CharField,
CurrentUserDefault,
HiddenField,
UUIDField,
ChoiceField,
)
from rest_framework.serializers import ModelSerializer, Serializer
from rest_framework.validators import UniqueValidator
from django.contrib.auth.hashers import check_password
from open.users.models import User
class SimpleUserReadSerializer(ModelSerializer):
class Meta:
model = User
fields = (
"name",
"uuid",
)
class UserReadSerializer(ModelSerializer):
class Meta:
model = User
fields = (
"name",
"uuid",
"signed_up_from",
"date_joined",
"username",
"email",
"created",
"modified",
)
class UserTokenSerializer(TokenSerializer):
user = UserReadSerializer()
class Meta:
model = Token
fields = ["key", "user"]
# TODO - this view and serializer is on hold as you figure out registration (later)
class UserCreateSerializer(ModelSerializer):
username = CharField(validators=[UniqueValidator(queryset=User.objects.all())])
# need to make email optional ... prob should think through signup form a little
email = CharField(
validators=[UniqueValidator(queryset=User.objects.all())], required=False
)
password = CharField(write_only=True, min_length=8)
signed_up_from = CharField(
write_only=True, min_length=8, required=False, default="", trim_whitespace=True
)
timezone_string = ChoiceField(
choices=pytz.all_timezones, required=False, default="US/Eastern"
)
class Meta:
model = User
fields = ["username", "email", "password", "signed_up_from", "timezone_string"]
# TODO test - does this work with just username / no email, etc.
def create(self, validated_data):
username = validated_data.pop("username")
password = validated_data.pop("password")
is_betterself_user = False
if validated_data["signed_up_from"] == "betterself":
is_betterself_user = True
validated_data["is_betterself_user"] = is_betterself_user
user = User.objects.create(username=username, **validated_data)
user.set_password(password)
user.save()
return user
class UserDeleteSerializer(Serializer):
    # Most of this is redundant; a separate validation step is not strictly required,
    # but it is kept out of paranoia, in case someone tries to delete their account by mistake.
password = CharField()
user = HiddenField(default=CurrentUserDefault())
uuid = UUIDField()
def validate(self, data):
user = data["user"]
validated_password = check_password(data["password"], user.password)
if not validated_password:
raise ValidationError("Invalid Password Entered")
validated_uuid = str(user.uuid) == str(data["uuid"])
if not validated_uuid:
raise ValidationError("Invalid UUID", str(user.uuid))
validate_user = user.username != "<EMAIL>"
if not validate_user:
raise ValidationError(
f"This is a protected user and cannot be deleted. {user.username}"
)
return data
| import pytz
from rest_auth.serializers import TokenSerializer
from rest_framework.authtoken.models import Token
from rest_framework.exceptions import ValidationError
from rest_framework.fields import (
CharField,
CurrentUserDefault,
HiddenField,
UUIDField,
ChoiceField,
)
from rest_framework.serializers import ModelSerializer, Serializer
from rest_framework.validators import UniqueValidator
from django.contrib.auth.hashers import check_password
from open.users.models import User
class SimpleUserReadSerializer(ModelSerializer):
class Meta:
model = User
fields = (
"name",
"uuid",
)
class UserReadSerializer(ModelSerializer):
class Meta:
model = User
fields = (
"name",
"uuid",
"signed_up_from",
"date_joined",
"username",
"email",
"created",
"modified",
)
class UserTokenSerializer(TokenSerializer):
user = UserReadSerializer()
class Meta:
model = Token
fields = ["key", "user"]
# TODO - this view and serializer is on hold as you figure out registration (later)
class UserCreateSerializer(ModelSerializer):
username = CharField(validators=[UniqueValidator(queryset=User.objects.all())])
# need to make email optional ... prob should think through signup form a little
email = CharField(
validators=[UniqueValidator(queryset=User.objects.all())], required=False
)
password = CharField(write_only=True, min_length=8)
signed_up_from = CharField(
write_only=True, min_length=8, required=False, default="", trim_whitespace=True
)
timezone_string = ChoiceField(
choices=pytz.all_timezones, required=False, default="US/Eastern"
)
class Meta:
model = User
fields = ["username", "email", "password", "signed_up_from", "timezone_string"]
# TODO test - does this work with just username / no email, etc.
def create(self, validated_data):
username = validated_data.pop("username")
password = validated_data.pop("password")
is_betterself_user = False
if validated_data["signed_up_from"] == "betterself":
is_betterself_user = True
validated_data["is_betterself_user"] = is_betterself_user
user = User.objects.create(username=username, **validated_data)
user.set_password(password)
user.save()
return user
class UserDeleteSerializer(Serializer):
    # Most of this is redundant; a separate validation step is not strictly required,
    # but it is kept out of paranoia, in case someone tries to delete their account by mistake.
password = CharField()
user = HiddenField(default=CurrentUserDefault())
uuid = UUIDField()
def validate(self, data):
user = data["user"]
validated_password = check_password(data["password"], user.password)
if not validated_password:
raise ValidationError("Invalid Password Entered")
validated_uuid = str(user.uuid) == str(data["uuid"])
if not validated_uuid:
raise ValidationError("Invalid UUID", str(user.uuid))
validate_user = user.username != "<EMAIL>"
if not validate_user:
raise ValidationError(
f"This is a protected user and cannot be deleted. {user.username}"
)
return data
| en | 0.965376 | # TODO - this view and serializer is on hold as you figure out registration (later) # need to make email optional ... prob should think through signup form a little # TODO test - does this work with just username / no email, etc. # most of this is actually redundant, i don't need to have a validation step, but i do this # out of paranoia reasons that someone may delete their account by mistake | 2.281519 | 2 |
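A brief usage sketch for the serializers above; the module path and the field values are assumptions for illustration only.

# Hypothetical usage of UserCreateSerializer (module path and values are assumptions).
from open.users.serializers import UserCreateSerializer

payload = {
    "username": "demo_user",
    "password": "a-strong-password",
    "signed_up_from": "betterself",
    "timezone_string": "US/Eastern",
}
serializer = UserCreateSerializer(data=payload)
if serializer.is_valid(raise_exception=True):
    user = serializer.save()  # runs create(): hashes the password and sets is_betterself_user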
tests/en/test_asr.py | rhasspy/rhasspy-test | 0 | 8783 | """Automated speech recognition tests."""
import os
import sys
import unittest
from pathlib import Path
import requests
from rhasspyhermes.asr import AsrTextCaptured
from rhasspyhermes.nlu import NluIntent
class AsrEnglishTests(unittest.TestCase):
"""Test automated speech recognition (English)"""
def setUp(self):
self.http_host = os.environ.get("RHASSPY_HTTP_HOST", "localhost")
self.http_port = os.environ.get("RHASSPY_HTTP_PORT", 12101)
self.wav_bytes = Path("wav/en/turn_on_the_living_room_lamp.wav").read_bytes()
def api_url(self, fragment):
return f"http://{self.http_host}:{self.http_port}/api/{fragment}"
def check_status(self, response):
if response.status_code != 200:
print(response.text, file=sys.stderr)
response.raise_for_status()
def test_http_speech_to_text(self):
"""Test speech-to-text HTTP endpoint"""
response = requests.post(self.api_url("speech-to-text"), data=self.wav_bytes)
self.check_status(response)
text = response.content.decode()
self.assertEqual(text, "turn on the living room lamp")
def test_http_speech_to_text_json(self):
"""Text speech-to-text HTTP endpoint (Rhasspy JSON format)"""
response = requests.post(
self.api_url("speech-to-text"),
data=self.wav_bytes,
headers={"Accept": "application/json"},
)
self.check_status(response)
result = response.json()
self.assertEqual(result["text"], "turn on the living room lamp")
def test_http_speech_to_text_hermes(self):
"""Text speech-to-text HTTP endpoint (Hermes format)"""
response = requests.post(
self.api_url("speech-to-text"),
data=self.wav_bytes,
params={"outputFormat": "hermes"},
)
self.check_status(response)
result = response.json()
self.assertEqual(result["type"], "textCaptured")
text_captured = AsrTextCaptured.from_dict(result["value"])
self.assertEqual(text_captured.text, "turn on the living room lamp")
def test_http_speech_to_intent(self):
response = requests.post(self.api_url("speech-to-intent"), data=self.wav_bytes)
self.check_status(response)
result = response.json()
self.assertEqual(result["intent"]["name"], "ChangeLightState")
self.assertEqual(result["text"], "turn on the living room lamp")
self.assertEqual(result["slots"]["name"], "living room lamp")
self.assertEqual(result["slots"]["state"], "on")
def test_http_speech_to_intent_hermes(self):
response = requests.post(
self.api_url("speech-to-intent"),
data=self.wav_bytes,
params={"outputFormat": "hermes"},
)
self.check_status(response)
result = response.json()
self.assertEqual(result["type"], "intent")
nlu_intent = NluIntent.from_dict(result["value"])
self.assertEqual(nlu_intent.raw_input, "turn on the living room lamp")
self.assertEqual(nlu_intent.input, "turn on the living room lamp")
# Intent name and slots
self.assertEqual(nlu_intent.intent.intent_name, "ChangeLightState")
slots_by_name = {slot.slot_name: slot for slot in nlu_intent.slots}
self.assertIn("name", slots_by_name)
self.assertEqual(slots_by_name["name"].value["value"], "living room lamp")
self.assertIn("state", slots_by_name)
self.assertEqual(slots_by_name["state"].value["value"], "on")
| """Automated speech recognition tests."""
import os
import sys
import unittest
from pathlib import Path
import requests
from rhasspyhermes.asr import AsrTextCaptured
from rhasspyhermes.nlu import NluIntent
class AsrEnglishTests(unittest.TestCase):
"""Test automated speech recognition (English)"""
def setUp(self):
self.http_host = os.environ.get("RHASSPY_HTTP_HOST", "localhost")
self.http_port = os.environ.get("RHASSPY_HTTP_PORT", 12101)
self.wav_bytes = Path("wav/en/turn_on_the_living_room_lamp.wav").read_bytes()
def api_url(self, fragment):
return f"http://{self.http_host}:{self.http_port}/api/{fragment}"
def check_status(self, response):
if response.status_code != 200:
print(response.text, file=sys.stderr)
response.raise_for_status()
def test_http_speech_to_text(self):
"""Test speech-to-text HTTP endpoint"""
response = requests.post(self.api_url("speech-to-text"), data=self.wav_bytes)
self.check_status(response)
text = response.content.decode()
self.assertEqual(text, "turn on the living room lamp")
def test_http_speech_to_text_json(self):
"""Text speech-to-text HTTP endpoint (Rhasspy JSON format)"""
response = requests.post(
self.api_url("speech-to-text"),
data=self.wav_bytes,
headers={"Accept": "application/json"},
)
self.check_status(response)
result = response.json()
self.assertEqual(result["text"], "turn on the living room lamp")
def test_http_speech_to_text_hermes(self):
"""Text speech-to-text HTTP endpoint (Hermes format)"""
response = requests.post(
self.api_url("speech-to-text"),
data=self.wav_bytes,
params={"outputFormat": "hermes"},
)
self.check_status(response)
result = response.json()
self.assertEqual(result["type"], "textCaptured")
text_captured = AsrTextCaptured.from_dict(result["value"])
self.assertEqual(text_captured.text, "turn on the living room lamp")
def test_http_speech_to_intent(self):
response = requests.post(self.api_url("speech-to-intent"), data=self.wav_bytes)
self.check_status(response)
result = response.json()
self.assertEqual(result["intent"]["name"], "ChangeLightState")
self.assertEqual(result["text"], "turn on the living room lamp")
self.assertEqual(result["slots"]["name"], "living room lamp")
self.assertEqual(result["slots"]["state"], "on")
def test_http_speech_to_intent_hermes(self):
response = requests.post(
self.api_url("speech-to-intent"),
data=self.wav_bytes,
params={"outputFormat": "hermes"},
)
self.check_status(response)
result = response.json()
self.assertEqual(result["type"], "intent")
nlu_intent = NluIntent.from_dict(result["value"])
self.assertEqual(nlu_intent.raw_input, "turn on the living room lamp")
self.assertEqual(nlu_intent.input, "turn on the living room lamp")
# Intent name and slots
self.assertEqual(nlu_intent.intent.intent_name, "ChangeLightState")
slots_by_name = {slot.slot_name: slot for slot in nlu_intent.slots}
self.assertIn("name", slots_by_name)
self.assertEqual(slots_by_name["name"].value["value"], "living room lamp")
self.assertIn("state", slots_by_name)
self.assertEqual(slots_by_name["state"].value["value"], "on")
| en | 0.627629 | Automated speech recognition tests. Test automated speech recognition (English) Test speech-to-text HTTP endpoint Text speech-to-text HTTP endpoint (Rhasspy JSON format) Text speech-to-text HTTP endpoint (Hermes format) # Intent name and slots | 3.295789 | 3 |
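The same speech-to-text endpoint can be exercised outside the unittest harness; a minimal sketch, assuming a Rhasspy instance on localhost:12101 and the WAV file used by the tests.

# Standalone sketch of the speech-to-text call exercised by the tests above.
# Host, port and WAV path are assumptions; adjust them to your Rhasspy setup.
from pathlib import Path
import requests

wav_bytes = Path("wav/en/turn_on_the_living_room_lamp.wav").read_bytes()
response = requests.post("http://localhost:12101/api/speech-to-text", data=wav_bytes)
response.raise_for_status()
print(response.content.decode())  # expected: "turn on the living room lamp"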
speech/melgan/model/multiscale.py | OthmaneJ/deep-tts | 213 | 8784 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .discriminator import Discriminator
from .identity import Identity
class MultiScaleDiscriminator(nn.Module):
def __init__(self):
super(MultiScaleDiscriminator, self).__init__()
self.discriminators = nn.ModuleList(
[Discriminator() for _ in range(3)]
)
self.pooling = nn.ModuleList(
[Identity()] +
[nn.AvgPool1d(kernel_size=4, stride=2, padding=2) for _ in range(1, 3)]
)
def forward(self, x):
ret = list()
for pool, disc in zip(self.pooling, self.discriminators):
x = pool(x)
ret.append(disc(x))
return ret # [(feat, score), (feat, score), (feat, score)]
| import torch
import torch.nn as nn
import torch.nn.functional as F
from .discriminator import Discriminator
from .identity import Identity
class MultiScaleDiscriminator(nn.Module):
def __init__(self):
super(MultiScaleDiscriminator, self).__init__()
self.discriminators = nn.ModuleList(
[Discriminator() for _ in range(3)]
)
self.pooling = nn.ModuleList(
[Identity()] +
[nn.AvgPool1d(kernel_size=4, stride=2, padding=2) for _ in range(1, 3)]
)
def forward(self, x):
ret = list()
for pool, disc in zip(self.pooling, self.discriminators):
x = pool(x)
ret.append(disc(x))
return ret # [(feat, score), (feat, score), (feat, score)]
| en | 0.784948 | # [(feat, score), (feat, score), (feat, score)] | 2.331615 | 2 |
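A small forward-pass sketch for MultiScaleDiscriminator; the import path and input shape are assumptions, since the expected waveform layout depends on the Discriminator module imported above.

# Hypothetical smoke test for MultiScaleDiscriminator (import path and shape are assumptions).
import torch
from model.multiscale import MultiScaleDiscriminator

model = MultiScaleDiscriminator()
audio = torch.randn(2, 1, 22050)   # (batch, channels, samples) mono waveform
outputs = model(audio)             # one entry per scale; per the comment, a (feat, score) pair
print(len(outputs))                # 3 scales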
main.py | AntonioLourencos/jogo-da-velha | 10 | 8785 | <gh_stars>1-10
from game import about_button, start_button, play_sound, center_pos
import pygame
WHITE = (255,255,255)
BLACK = (0,0,0)
GREEN = (0, 255, 0)
pygame.init()
pygame.font.init()
pygame.mixer.init()
FONT = pygame.font.Font("assets/font.ttf", 70)
FONT_MIN = pygame.font.Font("assets/font.ttf", 30)
window = pygame.display.set_mode([600,600])
running = True
clock = pygame.time.Clock()
nickname = " "
me = "X"
ia = "O"
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
play_sound("minimize_001")
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_BACKSPACE and len(nickname) > 2:
nickname = list(nickname)
nickname.pop(-2)
nickname = "".join(nickname)
play_sound("error_001")
elif len(nickname.strip()) <= 10:
play_sound("bong_001")
if len(nickname) > 1:
nickname = list(nickname)
nickname.pop(-1)
nickname = "".join(nickname)
nickname += event.unicode
nickname += " "
if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
if me == "X":
me = "O"
ia = "X"
else:
me = "X"
ia = "O"
window.fill(BLACK)
title = FONT.render("<NAME>", True, WHITE)
title_pos = center_pos(title.get_rect(), 10)
window.blit(title, title_pos)
nickname_label = FONT.render("SEU NOME", True, WHITE)
nickname_label_pos = center_pos(nickname_label.get_rect(), 100)
window.blit(nickname_label, nickname_label_pos)
nickname_render = FONT.render(nickname, True, BLACK)
nickname_rect = nickname_render.get_rect()
nickname_pos = center_pos(nickname_rect, 180)
pygame.draw.rect(window, WHITE, (nickname_pos[0], 180, nickname_rect[2], nickname_rect[3]))
window.blit(nickname_render, nickname_pos)
choice_render = FONT.render(f"JOGUE COM {me}", True, WHITE)
window.blit(choice_render, center_pos(choice_render.get_rect(), 280))
my_name = FONT_MIN.render(f"DESENVOLVIDO POR <NAME>", True, WHITE)
window.blit(my_name, center_pos(my_name.get_rect(), 560))
start_button(window, "JOGAR", 380, me, ia, nickname.strip(), 10)
about_button(window, 450, 10)
pygame.display.flip()
clock.tick(60) | from game import about_button, start_button, play_sound, center_pos
import pygame
WHITE = (255,255,255)
BLACK = (0,0,0)
GREEN = (0, 255, 0)
pygame.init()
pygame.font.init()
pygame.mixer.init()
FONT = pygame.font.Font("assets/font.ttf", 70)
FONT_MIN = pygame.font.Font("assets/font.ttf", 30)
window = pygame.display.set_mode([600,600])
running = True
clock = pygame.time.Clock()
nickname = " "
me = "X"
ia = "O"
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
play_sound("minimize_001")
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_BACKSPACE and len(nickname) > 2:
nickname = list(nickname)
nickname.pop(-2)
nickname = "".join(nickname)
play_sound("error_001")
elif len(nickname.strip()) <= 10:
play_sound("bong_001")
if len(nickname) > 1:
nickname = list(nickname)
nickname.pop(-1)
nickname = "".join(nickname)
nickname += event.unicode
nickname += " "
if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
if me == "X":
me = "O"
ia = "X"
else:
me = "X"
ia = "O"
window.fill(BLACK)
title = FONT.render("<NAME>", True, WHITE)
title_pos = center_pos(title.get_rect(), 10)
window.blit(title, title_pos)
nickname_label = FONT.render("SEU NOME", True, WHITE)
nickname_label_pos = center_pos(nickname_label.get_rect(), 100)
window.blit(nickname_label, nickname_label_pos)
nickname_render = FONT.render(nickname, True, BLACK)
nickname_rect = nickname_render.get_rect()
nickname_pos = center_pos(nickname_rect, 180)
pygame.draw.rect(window, WHITE, (nickname_pos[0], 180, nickname_rect[2], nickname_rect[3]))
window.blit(nickname_render, nickname_pos)
choice_render = FONT.render(f"JOGUE COM {me}", True, WHITE)
window.blit(choice_render, center_pos(choice_render.get_rect(), 280))
my_name = FONT_MIN.render(f"DESENVOLVIDO POR <NAME>", True, WHITE)
window.blit(my_name, center_pos(my_name.get_rect(), 560))
start_button(window, "JOGAR", 380, me, ia, nickname.strip(), 10)
about_button(window, 450, 10)
pygame.display.flip()
clock.tick(60) | none | 1 | 2.94631 | 3 |
|
schedule/views.py | 1donggri/teamProject | 0 | 8786 | from django.shortcuts import render, redirect
from .models import Post
from .forms import ScheduleForm
from django.core.paginator import Paginator
# Create your views here.
def view_schedule(request):
all_posts = Post.objects.all().order_by('pub_date')
page = int(request.GET.get('p', 1))
    paginator = Paginator(all_posts, 5)
    posts = paginator.get_page(page)
return render(request, 'schedule/view_schedule.html', {'posts': posts})
def write_schedule(request):
if request.method == "POST":
form = ScheduleForm(request.POST)
if form.is_valid():
            # is_valid() runs all of the form's validators to check the submitted data
# user_id = request.session.get('user')
# user = User.objects.get(pk=user_id)
schedule = Post()
schedule.title = form.cleaned_data['title']
            # # Values that pass validation are provided as a dict (form.cleaned_data)
            # # On validation failure, the error details are stored in form.errors
schedule.username = form.cleaned_data['username']
schedule.pub_date = form.cleaned_data['pub_date']
schedule.save()
return redirect('schedule:view_schedule')
else:
form = ScheduleForm()
return render(request, 'schedule/write_schedule.html', {'form': form})
def delete(request, posts_id):
post = Post.objects.get(id=posts_id)
post.delete()
posts = Post.objects.all().order_by('-id')
return render(request, 'schedule/view_schedule.html', {'posts': posts}) | from django.shortcuts import render, redirect
from .models import Post
from .forms import ScheduleForm
from django.core.paginator import Paginator
# Create your views here.
def view_schedule(request):
all_posts = Post.objects.all().order_by('pub_date')
page = int(request.GET.get('p', 1))
    paginator = Paginator(all_posts, 5)
    posts = paginator.get_page(page)
return render(request, 'schedule/view_schedule.html', {'posts': posts})
def write_schedule(request):
if request.method == "POST":
form = ScheduleForm(request.POST)
if form.is_valid():
            # is_valid() runs all of the form's validators to check the submitted data
# user_id = request.session.get('user')
# user = User.objects.get(pk=user_id)
schedule = Post()
schedule.title = form.cleaned_data['title']
            # # Values that pass validation are provided as a dict (form.cleaned_data)
            # # On validation failure, the error details are stored in form.errors
schedule.username = form.cleaned_data['username']
schedule.pub_date = form.cleaned_data['pub_date']
schedule.save()
return redirect('schedule:view_schedule')
else:
form = ScheduleForm()
return render(request, 'schedule/write_schedule.html', {'form': form})
def delete(request, posts_id):
post = Post.objects.get(id=posts_id)
post.delete()
posts = Post.objects.all().order_by('-id')
    return render(request, 'schedule/view_schedule.html', {'posts': posts}) | ko | 0.92538 | # Create your views here. # is_valid() runs all of the form's validators to check the submitted data # user_id = request.session.get('user') # user = User.objects.get(pk=user_id) # # Values that pass validation are provided as a dict (form.cleaned_data) # # On validation failure, the error details are stored in form.errors | 2.196984 | 2
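The redirect in write_schedule relies on a 'schedule' URL namespace with a 'view_schedule' route; a hypothetical urls.py wiring could look like the sketch below (the route strings are assumptions).

# Hypothetical schedule/urls.py for the views above; the route strings are assumptions.
from django.urls import path
from . import views

app_name = "schedule"
urlpatterns = [
    path("", views.view_schedule, name="view_schedule"),
    path("write/", views.write_schedule, name="write_schedule"),
    path("delete/<int:posts_id>/", views.delete, name="delete"),
]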
archetype/settings/local_stg.py | kingsdigitallab/archetype-django | 1 | 8787 | <reponame>kingsdigitallab/archetype-django
from .base import * # noqa
CACHE_REDIS_DATABASE = '1'
CACHES['default']['LOCATION'] = '127.0.0.1:6379:' + CACHE_REDIS_DATABASE
INTERNAL_IPS = INTERNAL_IPS + ('', )
ALLOWED_HOSTS = ['']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'app_archetype_stg',
'USER': 'app_archetype',
'PASSWORD': '',
'HOST': ''
},
}
| from .base import * # noqa
CACHE_REDIS_DATABASE = '1'
CACHES['default']['LOCATION'] = '127.0.0.1:6379:' + CACHE_REDIS_DATABASE
INTERNAL_IPS = INTERNAL_IPS + ('', )
ALLOWED_HOSTS = ['']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'app_archetype_stg',
'USER': 'app_archetype',
'PASSWORD': '',
'HOST': ''
},
} | none | 1 | 1.366344 | 1 |
|
website/sites/admin.py | vnaskos/Website | 0 | 8788 | <reponame>vnaskos/Website
from django.contrib import admin
# Register your models here.
from website.sites.models import Post
@admin.register(Post)
class TestAdmin2(admin.ModelAdmin):
pass | from django.contrib import admin
# Register your models here.
from website.sites.models import Post
@admin.register(Post)
class TestAdmin2(admin.ModelAdmin):
pass | en | 0.876766 | # Register your models here.] | 1.351805 | 1 |
mcts.py | korbi98/TicTacToeGo_Zero | 0 | 8789 | <reponame>korbi98/TicTacToeGo_Zero<filename>mcts.py
# Monte Carlo tree search for TicTacToe
import numpy as np
from tictactoe import Tictactoe
import copy
from random import choice
from tree import Node
import time
class MCTS:
'''
Class defining a simple monte carlo tree search algorithm.
Attributes:
- game: instance of TicTacToe game
- current_player: player to perform next move
- number_of_rollouts: number of simulations for generating one move
    - tree: root Node of the game tree that the search expands
'''
def __init__(self, game, number_of_rollouts):
self.game = game
self.current_player = game.move_number%2 + 1
print(self.current_player)
self.tree = Node(None, -1, 3 - self.current_player) # Root node of tree
self.number_of_rollouts = number_of_rollouts
print("Initial game state:\n",self.game.board)
def perform_search(self):
        '''Perform the MCTS by running the specified number of
        simulations and updating the corresponding leaf node.
        The leaf node is chosen by the traverse_tree function.
'''
        start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
for i in range(self.number_of_rollouts):
simulated_game = copy.deepcopy(self.game)
# Traverse to leaf
leaf = self.traverse_tree(simulated_game)
# Random simulation for leaf
result = self.rollout(simulated_game)
# Update all visited nodes
self.update_tree(result, leaf)
        end_time = time.perf_counter()
print("\nFirst layer:")
for child in self.tree.children:
child.print(self.tree)
second_layer = max(self.tree.children, key= lambda x: x.visits)
print("\nSecond layer:")
for child in second_layer.children:
child.print(self.tree)
print("\nSearch took:", round(end_time-start_time, 4), "seconds")
result = [0 for i in range(self.game.size**2)]
for child in self.tree.children:
result[child.boardposition] = child.visits
return result
def traverse_tree(self, simulated_game):
        '''Choose the next leaf for performing a rollout. When a node is
        fully expanded, the child with the highest UCT is chosen;
        otherwise a random unexplored node is chosen.
'''
current_node = self.tree #root
while current_node.isExpanded():
current_node = current_node.UTC_traverse(self.tree)
x,y = simulated_game.get_coords(current_node.boardposition)
simulated_game.setField(x,y)
# create children if empty
if not current_node.children:
current_node.getPossibleChildren(simulated_game.board)
# terminate if board is full
if not simulated_game.move_number < simulated_game.size**2 or simulated_game.checkboard():
return current_node
x,y = simulated_game.get_coords(current_node.boardposition)
simulated_game.setField(x,y)
# Choose random unexplored leaf
unexplored_leafs = list(filter(lambda x: x.visits == 0, current_node.children))
return choice(unexplored_leafs)
def rollout(self, simulated_game):
        '''Perform random play from the chosen leaf node until a terminal
        state is reached.'''
while (not simulated_game.checkboard()) and simulated_game.move_number < simulated_game.size**2:
simulated_game.perform_random_move()
res = simulated_game.checkboard()
print("Finished simulation player", res, "won. Terminal state is:")
simulated_game.printBoard()
return res
def update_tree(self, result, leaf):
'''update all visited nodes in tree'''
self.tree.visits += 1
current_node = leaf
while current_node.parent:
#current_node.print(self.tree)
current_node.update(result)
current_node = current_node.parent
| # Monte Carlo tree search for TicTacToe
import numpy as np
from tictactoe import Tictactoe
import copy
from random import choice
from tree import Node
import time
class MCTS:
'''
Class defining a simple monte carlo tree search algorithm.
Attributes:
- game: instance of TicTacToe game
- current_player: player to perform next move
- number_of_rollouts: number of simulations for generating one move
    - tree: root Node of the game tree that the search expands
'''
def __init__(self, game, number_of_rollouts):
self.game = game
self.current_player = game.move_number%2 + 1
print(self.current_player)
self.tree = Node(None, -1, 3 - self.current_player) # Root node of tree
self.number_of_rollouts = number_of_rollouts
print("Initial game state:\n",self.game.board)
def perform_search(self):
        '''Perform the MCTS by running the specified number of
        simulations and updating the corresponding leaf node.
        The leaf node is chosen by the traverse_tree function.
'''
        start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
for i in range(self.number_of_rollouts):
simulated_game = copy.deepcopy(self.game)
# Traverse to leaf
leaf = self.traverse_tree(simulated_game)
# Random simulation for leaf
result = self.rollout(simulated_game)
# Update all visited nodes
self.update_tree(result, leaf)
        end_time = time.perf_counter()
print("\nFirst layer:")
for child in self.tree.children:
child.print(self.tree)
second_layer = max(self.tree.children, key= lambda x: x.visits)
print("\nSecond layer:")
for child in second_layer.children:
child.print(self.tree)
print("\nSearch took:", round(end_time-start_time, 4), "seconds")
result = [0 for i in range(self.game.size**2)]
for child in self.tree.children:
result[child.boardposition] = child.visits
return result
def traverse_tree(self, simulated_game):
        '''Choose the next leaf for performing a rollout. When a node is
        fully expanded, the child with the highest UCT is chosen;
        otherwise a random unexplored node is chosen.
'''
current_node = self.tree #root
while current_node.isExpanded():
current_node = current_node.UTC_traverse(self.tree)
x,y = simulated_game.get_coords(current_node.boardposition)
simulated_game.setField(x,y)
# create children if empty
if not current_node.children:
current_node.getPossibleChildren(simulated_game.board)
# terminate if board is full
if not simulated_game.move_number < simulated_game.size**2 or simulated_game.checkboard():
return current_node
x,y = simulated_game.get_coords(current_node.boardposition)
simulated_game.setField(x,y)
# Choose random unexplored leaf
unexplored_leafs = list(filter(lambda x: x.visits == 0, current_node.children))
return choice(unexplored_leafs)
def rollout(self, simulated_game):
        '''Perform random play from the chosen leaf node until a terminal
        state is reached.'''
while (not simulated_game.checkboard()) and simulated_game.move_number < simulated_game.size**2:
simulated_game.perform_random_move()
res = simulated_game.checkboard()
print("Finished simulation player", res, "won. Terminal state is:")
simulated_game.printBoard()
return res
def update_tree(self, result, leaf):
'''update all visited nodes in tree'''
self.tree.visits += 1
current_node = leaf
while current_node.parent:
#current_node.print(self.tree)
current_node.update(result)
current_node = current_node.parent | en | 0.79936 | # Monte Carlo tree search for TicTacToe Class defining a simple monte carlo tree search algorithm. Attributes: - game: instance of TicTacToe game - current_player: player to perform next move - number_of_rollouts: number of simulations for generating one move - tree: list containing all possible and impossible (taken) leaf nodes # Root node of tree Perfoming the mcts by performing the specified number of simulations and updating the corresponding leaf node. leaf node is choosen by traverse_tree function # Traverse to leaf # Random simulation for leaf # Update all visited nodes Choose next leaf for performing rollout. When node is fully expanded, child with highest UCT is choosen. If not a random unexplored node is choosen. #root # create children if empty # terminate if board is full # Choose random unexplored leaf perform random play for choosen leaf node till terminal state is reached update all visited nodes in tree #current_node.print(self.tree) | 3.889465 | 4 |
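A driver sketch for the MCTS class above; the Tictactoe constructor signature is an assumption inferred from how the game object is used (game.size, game.board, game.move_number).

# Hypothetical driver for MCTS; the Tictactoe constructor argument is an assumption.
from tictactoe import Tictactoe
from mcts import MCTS

game = Tictactoe(3)                      # assumed: builds an empty 3x3 board
search = MCTS(game, number_of_rollouts=200)
visit_counts = search.perform_search()   # one visit count per board position
best_position = visit_counts.index(max(visit_counts))
print("Most visited position:", best_position)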
grimer/metadata.py | pirovc/grimer | 5 | 8790 | import pandas as pd
from pandas.api.types import is_numeric_dtype
from grimer.utils import print_log
class Metadata:
valid_types = ["categorical", "numeric"]
default_type = "categorical"
def __init__(self, metadata_file, samples: list=[]):
# Read metadata and let pandas guess dtypes, index as str
self.data = pd.read_table(metadata_file, sep='\t', header=0, skiprows=0, index_col=0, dtype={0:str})
# Enforce string index
self.data.index = self.data.index.astype('str')
# Define all COLUMN TYPES as default
self.types = pd.Series(self.default_type, index=self.data.columns)
# Set types
if str(self.data.index[0]).startswith("#"):
# types defined on file
self.set_hard_types()
else:
# guessed types from read_table
self.types[self.data.dtypes.map(is_numeric_dtype)] = "numeric"
# Convert datatypes to adequate numeric values (int, float)
self.data = self.data.convert_dtypes(infer_objects=False, convert_string=False)
        # Re-convert everything to object to standardize (int64 NA is not serializable in bokeh)
self.data = self.data.astype("object")
# Remove empty fields
null_cols = self.data.isna().all(axis=0)
if any(null_cols):
self.data = self.data.loc[:, ~null_cols]
self.types = self.types[~null_cols]
print_log(str(sum(null_cols)) + " fields removed without valid values")
# Convert NaN on categorical to ""
self.data[self.types[self.types == "categorical"].index] = self.data[self.types[self.types == "categorical"].index].fillna('')
# Remove names
self.data.index.names = [None]
self.types.name = None
# sort and filter by given samples
if samples:
self.data = self.data.reindex(samples)
# Check if matched metadata and samples
null_rows = self.data.isna().all(axis=1)
if any(null_rows):
#self.data = self.data.loc[~null_rows, :]
print_log(str(sum(null_rows)) + " samples without valid metadata")
def __repr__(self):
args = ['{}={}'.format(k, repr(v)) for (k, v) in vars(self).items()]
return 'Metadata({})'.format(', '.join(args))
def set_hard_types(self):
# Get values defined on the first row
self.types = self.data.iloc[0]
# Drop row with types from main data
self.data.drop(self.types.name, inplace=True)
# Validate declared types
idx_valid = self.types.isin(self.valid_types)
if not idx_valid.all():
print_log("Invalid metadata types replaced by: " + self.default_type)
self.types[~idx_valid] = self.default_type
# Enforce column type on dataframe
self.data[self.types[self.types == "categorical"].index] = self.data[self.types[self.types == "categorical"].index].astype(str)
self.data[self.types[self.types == "numeric"].index] = self.data[self.types[self.types == "numeric"].index].apply(pd.to_numeric)
def get_col_headers(self):
return self.data.columns
def get_data(self, metadata_type: str=None):
if metadata_type is not None:
return self.data[self.types[self.types == metadata_type].index]
else:
return self.data
def get_col(self, col):
return self.data[col]
def get_unique_values(self, col):
return sorted(self.get_col(col).dropna().unique())
def get_formatted_unique_values(self, col):
if self.types[col] == "categorical":
return self.get_unique_values(col)
else:
return list(map('{:.16g}'.format, self.get_unique_values(col)))
def get_type(self, col):
return self.types[col]
def get_subset(self, column, value):
return self.data[self.data[column] == value]
| import pandas as pd
from pandas.api.types import is_numeric_dtype
from grimer.utils import print_log
class Metadata:
valid_types = ["categorical", "numeric"]
default_type = "categorical"
def __init__(self, metadata_file, samples: list=[]):
# Read metadata and let pandas guess dtypes, index as str
self.data = pd.read_table(metadata_file, sep='\t', header=0, skiprows=0, index_col=0, dtype={0:str})
# Enforce string index
self.data.index = self.data.index.astype('str')
# Define all COLUMN TYPES as default
self.types = pd.Series(self.default_type, index=self.data.columns)
# Set types
if str(self.data.index[0]).startswith("#"):
# types defined on file
self.set_hard_types()
else:
# guessed types from read_table
self.types[self.data.dtypes.map(is_numeric_dtype)] = "numeric"
# Convert datatypes to adequate numeric values (int, float)
self.data = self.data.convert_dtypes(infer_objects=False, convert_string=False)
        # Re-convert everything to object to standardize (int64 NA is not serializable in bokeh)
self.data = self.data.astype("object")
# Remove empty fields
null_cols = self.data.isna().all(axis=0)
if any(null_cols):
self.data = self.data.loc[:, ~null_cols]
self.types = self.types[~null_cols]
print_log(str(sum(null_cols)) + " fields removed without valid values")
# Convert NaN on categorical to ""
self.data[self.types[self.types == "categorical"].index] = self.data[self.types[self.types == "categorical"].index].fillna('')
# Remove names
self.data.index.names = [None]
self.types.name = None
# sort and filter by given samples
if samples:
self.data = self.data.reindex(samples)
# Check if matched metadata and samples
null_rows = self.data.isna().all(axis=1)
if any(null_rows):
#self.data = self.data.loc[~null_rows, :]
print_log(str(sum(null_rows)) + " samples without valid metadata")
def __repr__(self):
args = ['{}={}'.format(k, repr(v)) for (k, v) in vars(self).items()]
return 'Metadata({})'.format(', '.join(args))
def set_hard_types(self):
# Get values defined on the first row
self.types = self.data.iloc[0]
# Drop row with types from main data
self.data.drop(self.types.name, inplace=True)
# Validate declared types
idx_valid = self.types.isin(self.valid_types)
if not idx_valid.all():
print_log("Invalid metadata types replaced by: " + self.default_type)
self.types[~idx_valid] = self.default_type
# Enforce column type on dataframe
self.data[self.types[self.types == "categorical"].index] = self.data[self.types[self.types == "categorical"].index].astype(str)
self.data[self.types[self.types == "numeric"].index] = self.data[self.types[self.types == "numeric"].index].apply(pd.to_numeric)
def get_col_headers(self):
return self.data.columns
def get_data(self, metadata_type: str=None):
if metadata_type is not None:
return self.data[self.types[self.types == metadata_type].index]
else:
return self.data
def get_col(self, col):
return self.data[col]
def get_unique_values(self, col):
return sorted(self.get_col(col).dropna().unique())
def get_formatted_unique_values(self, col):
if self.types[col] == "categorical":
return self.get_unique_values(col)
else:
return list(map('{:.16g}'.format, self.get_unique_values(col)))
def get_type(self, col):
return self.types[col]
def get_subset(self, column, value):
return self.data[self.data[column] == value]
| en | 0.668764 | # Read metadata and let pandas guess dtypes, index as str # Enforce string index # Define all COLUMN TYPES as default # Set types # types defined on file # guessed types from read_table # Convert datatypes to adequate numeric values (int, float) # Re-convert everython to object to standardize (int64 NA is not seriazable on bokeh) # Remove empty fields # Convert NaN on categorical to "" # Remove names # sort and filter by given samples # Check if matched metadata and samples #self.data = self.data.loc[~null_rows, :] # Get values defined on the first row # Drop row with types from main data # Validate declared types # Enforce column type on dataframe | 2.946081 | 3 |
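A usage sketch for the Metadata wrapper, assuming a tab-separated file with samples as rows and metadata fields as columns (an optional first row starting with '#' can declare per-column types); the file and column names are assumptions.

# Hypothetical usage of grimer's Metadata class; file and sample names are assumptions.
from grimer.metadata import Metadata

md = Metadata("metadata.tsv", samples=["sample1", "sample2", "sample3"])
print(md.get_col_headers())                  # all metadata fields
print(md.get_data(metadata_type="numeric"))  # numeric columns only
first_col = md.get_col_headers()[0]
print(md.get_formatted_unique_values(first_col))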
allennlp/training/metric_tracker.py | MSLars/allennlp | 11,433 | 8791 | from typing import Optional, Dict, Any, List, Union
from allennlp.common.checks import ConfigurationError
class MetricTracker:
"""
This class tracks a metric during training for the dual purposes of early stopping
and for knowing whether the current value is the best so far. It mimics the PyTorch
`state_dict` / `load_state_dict` interface, so that it can be checkpointed along with
your model and optimizer.
Some metrics improve by increasing; others by decreasing. You can provide a
`metric_name` that starts with "+" to indicate an increasing metric, or "-"
to indicate a decreasing metric.
# Parameters
metric_name : `Union[str, List[str]]`
Specifies the metric or metrics to track. Metric names have to start with
"+" for increasing metrics or "-" for decreasing ones. If you specify more
        than one, it tracks the sum of the increasing metrics minus the sum
of the decreasing metrics.
patience : `int`, optional (default = `None`)
If provided, then `should_stop_early()` returns True if we go this
many epochs without seeing a new best value.
"""
def __init__(
self,
metric_name: Union[str, List[str]],
patience: Optional[int] = None,
) -> None:
self._patience = patience
self._best_so_far: Optional[float] = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch: Optional[int] = None
self.best_epoch_metrics: Dict[str, float] = {}
if isinstance(metric_name, str):
metric_name = [metric_name]
self.tracked_metrics = []
for name in metric_name:
if name.startswith("+"):
self.tracked_metrics.append((1.0, name[1:]))
elif name.startswith("-"):
self.tracked_metrics.append((-1.0, name[1:]))
else:
raise ConfigurationError("metric_name must start with + or -")
def clear(self) -> None:
"""
Clears out the tracked metrics, but keeps the patience
"""
self._best_so_far = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch = None
self.best_epoch_metrics.clear()
def state_dict(self) -> Dict[str, Any]:
"""
A `Trainer` can use this to serialize the state of the metric tracker.
"""
return {
"best_so_far": self._best_so_far,
"epochs_with_no_improvement": self._epochs_with_no_improvement,
"is_best_so_far": self._is_best_so_far,
"epoch_number": self._epoch_number,
"best_epoch": self.best_epoch,
"best_epoch_metrics": self.best_epoch_metrics,
}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
"""
A `Trainer` can use this to hydrate a metric tracker from a serialized state.
"""
self._best_so_far = state_dict["best_so_far"]
self._epochs_with_no_improvement = state_dict["epochs_with_no_improvement"]
self._is_best_so_far = state_dict["is_best_so_far"]
self._epoch_number = state_dict["epoch_number"]
self.best_epoch = state_dict["best_epoch"]
# Even though we don't promise backwards compatibility for the --recover flag,
# it's particularly easy and harmless to provide it here, so we do it.
self.best_epoch_metrics = state_dict.get("best_epoch_metrics", {})
def add_metrics(self, metrics: Dict[str, float]) -> None:
"""
Record a new value of the metric and update the various things that depend on it.
"""
combined_score = self.combined_score(metrics)
new_best = (self._best_so_far is None) or (combined_score > self._best_so_far)
if new_best:
self._best_so_far = combined_score
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self.best_epoch = self._epoch_number
else:
self._epochs_with_no_improvement += 1
self._is_best_so_far = False
self._epoch_number += 1
def is_best_so_far(self) -> bool:
"""
Returns true if the most recent value of the metric is the best so far.
"""
return self._is_best_so_far
def should_stop_early(self) -> bool:
"""
Returns true if improvement has stopped for long enough.
"""
if self._patience is None:
return False
else:
return self._epochs_with_no_improvement >= self._patience
def combined_score(self, metrics: Dict[str, float]) -> float:
try:
return sum(
factor * metrics[metric_name] for factor, metric_name in self.tracked_metrics
)
except KeyError as e:
raise ConfigurationError(
f"You configured the trainer to use the {e.args[0]} "
"metric for early stopping, but the model did not produce that metric."
)
| from typing import Optional, Dict, Any, List, Union
from allennlp.common.checks import ConfigurationError
class MetricTracker:
"""
This class tracks a metric during training for the dual purposes of early stopping
and for knowing whether the current value is the best so far. It mimics the PyTorch
`state_dict` / `load_state_dict` interface, so that it can be checkpointed along with
your model and optimizer.
Some metrics improve by increasing; others by decreasing. You can provide a
`metric_name` that starts with "+" to indicate an increasing metric, or "-"
to indicate a decreasing metric.
# Parameters
metric_name : `Union[str, List[str]]`
Specifies the metric or metrics to track. Metric names have to start with
"+" for increasing metrics or "-" for decreasing ones. If you specify more
        than one, it tracks the sum of the increasing metrics minus the sum
of the decreasing metrics.
patience : `int`, optional (default = `None`)
If provided, then `should_stop_early()` returns True if we go this
many epochs without seeing a new best value.
"""
def __init__(
self,
metric_name: Union[str, List[str]],
patience: Optional[int] = None,
) -> None:
self._patience = patience
self._best_so_far: Optional[float] = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch: Optional[int] = None
self.best_epoch_metrics: Dict[str, float] = {}
if isinstance(metric_name, str):
metric_name = [metric_name]
self.tracked_metrics = []
for name in metric_name:
if name.startswith("+"):
self.tracked_metrics.append((1.0, name[1:]))
elif name.startswith("-"):
self.tracked_metrics.append((-1.0, name[1:]))
else:
raise ConfigurationError("metric_name must start with + or -")
def clear(self) -> None:
"""
Clears out the tracked metrics, but keeps the patience
"""
self._best_so_far = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch = None
self.best_epoch_metrics.clear()
def state_dict(self) -> Dict[str, Any]:
"""
A `Trainer` can use this to serialize the state of the metric tracker.
"""
return {
"best_so_far": self._best_so_far,
"epochs_with_no_improvement": self._epochs_with_no_improvement,
"is_best_so_far": self._is_best_so_far,
"epoch_number": self._epoch_number,
"best_epoch": self.best_epoch,
"best_epoch_metrics": self.best_epoch_metrics,
}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
"""
A `Trainer` can use this to hydrate a metric tracker from a serialized state.
"""
self._best_so_far = state_dict["best_so_far"]
self._epochs_with_no_improvement = state_dict["epochs_with_no_improvement"]
self._is_best_so_far = state_dict["is_best_so_far"]
self._epoch_number = state_dict["epoch_number"]
self.best_epoch = state_dict["best_epoch"]
# Even though we don't promise backwards compatibility for the --recover flag,
# it's particularly easy and harmless to provide it here, so we do it.
self.best_epoch_metrics = state_dict.get("best_epoch_metrics", {})
def add_metrics(self, metrics: Dict[str, float]) -> None:
"""
Record a new value of the metric and update the various things that depend on it.
"""
combined_score = self.combined_score(metrics)
new_best = (self._best_so_far is None) or (combined_score > self._best_so_far)
if new_best:
self._best_so_far = combined_score
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self.best_epoch = self._epoch_number
else:
self._epochs_with_no_improvement += 1
self._is_best_so_far = False
self._epoch_number += 1
def is_best_so_far(self) -> bool:
"""
Returns true if the most recent value of the metric is the best so far.
"""
return self._is_best_so_far
def should_stop_early(self) -> bool:
"""
Returns true if improvement has stopped for long enough.
"""
if self._patience is None:
return False
else:
return self._epochs_with_no_improvement >= self._patience
def combined_score(self, metrics: Dict[str, float]) -> float:
try:
return sum(
factor * metrics[metric_name] for factor, metric_name in self.tracked_metrics
)
except KeyError as e:
raise ConfigurationError(
f"You configured the trainer to use the {e.args[0]} "
"metric for early stopping, but the model did not produce that metric."
)
| en | 0.811103 | This class tracks a metric during training for the dual purposes of early stopping and for knowing whether the current value is the best so far. It mimics the PyTorch `state_dict` / `load_state_dict` interface, so that it can be checkpointed along with your model and optimizer. Some metrics improve by increasing; others by decreasing. You can provide a `metric_name` that starts with "+" to indicate an increasing metric, or "-" to indicate a decreasing metric. # Parameters metric_name : `Union[str, List[str]]` Specifies the metric or metrics to track. Metric names have to start with "+" for increasing metrics or "-" for decreasing ones. If you specify more than one, it tracks the sum of the increasing metrics metrics minus the sum of the decreasing metrics. patience : `int`, optional (default = `None`) If provided, then `should_stop_early()` returns True if we go this many epochs without seeing a new best value. Clears out the tracked metrics, but keeps the patience A `Trainer` can use this to serialize the state of the metric tracker. A `Trainer` can use this to hydrate a metric tracker from a serialized state. # Even though we don't promise backwards compatibility for the --recover flag, # it's particularly easy and harmless to provide it here, so we do it. Record a new value of the metric and update the various things that depend on it. Returns true if the most recent value of the metric is the best so far. Returns true if improvement has stopped for long enough. | 2.698195 | 3 |
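A minimal sketch of the tracker's intended use inside a training loop, following the interface defined above; the metric values below are made up purely to show the API.

# Illustrative loop for MetricTracker; the accuracy values are made up.
from allennlp.training.metric_tracker import MetricTracker

tracker = MetricTracker(metric_name="+accuracy", patience=3)

for epoch, accuracy in enumerate([0.70, 0.72, 0.71, 0.71, 0.71]):
    tracker.add_metrics({"accuracy": accuracy})
    if tracker.is_best_so_far():
        print(f"epoch {epoch}: new best accuracy {accuracy}")
    if tracker.should_stop_early():
        print(f"early stopping at epoch {epoch}")
        break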
authors/apps/profiles/renderers.py | MuhweziDeo/Ah-backend-xmen | 4 | 8792 | from authors.apps.utils.renderers import AppJSONRenderer
import json
from rest_framework.renderers import JSONRenderer
class UserProfileJSONRenderer(AppJSONRenderer):
name = 'profile'
class UserProfileListRenderer(JSONRenderer):
"""
Returns profiles of existing users
"""
charset = 'utf-8'
def render(self, data, media_type=None, renderer_context=None):
""" present a list of
user profiles in json format
"""
return json.dumps({
'profiles':data
})
class ReadStatsJsonRenderer(AppJSONRenderer):
name = 'read_stats'
| from authors.apps.utils.renderers import AppJSONRenderer
import json
from rest_framework.renderers import JSONRenderer
class UserProfileJSONRenderer(AppJSONRenderer):
name = 'profile'
class UserProfileListRenderer(JSONRenderer):
"""
Returns profiles of existing users
"""
charset = 'utf-8'
def render(self, data, media_type=None, renderer_context=None):
""" present a list of
user profiles in json format
"""
return json.dumps({
'profiles':data
})
class ReadStatsJsonRenderer(AppJSONRenderer):
name = 'read_stats'
| en | 0.643342 | Returns profiles of existing users present a list of user profiles in json format | 2.524953 | 3 |
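A quick sketch of the JSON produced by UserProfileListRenderer, using a made-up payload.

# Hypothetical payload for UserProfileListRenderer.
from authors.apps.profiles.renderers import UserProfileListRenderer

renderer = UserProfileListRenderer()
payload = [{"username": "alice"}, {"username": "bob"}]
print(renderer.render(payload))  # {"profiles": [{"username": "alice"}, {"username": "bob"}]}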
json_analyzer.py | bantenz/NetworkConfigParser | 0 | 8793 | <reponame>bantenz/NetworkConfigParser<gh_stars>0
import json
from deepdiff import DeepDiff
import pprint
def get_json(file_name):
with open(file_name) as json_file:
json_data = json.load(json_file)
return json_data
def compare_json(Hostname, Command, Data1, Data2):
if (Data1 == Data2):
print ("%s - %s output is same" % (Hostname, Command))
else:
print ("%s - %s output is different" % (Hostname, Command))
pprint.pprint(DeepDiff(Data1, Data2))
def main():
    Hostname = input('Input Hostname of the device : ').lower()
    Command = input('Input Command : ').lower()
    Filename1 = input('Input First JSON File : ').lower()
    Filename2 = input('Input Second JSON File : ').lower()
Data1 = get_json(Filename1)
Data2 = get_json(Filename2)
compare_json(Hostname, Command, Data1, Data2)
if __name__ == "__main__":
# If this Python file runs by itself, run below command. If imported, this section is not run
main()
| import json
from deepdiff import DeepDiff
import pprint
def get_json(file_name):
with open(file_name) as json_file:
json_data = json.load(json_file)
return json_data
def compare_json(Hostname, Command, Data1, Data2):
if (Data1 == Data2):
print ("%s - %s output is same" % (Hostname, Command))
else:
print ("%s - %s output is different" % (Hostname, Command))
pprint.pprint(DeepDiff(Data1, Data2))
def main():
    Hostname = input('Input Hostname of the device : ').lower()
    Command = input('Input Command : ').lower()
    Filename1 = input('Input First JSON File : ').lower()
    Filename2 = input('Input Second JSON File : ').lower()
Data1 = get_json(Filename1)
Data2 = get_json(Filename2)
compare_json(Hostname, Command, Data1, Data2)
if __name__ == "__main__":
# If this Python file runs by itself, run below command. If imported, this section is not run
main() | en | 0.793427 | # If this Python file runs by itself, run below command. If imported, this section is not run | 3.143137 | 3 |
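The comparison helper can also be called directly without the interactive prompts; a small sketch with made-up device output.

# Hypothetical direct call to compare_json with made-up data.
from json_analyzer import compare_json

before = {"interfaces": {"Gi0/1": {"status": "up"}}}
after = {"interfaces": {"Gi0/1": {"status": "down"}}}
compare_json("router1", "show interfaces", before, after)
# prints "router1 - show interfaces output is different" followed by the DeepDiff details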
fiwareglancesync/sync.py | telefonicaid/fiware-glancesync | 0 | 8794 | <reponame>telefonicaid/fiware-glancesync
#!/usr/bin/env python
# -- encoding: utf-8 --
#
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with <EMAIL>
#
import sys
import StringIO
import os
import os.path
import datetime
import argparse
import logging
from fiwareglancesync.glancesync import GlanceSync
class Sync(object):
def __init__(self, regions, override_d=None):
"""init object"""
GlanceSync.init_logs()
self.glancesync = GlanceSync(options_dict=override_d)
regions_expanded = list()
already_sorted = True
for region in regions:
if region.endswith(':'):
regions_expanded.extend(self.glancesync.get_regions(
target=region[:-1]))
already_sorted = False
else:
regions_expanded.append(region)
regions = regions_expanded
if not regions:
regions = self.glancesync.get_regions()
already_sorted = False
if not already_sorted:
regions_unsorted = regions
regions = list()
for region in self.glancesync.preferable_order:
if region in regions_unsorted:
regions.append(region)
regions_unsorted.remove(region)
regions.extend(regions_unsorted)
self.regions = regions
def report_status(self):
"""Report the synchronisation status of the regions"""
for region in self.regions:
try:
stream = StringIO.StringIO()
self.glancesync.export_sync_region_status(region, stream)
print(stream.getvalue())
except Exception:
# Don't do anything. Message has been already printed
# try next region
continue
def parallel_sync(self):
"""Run the synchronisation in several regions in parallel. The
synchronisation inside the region is sequential (i.e. several
regions are synchronised simultaneously, but only one image at time
is uploaded for each region)"""
max_children = self.glancesync.max_children
now = datetime.datetime.now()
datestr = str(now.year) + str(now.month).zfill(2) + \
str(now.day).zfill(2) + '_' + str(now.hour).zfill(2) +\
str(now.minute).zfill(2)
msg = '======Master is ' + self.glancesync.master_region
print(msg)
sys.stdout.flush()
os.mkdir('sync_' + datestr)
children = dict()
for region in self.regions:
try:
if len(children) >= max_children:
self._wait_child(children)
pid = os.fork()
if pid > 0:
children[pid] = region
continue
else:
path = os.path.join('sync_' + datestr, region + '.txt')
handler = logging.FileHandler(path)
handler.setFormatter(logging.Formatter('%(message)s'))
logger = self.glancesync.log
# Remove old handlers
for h in logger.handlers:
logger.removeHandler(h)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.propagate = 0
self.glancesync.sync_region(region)
                    # After a fork, os._exit() and not sys.exit() must be used.
os._exit(0)
except Exception:
raise
sys.stderr.flush()
sys.exit(-1)
while len(children) > 0:
self._wait_child(children)
print('All is done.')
def sequential_sync(self, dry_run=False):
"""Run the synchronisation sequentially (that is, do not start the
synchronisation to a region before the previous one was completed or
failed
:param dry_run: if true, do not synchronise images actually
"""
msg = '======Master is ' + self.glancesync.master_region
print(msg)
for region in self.regions:
try:
msg = "======" + region
print(msg)
sys.stdout.flush()
self.glancesync.sync_region(region, dry_run=dry_run)
except Exception:
# Don't do anything. Message has been already printed
# try next region
continue
def _wait_child(self, children):
""" Wait until one of the regions ends its synchronisation and then
print the result
:param children:
:return: a dictionary or regions, indexed by the pid of the process
"""
finish_direct_child = False
while not finish_direct_child:
(pid, status) = os.wait()
if pid not in children:
continue
else:
finish_direct_child = True
if status == 0:
msg = 'Region {0} has finished'.format(children[pid])
print(msg)
else:
msg = 'Region {0} has finished with errors'
print(msg.format(children[pid]))
del children[pid]
sys.stdout.flush()
def show_regions(self):
"""print a full list of the regions available (excluding the
master region) in all the targets defined in the configuration file"""
regions = self.glancesync.get_regions()
for target in self.glancesync.targets.keys():
if target == 'facade' or target == 'master':
continue
regions.extend(self.glancesync.get_regions(target=target))
print(' '.join(regions))
def make_backup(self):
"""make a backup of the metadata in the regions specified at the
constructor (in addition to the master region). The backup is created
in a directory named 'backup_glance_' with the date and time as suffix
There is a file for each region (the name is backup_<region>.csv) and
inside the file a line for each image.
Only the information about public images/ the images owned by
the tenant, can be obtained, regardless if the user is an admin. This
is a limitation of the glance API"""
now = datetime.datetime.now().isoformat()
directory = 'backup_glance_' + now
os.mkdir(directory)
regions = set(self.regions)
regions.add(self.glancesync.master_region)
for region in regions:
try:
self.glancesync.backup_glancemetadata_region(region, directory)
except Exception:
# do nothing. Already logged.
continue
if __name__ == '__main__':
# Parse cmdline
description = 'A tool to sync images from a master region to other '\
'regions'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('regions', metavar='region', type=str, nargs='*',
help='region where the images are uploaded to')
parser.add_argument('--parallel', action='store_true',
help='sync several regions in parallel')
parser.add_argument(
'--config', nargs='+', help='override configuration options. (e.g. ' +
"main.master_region=Valladolid metadata_condition='image.name=name1')")
group = parser.add_mutually_exclusive_group()
group.add_argument('--dry-run', action='store_true',
help='do not upload actually the images')
group.add_argument('--show-status', action='store_true',
help='do not sync, but show the synchronisation status')
group.add_argument('--show-regions', action='store_true',
                       help='do not sync, only show the available regions')
group.add_argument(
'--make-backup', action='store_true',
help="do no sync, make a backup of the regions' metadata")
meta = parser.parse_args()
options = dict()
if meta.config:
for option in meta.config:
pair = option.split('=')
if len(pair) != 2:
parser.error('config options must have the format key=value')
sys.exit(-1)
options[pair[0].strip()] = pair[1]
# Run cmd
sync = Sync(meta.regions, options)
if meta.show_status:
sync.report_status()
elif meta.parallel:
sync.parallel_sync()
elif meta.show_regions:
sync.show_regions()
elif meta.make_backup:
sync.make_backup()
else:
sync.sequential_sync(meta.dry_run)
| #!/usr/bin/env python
# -- encoding: utf-8 --
#
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with <EMAIL>
#
import sys
import StringIO
import os
import os.path
import datetime
import argparse
import logging
from fiwareglancesync.glancesync import GlanceSync
class Sync(object):
def __init__(self, regions, override_d=None):
"""init object"""
GlanceSync.init_logs()
self.glancesync = GlanceSync(options_dict=override_d)
regions_expanded = list()
already_sorted = True
for region in regions:
if region.endswith(':'):
regions_expanded.extend(self.glancesync.get_regions(
target=region[:-1]))
already_sorted = False
else:
regions_expanded.append(region)
regions = regions_expanded
if not regions:
regions = self.glancesync.get_regions()
already_sorted = False
if not already_sorted:
regions_unsorted = regions
regions = list()
for region in self.glancesync.preferable_order:
if region in regions_unsorted:
regions.append(region)
regions_unsorted.remove(region)
regions.extend(regions_unsorted)
self.regions = regions
def report_status(self):
"""Report the synchronisation status of the regions"""
for region in self.regions:
try:
stream = StringIO.StringIO()
self.glancesync.export_sync_region_status(region, stream)
print(stream.getvalue())
except Exception:
# Don't do anything. Message has been already printed
# try next region
continue
def parallel_sync(self):
"""Run the synchronisation in several regions in parallel. The
synchronisation inside the region is sequential (i.e. several
regions are synchronised simultaneously, but only one image at time
is uploaded for each region)"""
max_children = self.glancesync.max_children
now = datetime.datetime.now()
datestr = str(now.year) + str(now.month).zfill(2) + \
str(now.day).zfill(2) + '_' + str(now.hour).zfill(2) +\
str(now.minute).zfill(2)
msg = '======Master is ' + self.glancesync.master_region
print(msg)
sys.stdout.flush()
os.mkdir('sync_' + datestr)
children = dict()
for region in self.regions:
try:
if len(children) >= max_children:
self._wait_child(children)
pid = os.fork()
if pid > 0:
children[pid] = region
continue
else:
path = os.path.join('sync_' + datestr, region + '.txt')
handler = logging.FileHandler(path)
handler.setFormatter(logging.Formatter('%(message)s'))
logger = self.glancesync.log
# Remove old handlers
for h in logger.handlers:
logger.removeHandler(h)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.propagate = 0
self.glancesync.sync_region(region)
                    # After a fork, os._exit() and not sys.exit() must be used.
os._exit(0)
except Exception:
raise
sys.stderr.flush()
sys.exit(-1)
while len(children) > 0:
self._wait_child(children)
print('All is done.')
def sequential_sync(self, dry_run=False):
"""Run the synchronisation sequentially (that is, do not start the
synchronisation to a region before the previous one was completed or
failed
:param dry_run: if true, do not synchronise images actually
"""
msg = '======Master is ' + self.glancesync.master_region
print(msg)
for region in self.regions:
try:
msg = "======" + region
print(msg)
sys.stdout.flush()
self.glancesync.sync_region(region, dry_run=dry_run)
except Exception:
# Don't do anything. Message has been already printed
# try next region
continue
def _wait_child(self, children):
""" Wait until one of the regions ends its synchronisation and then
print the result
:param children:
:return: a dictionary or regions, indexed by the pid of the process
"""
finish_direct_child = False
while not finish_direct_child:
(pid, status) = os.wait()
if pid not in children:
continue
else:
finish_direct_child = True
if status == 0:
msg = 'Region {0} has finished'.format(children[pid])
print(msg)
else:
msg = 'Region {0} has finished with errors'
print(msg.format(children[pid]))
del children[pid]
sys.stdout.flush()
def show_regions(self):
"""print a full list of the regions available (excluding the
master region) in all the targets defined in the configuration file"""
regions = self.glancesync.get_regions()
for target in self.glancesync.targets.keys():
if target == 'facade' or target == 'master':
continue
regions.extend(self.glancesync.get_regions(target=target))
print(' '.join(regions))
def make_backup(self):
"""make a backup of the metadata in the regions specified at the
constructor (in addition to the master region). The backup is created
in a directory named 'backup_glance_' with the date and time as suffix
There is a file for each region (the name is backup_<region>.csv) and
inside the file a line for each image.
Only the information about public images and the images owned by
the tenant can be obtained, regardless of whether the user is an admin.
This is a limitation of the glance API"""
now = datetime.datetime.now().isoformat()
directory = 'backup_glance_' + now
os.mkdir(directory)
regions = set(self.regions)
regions.add(self.glancesync.master_region)
for region in regions:
try:
self.glancesync.backup_glancemetadata_region(region, directory)
except Exception:
# do nothing. Already logged.
continue
if __name__ == '__main__':
# Parse cmdline
description = 'A tool to sync images from a master region to other '\
'regions'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('regions', metavar='region', type=str, nargs='*',
help='region where the images are uploaded to')
parser.add_argument('--parallel', action='store_true',
help='sync several regions in parallel')
parser.add_argument(
'--config', nargs='+', help='override configuration options. (e.g. ' +
"main.master_region=Valladolid metadata_condition='image.name=name1')")
group = parser.add_mutually_exclusive_group()
group.add_argument('--dry-run', action='store_true',
help='do not upload actually the images')
group.add_argument('--show-status', action='store_true',
help='do not sync, but show the synchronisation status')
group.add_argument('--show-regions', action='store_true',
help='do not sync, only show the available regions')
group.add_argument(
'--make-backup', action='store_true',
help="do no sync, make a backup of the regions' metadata")
meta = parser.parse_args()
options = dict()
if meta.config:
for option in meta.config:
pair = option.split('=')
if len(pair) != 2:
parser.error('config options must have the format key=value')
sys.exit(-1)
options[pair[0].strip()] = pair[1]
# Run cmd
sync = Sync(meta.regions, options)
if meta.show_status:
sync.report_status()
elif meta.parallel:
sync.parallel_sync()
elif meta.show_regions:
sync.show_regions()
elif meta.make_backup:
sync.make_backup()
else:
sync.sequential_sync(meta.dry_run) | en | 0.865413 | #!/usr/bin/env python # -- encoding: utf-8 -- # # Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U # # This file is part of FI-WARE project. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # limitations under the License. # # For those usages not covered by the Apache version 2.0 License please # contact with <EMAIL> # init object Report the synchronisation status of the regions # Don't do anything. Message has been already printed # try next region Run the synchronisation in several regions in parallel. The synchronisation inside the region is sequential (i.e. several regions are synchronised simultaneously, but only one image at time is uploaded for each region) # Remove old handlers # After a fork, os_exit() and not sys.exit() must be used. Run the synchronisation sequentially (that is, do not start the synchronisation to a region before the previous one was completed or failed :param dry_run: if true, do not synchronise images actually # Don't do anything. Message has been already printed # try next region Wait until one of the regions ends its synchronisation and then print the result :param children: :return: a dictionary or regions, indexed by the pid of the process print a full list of the regions available (excluding the master region) in all the targets defined in the configuration file make a backup of the metadata in the regions specified at the constructor (in addition to the master region). The backup is created in a directory named 'backup_glance_' with the date and time as suffix There is a file for each region (the name is backup_<region>.csv) and inside the file a line for each image. Only the information about public images/ the images owned by the tenant, can be obtained, regardless if the user is an admin. This is a limitation of the glance API # do nothing. Already logged. # Parse cmdline # Run cmd | 1.987208 | 2 |
models/object_detection/pytorch/ssd-resnet34/training/cpu/mlperf_logger.py | Pandinosaurus/models-intelai | 0 | 8795 | ### This file is originally from: [mlcommons repo](https://github.com/mlcommons/training/tree/9947bdf21ee3f2488fa4b362eec2ce7deb2ec4dd/single_stage_detector/ssd/mlperf_logger.py)
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import os
from mlperf_logging import mllog
from mlperf_logging.mllog import constants as mllog_const
mllogger = mllog.get_mllogger()
mllog.config(
filename=(os.getenv("COMPLIANCE_FILE") or "mlperf_compliance.log"),
root_dir=os.path.normpath(os.path.dirname(os.path.realpath(__file__))))
def ssd_print(*args, sync=True, **kwargs):
use_cuda = os.getenv('USE_CUDA')
if sync and use_cuda=='True':
barrier()
if get_rank() == 0:
kwargs['stack_offset'] = 2
mllogger.event(*args, **kwargs)
def barrier():
"""
Works as a temporary distributed barrier, currently pytorch
doesn't implement barrier for NCCL backend.
Calls all_reduce on dummy tensor and synchronizes with GPU.
"""
if torch.distributed.is_initialized():
torch.distributed.all_reduce(torch.cuda.FloatTensor(1))
torch.cuda.synchronize()
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = os.getenv('RANK', os.getenv('LOCAL_RANK', 0))
return rank
def broadcast_seeds(seed, device):
if torch.distributed.is_initialized():
seeds_tensor = torch.LongTensor([seed]).to(device)
torch.distributed.broadcast(seeds_tensor, 0)
seed = seeds_tensor.item()
return seed
| ### This file is originally from: [mlcommons repo](https://github.com/mlcommons/training/tree/9947bdf21ee3f2488fa4b362eec2ce7deb2ec4dd/single_stage_detector/ssd/mlperf_logger.py)
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import os
from mlperf_logging import mllog
from mlperf_logging.mllog import constants as mllog_const
mllogger = mllog.get_mllogger()
mllog.config(
filename=(os.getenv("COMPLIANCE_FILE") or "mlperf_compliance.log"),
root_dir=os.path.normpath(os.path.dirname(os.path.realpath(__file__))))
def ssd_print(*args, sync=True, **kwargs):
use_cuda = os.getenv('USE_CUDA')
if sync and use_cuda=='True':
barrier()
if get_rank() == 0:
kwargs['stack_offset'] = 2
mllogger.event(*args, **kwargs)
def barrier():
"""
Works as a temporary distributed barrier, currently pytorch
doesn't implement barrier for NCCL backend.
Calls all_reduce on dummy tensor and synchronizes with GPU.
"""
if torch.distributed.is_initialized():
torch.distributed.all_reduce(torch.cuda.FloatTensor(1))
torch.cuda.synchronize()
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = os.getenv('RANK', os.getenv('LOCAL_RANK', 0))
return rank
def broadcast_seeds(seed, device):
if torch.distributed.is_initialized():
seeds_tensor = torch.LongTensor([seed]).to(device)
torch.distributed.broadcast(seeds_tensor, 0)
seed = seeds_tensor.item()
return seed
| en | 0.840162 | ### This file is originally from: [mlcommons repo](https://github.com/mlcommons/training/tree/9947bdf21ee3f2488fa4b362eec2ce7deb2ec4dd/single_stage_detector/ssd/mlperf_logger.py) # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Works as a temporary distributed barrier, currently pytorch doesn't implement barrier for NCCL backend. Calls all_reduce on dummy tensor and synchronizes with GPU. Gets distributed rank or returns zero if distributed is not initialized. | 2.0774 | 2 |
omtk/models/model_avar_surface_lips.py | CDufour909/omtk_unreal | 0 | 8796 | import math
import pymel.core as pymel
from omtk.core.classNode import Node
from omtk.libs import libAttr
from omtk.libs import libRigging
from . import model_avar_surface
class SplitterNode(Node):
"""
A splitter is a node network that takes the parameterV that is normally sent through the follicles and
splits it between two destinations: the follicles and the jaw ref constraint.
The more the jaw is opened, the more we'll transfer to the jaw ref before sending to the follicle.
This is mainly used to ensure that any lip movement created by the jaw is canceled when the
animator tries to correct the lips while the jaw is open (otherwise the corrections in jaw space
and in surface space would conflict).
To compute the displacement caused by the jaw, we'll use the circumference around the jaw pivot.
This creates an 'approximation' that might be wrong if some translation also occurs in the jaw.
todo: test with corrective jaw translation
"""
def __init__(self):
super(SplitterNode, self).__init__() # useless
self.attr_inn_jaw_pt = None
self.attr_inn_jaw_radius = None
self.attr_inn_surface_v = None
self.attr_inn_surface_range_v = None
self.attr_inn_jaw_default_ratio = None
self.attr_out_surface_v = None
self.attr_out_jaw_ratio = None
def build(self, nomenclature_rig, **kwargs):
super(SplitterNode, self).build(**kwargs)
#
# Create inn and out attributes.
#
grp_splitter_inn = pymel.createNode(
'network',
name=nomenclature_rig.resolve('udSplitterInn')
)
# The jaw opening amount in degree.
self.attr_inn_jaw_pt = libAttr.addAttr(grp_splitter_inn, 'innJawOpen')
# The relative uv coordinates normally sent to the follicles.
# Note that this value is expected to change at the output of the SplitterNode (see outSurfaceU and outSurfaceV)
self.attr_inn_surface_u = libAttr.addAttr(grp_splitter_inn, 'innSurfaceU')
self.attr_inn_surface_v = libAttr.addAttr(grp_splitter_inn, 'innSurfaceV')
# Use this switch to disable completely the splitter.
self.attr_inn_bypass = libAttr.addAttr(grp_splitter_inn, 'innBypassAmount')
# The arc length in world space of the surface controlling the follicles.
self.attr_inn_surface_range_v = libAttr.addAttr(grp_splitter_inn,
'innSurfaceRangeV') # How many degree does take the jaw to create 1 unit of surface deformation? (ex: 20)
# How much inn percent is the lips following the jaw by default.
# Note that this value is expected to change at the output of the SplitterNode (see attr_out_jaw_ratio)
self.attr_inn_jaw_default_ratio = libAttr.addAttr(grp_splitter_inn, 'jawDefaultRatio')
# The radius of the influence circle normally resolved by using the distance between the jaw and the avar as radius.
self.attr_inn_jaw_radius = libAttr.addAttr(grp_splitter_inn, 'jawRadius')
grp_splitter_out = pymel.createNode(
'network',
name=nomenclature_rig.resolve('udSplitterOut')
)
self.attr_out_surface_u = libAttr.addAttr(grp_splitter_out, 'outSurfaceU')
self.attr_out_surface_v = libAttr.addAttr(grp_splitter_out, 'outSurfaceV')
self.attr_out_jaw_ratio = libAttr.addAttr(grp_splitter_out,
'outJawRatio') # How much percent this influence follow the jaw after cancellation.
#
# Connect inn and out network nodes so they can easily be found from the SplitterNode.
#
attr_inn = libAttr.addAttr(grp_splitter_inn, longName='inn', attributeType='message')
attr_out = libAttr.addAttr(grp_splitter_out, longName='out', attributeType='message')
pymel.connectAttr(self.node.message, attr_inn)
pymel.connectAttr(self.node.message, attr_out)
#
# Create node networks
# Step 1: Get the jaw displacement in uv space (parameterV only).
#
attr_jaw_circumference = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getJawCircumference'),
input1X=self.attr_inn_jaw_radius,
input2X=(math.pi * 2.0)
).outputX
attr_jaw_open_circle_ratio = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getJawOpenCircleRatio'),
operation=2, # divide
input1X=self.attr_inn_jaw_pt,
input2X=360.0
).outputX
attr_jaw_active_circumference = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getJawActiveCircumference'),
input1X=attr_jaw_circumference,
input2X=attr_jaw_open_circle_ratio
).outputX
attr_jaw_v_range = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getActiveJawRangeInSurfaceSpace'),
operation=2, # divide
input1X=attr_jaw_active_circumference,
input2X=self.attr_inn_surface_range_v
).outputX
#
# Step 2: Resolve the output jaw_ratio
#
# Note that this can throw a zero division warning in Maya.
# To prevent that we'll use some black-magic-ugly-ass-trick.
attr_jaw_ratio_cancelation = libRigging.create_safe_division(
self.attr_inn_surface_v,
attr_jaw_v_range,
nomenclature_rig,
'getJawRatioCancellation'
)
attr_jaw_ratio_out_raw = libRigging.create_utility_node(
'plusMinusAverage',
name=nomenclature_rig.resolve('getJawRatioOutUnlimited'),
operation=2, # substraction,
input1D=(
self.attr_inn_jaw_default_ratio,
attr_jaw_ratio_cancelation
)
).output1D
attr_jaw_ratio_out_limited = libRigging.create_utility_node(
'clamp',
name=nomenclature_rig.resolve('getJawRatioOutLimited'),
inputR=attr_jaw_ratio_out_raw,
minR=0.0,
maxR=1.0
).outputR
#
# Step 3: Resolve attr_out_surface_u & attr_out_surface_v
#
attr_inn_jaw_default_ratio_inv = libRigging.create_utility_node(
'reverse',
name=nomenclature_rig.resolve('getJawDefaultRatioInv'),
inputX=self.attr_inn_jaw_default_ratio
).outputX
util_jaw_uv_default_ratio = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getJawDefaultRatioUvSpace'),
input1X=self.attr_inn_jaw_default_ratio,
input1Y=attr_inn_jaw_default_ratio_inv,
input2X=attr_jaw_v_range,
input2Y=attr_jaw_v_range
)
attr_jaw_uv_default_ratio = util_jaw_uv_default_ratio.outputX
attr_jaw_uv_default_ratio_inv = util_jaw_uv_default_ratio.outputY
attr_jaw_uv_limit_max = libRigging.create_utility_node(
'plusMinusAverage',
name=nomenclature_rig.resolve('getJawSurfaceLimitMax'),
operation=2, # substract
input1D=(attr_jaw_v_range, attr_jaw_uv_default_ratio_inv)
).output1D
attr_jaw_uv_limit_min = libRigging.create_utility_node(
'plusMinusAverage',
name=nomenclature_rig.resolve('getJawSurfaceLimitMin'),
operation=2, # substract
input1D=(attr_jaw_uv_default_ratio, attr_jaw_v_range)
).output1D
attr_jaw_cancel_range = libRigging.create_utility_node(
'clamp',
name=nomenclature_rig.resolve('getJawCancelRange'),
inputR=self.attr_inn_surface_v,
minR=attr_jaw_uv_limit_min,
maxR=attr_jaw_uv_limit_max
).outputR
attr_out_surface_v_cancelled = libRigging.create_utility_node(
'plusMinusAverage',
name=nomenclature_rig.resolve('getCanceledUv'),
operation=2, # substraction
input1D=(self.attr_inn_surface_v, attr_jaw_cancel_range)
).output1D
#
# Connect output attributes
#
attr_inn_bypass_inv = libRigging.create_utility_node(
'reverse',
name=nomenclature_rig.resolve('getBypassInv'),
inputX=self.attr_inn_bypass
).outputX
# Connect output jaw_ratio
attr_output_jaw_ratio = libRigging.create_utility_node(
'blendWeighted',
input=(attr_jaw_ratio_out_limited, self.attr_inn_jaw_default_ratio),
weight=(attr_inn_bypass_inv, self.attr_inn_bypass)
).output
pymel.connectAttr(attr_output_jaw_ratio, self.attr_out_jaw_ratio)
# Connect output surface u
pymel.connectAttr(self.attr_inn_surface_u, self.attr_out_surface_u)
# Connect output surface_v
attr_output_surface_v = libRigging.create_utility_node(
'blendWeighted',
input=(attr_out_surface_v_cancelled, self.attr_inn_surface_v),
weight=(attr_inn_bypass_inv, self.attr_inn_bypass)
).output
pymel.connectAttr(attr_output_surface_v, self.attr_out_surface_v)
class AvarSurfaceLipModel(model_avar_surface.AvarSurfaceModel):
"""
Custom avar model for the complex situation that is the lips.
This ensures that we are moving according to the jaw before sliding on the surface.
"""
def __init__(self, *args, **kwargs):
super(AvarSurfaceLipModel, self).__init__(*args, **kwargs)
self._attr_inn_jaw_bindpose = None
self._attr_inn_jaw_pitch = None
self._attr_inn_jaw_ratio_default = None
self._attr_inn_bypass_splitter = None
self._attr_out_jaw_ratio = None
def _create_interface(self):
super(AvarSurfaceLipModel, self)._create_interface()
self._attr_inn_jaw_bindpose = libAttr.addAttr(self.grp_rig, 'innJawBindPose', dataType='matrix')
self._attr_inn_jaw_pitch = libAttr.addAttr(self.grp_rig, 'innJawPitch', defaultValue=0)
self._attr_inn_jaw_ratio_default = libAttr.addAttr(self.grp_rig, 'innJawRatioDefault', defaultValue=0)
self._attr_inn_bypass_splitter = libAttr.addAttr(self.grp_rig, 'innBypassSplitter')
self._attr_inn_ud_bypass = libAttr.addAttr(self.grp_rig, 'innBypassUD')
# self._attr_inn_surface_length_u = libAttr.addAttr(self.grp_rig, 'innSurfaceLengthU', defaultValue=0)
# self._attr_inn_surface_length_v = libAttr.addAttr(self.grp_rig, 'innSurfaceLengthV', defaultValue=0)
self._attr_out_jaw_ratio = libAttr.addAttr(self.grp_rig, 'outJawRatio')
def connect_avar(self, avar):
super(AvarSurfaceLipModel, self).connect_avar(avar)
# Note: We expect a FaceLipAvar
pymel.connectAttr(avar._attr_jaw_bind_tm, self._attr_inn_jaw_bindpose)
pymel.connectAttr(avar._attr_jaw_pitch, self._attr_inn_jaw_pitch)
pymel.connectAttr(avar._attr_inn_jaw_ratio_default, self._attr_inn_jaw_ratio_default)
pymel.connectAttr(avar._attr_bypass_splitter, self._attr_inn_bypass_splitter)
pymel.connectAttr(avar.attr_ud_bypass, self._attr_inn_ud_bypass)
def _get_follicle_relative_uv_attr(self, **kwargs):
nomenclature_rig = self.get_nomenclature_rig()
attr_u, attr_v = super(AvarSurfaceLipModel, self)._get_follicle_relative_uv_attr(**kwargs)
util_decompose_jaw_bind_tm = libRigging.create_utility_node(
'decomposeMatrix',
inputMatrix=self._attr_inn_jaw_bindpose,
)
#
# Create and connect Splitter Node
#
splitter = SplitterNode()
splitter.build(
nomenclature_rig,
name=nomenclature_rig.resolve('splitter')
)
splitter.setParent(self.grp_rig)
# Resolve the radius of the jaw influence. Used by the splitter.
attr_jaw_radius = libRigging.create_utility_node(
'distanceBetween',
name=nomenclature_rig.resolve('getJawRadius'),
point1=self.grp_offset.translate,
point2=util_decompose_jaw_bind_tm.outputTranslate
).distance
# Resolve the jaw pitch. Used by the splitter.
attr_jaw_pitch = self._attr_inn_jaw_pitch
# Connect the splitter inputs
pymel.connectAttr(attr_u, splitter.attr_inn_surface_u)
pymel.connectAttr(attr_v, splitter.attr_inn_surface_v)
pymel.connectAttr(self._attr_inn_jaw_ratio_default, splitter.attr_inn_jaw_default_ratio)
pymel.connectAttr(self._attr_length_v, splitter.attr_inn_surface_range_v)
pymel.connectAttr(attr_jaw_radius, splitter.attr_inn_jaw_radius)
pymel.connectAttr(attr_jaw_pitch, splitter.attr_inn_jaw_pt)
pymel.connectAttr(self._attr_inn_bypass_splitter, splitter.attr_inn_bypass)
attr_u = splitter.attr_out_surface_u
attr_v = splitter.attr_out_surface_v
# Create constraint to control the jaw reference
pymel.connectAttr(splitter.attr_out_jaw_ratio, self._attr_out_jaw_ratio)
#
# Implement the 'bypass' avars.
# These avars bypass the splitter; they are used in corner cases only.
#
attr_attr_ud_bypass_adjusted = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getAdjustedUdBypass'),
input1X=self._attr_inn_ud_bypass,
input2X=self.multiplier_ud
).outputX
attr_v = libRigging.create_utility_node(
'addDoubleLinear',
name=nomenclature_rig.resolve('addBypassAvar'),
input1=attr_v,
input2=attr_attr_ud_bypass_adjusted
).output
return attr_u, attr_v
| import math
import pymel.core as pymel
from omtk.core.classNode import Node
from omtk.libs import libAttr
from omtk.libs import libRigging
from . import model_avar_surface
class SplitterNode(Node):
"""
A splitter is a node network that takes the parameterV that is normally sent through the follicles and
splits it between two destinations: the follicles and the jaw ref constraint.
The more the jaw is opened, the more we'll transfer to the jaw ref before sending to the follicle.
This is mainly used to ensure that any lip movement created by the jaw is canceled when the
animator tries to correct the lips while the jaw is open (otherwise the corrections in jaw space
and in surface space would conflict).
To compute the displacement caused by the jaw, we'll use the circumference around the jaw pivot.
This creates an 'approximation' that might be wrong if some translation also occurs in the jaw.
todo: test with corrective jaw translation
"""
def __init__(self):
super(SplitterNode, self).__init__() # useless
self.attr_inn_jaw_pt = None
self.attr_inn_jaw_radius = None
self.attr_inn_surface_v = None
self.attr_inn_surface_range_v = None
self.attr_inn_jaw_default_ratio = None
self.attr_out_surface_v = None
self.attr_out_jaw_ratio = None
def build(self, nomenclature_rig, **kwargs):
super(SplitterNode, self).build(**kwargs)
#
# Create inn and out attributes.
#
grp_splitter_inn = pymel.createNode(
'network',
name=nomenclature_rig.resolve('udSplitterInn')
)
# The jaw opening amount in degree.
self.attr_inn_jaw_pt = libAttr.addAttr(grp_splitter_inn, 'innJawOpen')
# The relative uv coordinates normally sent to the follicles.
# Note that this value is expected to change at the output of the SplitterNode (see outSurfaceU and outSurfaceV)
self.attr_inn_surface_u = libAttr.addAttr(grp_splitter_inn, 'innSurfaceU')
self.attr_inn_surface_v = libAttr.addAttr(grp_splitter_inn, 'innSurfaceV')
# Use this switch to disable completely the splitter.
self.attr_inn_bypass = libAttr.addAttr(grp_splitter_inn, 'innBypassAmount')
# The arc length in world space of the surface controlling the follicles.
self.attr_inn_surface_range_v = libAttr.addAttr(grp_splitter_inn,
'innSurfaceRangeV') # How many degree does take the jaw to create 1 unit of surface deformation? (ex: 20)
# How much inn percent is the lips following the jaw by default.
# Note that this value is expected to change at the output of the SplitterNode (see attr_out_jaw_ratio)
self.attr_inn_jaw_default_ratio = libAttr.addAttr(grp_splitter_inn, 'jawDefaultRatio')
# The radius of the influence circle normally resolved by using the distance between the jaw and the avar as radius.
self.attr_inn_jaw_radius = libAttr.addAttr(grp_splitter_inn, 'jawRadius')
grp_splitter_out = pymel.createNode(
'network',
name=nomenclature_rig.resolve('udSplitterOut')
)
self.attr_out_surface_u = libAttr.addAttr(grp_splitter_out, 'outSurfaceU')
self.attr_out_surface_v = libAttr.addAttr(grp_splitter_out, 'outSurfaceV')
self.attr_out_jaw_ratio = libAttr.addAttr(grp_splitter_out,
'outJawRatio') # How much percent this influence follow the jaw after cancellation.
#
# Connect inn and out network nodes so they can easily be found from the SplitterNode.
#
attr_inn = libAttr.addAttr(grp_splitter_inn, longName='inn', attributeType='message')
attr_out = libAttr.addAttr(grp_splitter_out, longName='out', attributeType='message')
pymel.connectAttr(self.node.message, attr_inn)
pymel.connectAttr(self.node.message, attr_out)
#
# Create node networks
# Step 1: Get the jaw displacement in uv space (parameterV only).
#
attr_jaw_circumference = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getJawCircumference'),
input1X=self.attr_inn_jaw_radius,
input2X=(math.pi * 2.0)
).outputX
attr_jaw_open_circle_ratio = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getJawOpenCircleRatio'),
operation=2, # divide
input1X=self.attr_inn_jaw_pt,
input2X=360.0
).outputX
attr_jaw_active_circumference = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getJawActiveCircumference'),
input1X=attr_jaw_circumference,
input2X=attr_jaw_open_circle_ratio
).outputX
attr_jaw_v_range = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getActiveJawRangeInSurfaceSpace'),
operation=2, # divide
input1X=attr_jaw_active_circumference,
input2X=self.attr_inn_surface_range_v
).outputX
#
# Step 2: Resolve the output jaw_ratio
#
# Note that this can throw a zero division warning in Maya.
# To prevent that we'll use some black-magic-ugly-ass-trick.
attr_jaw_ratio_cancelation = libRigging.create_safe_division(
self.attr_inn_surface_v,
attr_jaw_v_range,
nomenclature_rig,
'getJawRatioCancellation'
)
attr_jaw_ratio_out_raw = libRigging.create_utility_node(
'plusMinusAverage',
name=nomenclature_rig.resolve('getJawRatioOutUnlimited'),
operation=2, # substraction,
input1D=(
self.attr_inn_jaw_default_ratio,
attr_jaw_ratio_cancelation
)
).output1D
attr_jaw_ratio_out_limited = libRigging.create_utility_node(
'clamp',
name=nomenclature_rig.resolve('getJawRatioOutLimited'),
inputR=attr_jaw_ratio_out_raw,
minR=0.0,
maxR=1.0
).outputR
#
# Step 3: Resolve attr_out_surface_u & attr_out_surface_v
#
attr_inn_jaw_default_ratio_inv = libRigging.create_utility_node(
'reverse',
name=nomenclature_rig.resolve('getJawDefaultRatioInv'),
inputX=self.attr_inn_jaw_default_ratio
).outputX
util_jaw_uv_default_ratio = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getJawDefaultRatioUvSpace'),
input1X=self.attr_inn_jaw_default_ratio,
input1Y=attr_inn_jaw_default_ratio_inv,
input2X=attr_jaw_v_range,
input2Y=attr_jaw_v_range
)
attr_jaw_uv_default_ratio = util_jaw_uv_default_ratio.outputX
attr_jaw_uv_default_ratio_inv = util_jaw_uv_default_ratio.outputY
attr_jaw_uv_limit_max = libRigging.create_utility_node(
'plusMinusAverage',
name=nomenclature_rig.resolve('getJawSurfaceLimitMax'),
operation=2, # substract
input1D=(attr_jaw_v_range, attr_jaw_uv_default_ratio_inv)
).output1D
attr_jaw_uv_limit_min = libRigging.create_utility_node(
'plusMinusAverage',
name=nomenclature_rig.resolve('getJawSurfaceLimitMin'),
operation=2, # substract
input1D=(attr_jaw_uv_default_ratio, attr_jaw_v_range)
).output1D
attr_jaw_cancel_range = libRigging.create_utility_node(
'clamp',
name=nomenclature_rig.resolve('getJawCancelRange'),
inputR=self.attr_inn_surface_v,
minR=attr_jaw_uv_limit_min,
maxR=attr_jaw_uv_limit_max
).outputR
attr_out_surface_v_cancelled = libRigging.create_utility_node(
'plusMinusAverage',
name=nomenclature_rig.resolve('getCanceledUv'),
operation=2, # substraction
input1D=(self.attr_inn_surface_v, attr_jaw_cancel_range)
).output1D
#
# Connect output attributes
#
attr_inn_bypass_inv = libRigging.create_utility_node(
'reverse',
name=nomenclature_rig.resolve('getBypassInv'),
inputX=self.attr_inn_bypass
).outputX
# Connect output jaw_ratio
attr_output_jaw_ratio = libRigging.create_utility_node(
'blendWeighted',
input=(attr_jaw_ratio_out_limited, self.attr_inn_jaw_default_ratio),
weight=(attr_inn_bypass_inv, self.attr_inn_bypass)
).output
pymel.connectAttr(attr_output_jaw_ratio, self.attr_out_jaw_ratio)
# Connect output surface u
pymel.connectAttr(self.attr_inn_surface_u, self.attr_out_surface_u)
# Connect output surface_v
attr_output_surface_v = libRigging.create_utility_node(
'blendWeighted',
input=(attr_out_surface_v_cancelled, self.attr_inn_surface_v),
weight=(attr_inn_bypass_inv, self.attr_inn_bypass)
).output
pymel.connectAttr(attr_output_surface_v, self.attr_out_surface_v)
class AvarSurfaceLipModel(model_avar_surface.AvarSurfaceModel):
"""
Custom avar model for the complex situation that is the lips.
This ensures that we are moving according to the jaw before sliding on the surface.
"""
def __init__(self, *args, **kwargs):
super(AvarSurfaceLipModel, self).__init__(*args, **kwargs)
self._attr_inn_jaw_bindpose = None
self._attr_inn_jaw_pitch = None
self._attr_inn_jaw_ratio_default = None
self._attr_inn_bypass_splitter = None
self._attr_out_jaw_ratio = None
def _create_interface(self):
super(AvarSurfaceLipModel, self)._create_interface()
self._attr_inn_jaw_bindpose = libAttr.addAttr(self.grp_rig, 'innJawBindPose', dataType='matrix')
self._attr_inn_jaw_pitch = libAttr.addAttr(self.grp_rig, 'innJawPitch', defaultValue=0)
self._attr_inn_jaw_ratio_default = libAttr.addAttr(self.grp_rig, 'innJawRatioDefault', defaultValue=0)
self._attr_inn_bypass_splitter = libAttr.addAttr(self.grp_rig, 'innBypassSplitter')
self._attr_inn_ud_bypass = libAttr.addAttr(self.grp_rig, 'innBypassUD')
# self._attr_inn_surface_length_u = libAttr.addAttr(self.grp_rig, 'innSurfaceLengthU', defaultValue=0)
# self._attr_inn_surface_length_v = libAttr.addAttr(self.grp_rig, 'innSurfaceLengthV', defaultValue=0)
self._attr_out_jaw_ratio = libAttr.addAttr(self.grp_rig, 'outJawRatio')
def connect_avar(self, avar):
super(AvarSurfaceLipModel, self).connect_avar(avar)
# Note: We expect a FaceLipAvar
pymel.connectAttr(avar._attr_jaw_bind_tm, self._attr_inn_jaw_bindpose)
pymel.connectAttr(avar._attr_jaw_pitch, self._attr_inn_jaw_pitch)
pymel.connectAttr(avar._attr_inn_jaw_ratio_default, self._attr_inn_jaw_ratio_default)
pymel.connectAttr(avar._attr_bypass_splitter, self._attr_inn_bypass_splitter)
pymel.connectAttr(avar.attr_ud_bypass, self._attr_inn_ud_bypass)
def _get_follicle_relative_uv_attr(self, **kwargs):
nomenclature_rig = self.get_nomenclature_rig()
attr_u, attr_v = super(AvarSurfaceLipModel, self)._get_follicle_relative_uv_attr(**kwargs)
util_decompose_jaw_bind_tm = libRigging.create_utility_node(
'decomposeMatrix',
inputMatrix=self._attr_inn_jaw_bindpose,
)
#
# Create and connect Splitter Node
#
splitter = SplitterNode()
splitter.build(
nomenclature_rig,
name=nomenclature_rig.resolve('splitter')
)
splitter.setParent(self.grp_rig)
# Resolve the radius of the jaw influence. Used by the splitter.
attr_jaw_radius = libRigging.create_utility_node(
'distanceBetween',
name=nomenclature_rig.resolve('getJawRadius'),
point1=self.grp_offset.translate,
point2=util_decompose_jaw_bind_tm.outputTranslate
).distance
# Resolve the jaw pitch. Used by the splitter.
attr_jaw_pitch = self._attr_inn_jaw_pitch
# Connect the splitter inputs
pymel.connectAttr(attr_u, splitter.attr_inn_surface_u)
pymel.connectAttr(attr_v, splitter.attr_inn_surface_v)
pymel.connectAttr(self._attr_inn_jaw_ratio_default, splitter.attr_inn_jaw_default_ratio)
pymel.connectAttr(self._attr_length_v, splitter.attr_inn_surface_range_v)
pymel.connectAttr(attr_jaw_radius, splitter.attr_inn_jaw_radius)
pymel.connectAttr(attr_jaw_pitch, splitter.attr_inn_jaw_pt)
pymel.connectAttr(self._attr_inn_bypass_splitter, splitter.attr_inn_bypass)
attr_u = splitter.attr_out_surface_u
attr_v = splitter.attr_out_surface_v
# Create constraint to control the jaw reference
pymel.connectAttr(splitter.attr_out_jaw_ratio, self._attr_out_jaw_ratio)
#
# Implement the 'bypass' avars.
# These avars bypass the splitter; they are used in corner cases only.
#
attr_attr_ud_bypass_adjusted = libRigging.create_utility_node(
'multiplyDivide',
name=nomenclature_rig.resolve('getAdjustedUdBypass'),
input1X=self._attr_inn_ud_bypass,
input2X=self.multiplier_ud
).outputX
attr_v = libRigging.create_utility_node(
'addDoubleLinear',
name=nomenclature_rig.resolve('addBypassAvar'),
input1=attr_v,
input2=attr_attr_ud_bypass_adjusted
).output
return attr_u, attr_v
| en | 0.848686 | A splitter is a node network that take the parameterV that is normally sent through the follicles and split it between two destination: the follicles and the jaw ref constraint. The more the jaw is opened, the more we'll transfer to the jaw ref before sending to the follicle. This is mainly used to ensure that any lip movement created by the jaw is canceled when the animator try to correct the lips and the jaw is open. Otherwise since the jaw space and the surface space To compute the displacement caused by the was, we'll usethe circumference around the jaw pivot. This create an 'approximation' that might be wrong if some translation also occur in the jaw. todo: test with corrective jaw translation # useless # # Create inn and out attributes. # # The jaw opening amount in degree. # The relative uv coordinates normally sent to the follicles. # Note that this value is expected to change at the output of the SplitterNode (see outSurfaceU and outSurfaceV) # Use this switch to disable completely the splitter. # The arc length in world space of the surface controlling the follicles. # How many degree does take the jaw to create 1 unit of surface deformation? (ex: 20) # How much inn percent is the lips following the jaw by default. # Note that this value is expected to change at the output of the SplitterNode (see attr_out_jaw_ratio) # The radius of the influence circle normally resolved by using the distance between the jaw and the avar as radius. # How much percent this influence follow the jaw after cancellation. # # Connect inn and out network nodes so they can easily be found from the SplitterNode. # # # Create node networks # Step 1: Get the jaw displacement in uv space (parameterV only). # # divide # divide # # Step 2: Resolve the output jaw_ratio # # Note that this can throw a zero division warning in Maya. # To prevent that we'll use some black-magic-ugly-ass-trick. # substraction, # # Step 3: Resolve attr_out_surface_u & attr_out_surface_v # # substract # substract # substraction # # Connect output attributes # # Connect output jaw_ratio # Connect output surface u # Connect output surface_v Custom avar model for the complex situation that is the lips. This ensure that we are moving according to the jaw before sliding on the surface. # self._attr_inn_surface_length_u = libAttr.addAttr(self.grp_rig, 'innSurfaceLengthU', defaultValue=0) # self._attr_inn_surface_length_v = libAttr.addAttr(self.grp_rig, 'innSurfaceLengthV', defaultValue=0) # Note: We expect a FaceLipAvar # # Create and connect Splitter Node # # Resolve the radius of the jaw influence. Used by the splitter. # Resolve the jaw pitch. Used by the splitter. # Connect the splitter inputs # Create constraint to controller the jaw reference # # Implement the 'bypass' avars. # Thoses avars bypass the splitter, used in corner cases only. # | 2.627216 | 3 |
project/server/main/feed.py | dataesr/harvest-theses | 0 | 8797 | <filename>project/server/main/feed.py
import datetime
import os
import pymongo
import requests
from urllib import parse
from urllib.parse import quote_plus
import json
from retry import retry
from bs4 import BeautifulSoup
import math
from project.server.main.logger import get_logger
from project.server.main.utils_swift import upload_object
from project.server.main.parse import parse_theses, get_idref_from_OS
from project.server.main.referentiel import harvest_and_save_idref
logger = get_logger(__name__)
def get_num_these(soup):
num_theses = []
for d in soup.find_all('doc'):
num_theses.append(d.find('str', {'name': 'num'}).text)
return num_theses
@retry(delay=60, tries=5)
def get_num_these_between_dates(start_date, end_date):
start_date_str = start_date.strftime("%d/%m/%Y")
end_date_str = end_date.strftime("%d/%m/%Y")
start_date_str_iso = start_date.strftime("%Y%m%d")
end_date_str_iso = end_date.strftime("%Y%m%d")
start = 0
url = "http://theses.fr/?q=&zone1=titreRAs&val1=&op1=AND&zone2=auteurs&val2=&op2=AND&zone3=etabSoutenances&val3=&op3=AND&zone4=dateSoutenance&val4a={}&val4b={}&start={}&format=xml"
logger.debug(url.format(start_date_str, end_date_str, start))
r = requests.get(url.format(start_date_str, end_date_str, start))
soup = BeautifulSoup(r.text, 'lxml')
nb_res = soup.find('result', {'name': 'response'}).attrs['numfound']
logger.debug("{} resultats entre {} et {}".format(nb_res, start_date_str_iso, end_date_str_iso ))
num_theses = get_num_these(soup)
nb_pages_remaining = math.ceil(int(nb_res)/1000)
for p in range(1, nb_pages_remaining):
logger.debug("page {} for entre {} et {}".format(p, start_date_str_iso, end_date_str_iso))
r = requests.get(url.format(start_date_str, end_date_str, p * 1000))
soup = BeautifulSoup(r.text, 'lxml')
num_theses += get_num_these(soup)
return num_theses
def save_data(data, collection_name, year_start, year_end, chunk_index, referentiel):
logger.debug(f'save_data theses {collection_name} {chunk_index}')
year_start_end = 'all_years'
if year_start and year_end:
year_start_end = f'{year_start}_{year_end}'
# 1. save raw data to OS
current_file = f'theses_{year_start_end}_{chunk_index}.json'
json.dump(data, open(current_file, 'w'))
os.system(f'gzip {current_file}')
upload_object('theses', f'{current_file}.gz', f'{collection_name}/raw/{current_file}.gz')
os.system(f'rm -rf {current_file}.gz')
# 2.transform data and save in mongo
current_file_parsed = f'theses_parsed_{year_start_end}_{chunk_index}.json'
data_parsed = [parse_theses(e, referentiel, collection_name) for e in data]
json.dump(data_parsed, open(current_file_parsed, 'w'))
# insert_data(collection_name, current_file_parsed)
os.system(f'gzip {current_file_parsed}')
upload_object('theses', f'{current_file_parsed}.gz', f'{collection_name}/parsed/{current_file_parsed}.gz')
os.system(f'rm -rf {current_file_parsed}.gz')
def harvest_and_insert(collection_name):
# 1. save aurehal structures
harvest_and_save_idref(collection_name)
referentiel = get_idref_from_OS(collection_name)
# 2. drop mongo
#logger.debug(f'dropping {collection_name} collection before insertion')
#myclient = pymongo.MongoClient('mongodb://mongo:27017/')
#myclient['theses'][collection_name].drop()
# 3. save publications
year_start = None
year_end = None
if year_start is None:
year_start = 1990
if year_end is None:
year_end = datetime.date.today().year
harvest_and_insert_one_year(collection_name, year_start, year_end, referentiel)
@retry(delay=60, tries=5)
def download_these_notice(these_id):
res = {'id': these_id}
r_tefudoc = requests.get("http://www.theses.fr/{}.tefudoc".format(these_id))
r_xml = requests.get("http://www.theses.fr/{}.xml".format(these_id))
if r_tefudoc.text[0:5] == "<?xml":
res['tefudoc'] = r_tefudoc.text
if r_xml.text[0:5] == "<?xml":
res['xml'] = r_xml.text
return res
def harvest_and_insert_one_year(collection_name, year_start, year_end, referentiel):
year_start_end = 'all_years'
if year_start and year_end:
year_start_end = f'{year_start}_{year_end}'
start_date = datetime.datetime(year_start,1,1)
end_date = datetime.datetime(year_end + 1,1,1) + datetime.timedelta(days = -1)
all_num_theses = get_num_these_between_dates(start_date, end_date)
# todo save by chunk
chunk_index = 0
data = []
MAX_DATA_SIZE = 25000
nb_theses = len(all_num_theses)
logger.debug(f'{nb_theses} theses to download and parse')
for ix, nnt in enumerate(all_num_theses):
if ix % 100 == 0:
logger.debug(f'theses {year_start_end} {ix}')
res = download_these_notice(nnt)
data.append(res)
if (len(data) > MAX_DATA_SIZE) or (ix == nb_theses - 1):
if data:
save_data(data, collection_name, year_start, year_end, chunk_index, referentiel)
data = []
chunk_index += 1
def insert_data(collection_name, output_file):
myclient = pymongo.MongoClient('mongodb://mongo:27017/')
mydb = myclient['theses']
## mongo start
start = datetime.datetime.now()
mongoimport = f"mongoimport --numInsertionWorkers 2 --uri mongodb://mongo:27017/theses --file {output_file}" \
f" --collection {collection_name} --jsonArray"
logger.debug(f'Mongoimport {output_file} start at {start}')
logger.debug(f'{mongoimport}')
os.system(mongoimport)
logger.debug(f'Checking indexes on collection {collection_name}')
mycol = mydb[collection_name]
#mycol.create_index('docid')
end = datetime.datetime.now()
delta = end - start
logger.debug(f'Mongoimport done in {delta}')
## mongo done
| <filename>project/server/main/feed.py
import datetime
import os
import pymongo
import requests
from urllib import parse
from urllib.parse import quote_plus
import json
from retry import retry
from bs4 import BeautifulSoup
import math
from project.server.main.logger import get_logger
from project.server.main.utils_swift import upload_object
from project.server.main.parse import parse_theses, get_idref_from_OS
from project.server.main.referentiel import harvest_and_save_idref
logger = get_logger(__name__)
def get_num_these(soup):
num_theses = []
for d in soup.find_all('doc'):
num_theses.append(d.find('str', {'name': 'num'}).text)
return num_theses
@retry(delay=60, tries=5)
def get_num_these_between_dates(start_date, end_date):
start_date_str = start_date.strftime("%d/%m/%Y")
end_date_str = end_date.strftime("%d/%m/%Y")
start_date_str_iso = start_date.strftime("%Y%m%d")
end_date_str_iso = end_date.strftime("%Y%m%d")
start = 0
url = "http://theses.fr/?q=&zone1=titreRAs&val1=&op1=AND&zone2=auteurs&val2=&op2=AND&zone3=etabSoutenances&val3=&op3=AND&zone4=dateSoutenance&val4a={}&val4b={}&start={}&format=xml"
logger.debug(url.format(start_date_str, end_date_str, start))
r = requests.get(url.format(start_date_str, end_date_str, start))
soup = BeautifulSoup(r.text, 'lxml')
nb_res = soup.find('result', {'name': 'response'}).attrs['numfound']
logger.debug("{} resultats entre {} et {}".format(nb_res, start_date_str_iso, end_date_str_iso ))
num_theses = get_num_these(soup)
nb_pages_remaining = math.ceil(int(nb_res)/1000)
for p in range(1, nb_pages_remaining):
logger.debug("page {} for entre {} et {}".format(p, start_date_str_iso, end_date_str_iso))
r = requests.get(url.format(start_date_str, end_date_str, p * 1000))
soup = BeautifulSoup(r.text, 'lxml')
num_theses += get_num_these(soup)
return num_theses
def save_data(data, collection_name, year_start, year_end, chunk_index, referentiel):
logger.debug(f'save_data theses {collection_name} {chunk_index}')
year_start_end = 'all_years'
if year_start and year_end:
year_start_end = f'{year_start}_{year_end}'
# 1. save raw data to OS
current_file = f'theses_{year_start_end}_{chunk_index}.json'
json.dump(data, open(current_file, 'w'))
os.system(f'gzip {current_file}')
upload_object('theses', f'{current_file}.gz', f'{collection_name}/raw/{current_file}.gz')
os.system(f'rm -rf {current_file}.gz')
# 2.transform data and save in mongo
current_file_parsed = f'theses_parsed_{year_start_end}_{chunk_index}.json'
data_parsed = [parse_theses(e, referentiel, collection_name) for e in data]
json.dump(data_parsed, open(current_file_parsed, 'w'))
# insert_data(collection_name, current_file_parsed)
os.system(f'gzip {current_file_parsed}')
upload_object('theses', f'{current_file_parsed}.gz', f'{collection_name}/parsed/{current_file_parsed}.gz')
os.system(f'rm -rf {current_file_parsed}.gz')
def harvest_and_insert(collection_name):
# 1. save aurehal structures
harvest_and_save_idref(collection_name)
referentiel = get_idref_from_OS(collection_name)
# 2. drop mongo
#logger.debug(f'dropping {collection_name} collection before insertion')
#myclient = pymongo.MongoClient('mongodb://mongo:27017/')
#myclient['theses'][collection_name].drop()
# 3. save publications
year_start = None
year_end = None
if year_start is None:
year_start = 1990
if year_end is None:
year_end = datetime.date.today().year
harvest_and_insert_one_year(collection_name, year_start, year_end, referentiel)
@retry(delay=60, tries=5)
def download_these_notice(these_id):
res = {'id': these_id}
r_tefudoc = requests.get("http://www.theses.fr/{}.tefudoc".format(these_id))
r_xml = requests.get("http://www.theses.fr/{}.xml".format(these_id))
if r_tefudoc.text[0:5] == "<?xml":
res['tefudoc'] = r_tefudoc.text
if r_xml.text[0:5] == "<?xml":
res['xml'] = r_xml.text
return res
def harvest_and_insert_one_year(collection_name, year_start, year_end, referentiel):
year_start_end = 'all_years'
if year_start and year_end:
year_start_end = f'{year_start}_{year_end}'
start_date = datetime.datetime(year_start,1,1)
end_date = datetime.datetime(year_end + 1,1,1) + datetime.timedelta(days = -1)
all_num_theses = get_num_these_between_dates(start_date, end_date)
# todo save by chunk
chunk_index = 0
data = []
MAX_DATA_SIZE = 25000
nb_theses = len(all_num_theses)
logger.debug(f'{nb_theses} theses to download and parse')
for ix, nnt in enumerate(all_num_theses):
if ix % 100 == 0:
logger.debug(f'theses {year_start_end} {ix}')
res = download_these_notice(nnt)
data.append(res)
if (len(data) > MAX_DATA_SIZE) or (ix == nb_theses - 1):
if data:
save_data(data, collection_name, year_start, year_end, chunk_index, referentiel)
data = []
chunk_index += 1
def insert_data(collection_name, output_file):
myclient = pymongo.MongoClient('mongodb://mongo:27017/')
mydb = myclient['theses']
## mongo start
start = datetime.datetime.now()
mongoimport = f"mongoimport --numInsertionWorkers 2 --uri mongodb://mongo:27017/theses --file {output_file}" \
f" --collection {collection_name} --jsonArray"
logger.debug(f'Mongoimport {output_file} start at {start}')
logger.debug(f'{mongoimport}')
os.system(mongoimport)
logger.debug(f'Checking indexes on collection {collection_name}')
mycol = mydb[collection_name]
#mycol.create_index('docid')
end = datetime.datetime.now()
delta = end - start
logger.debug(f'Mongoimport done in {delta}')
## mongo done
| en | 0.377705 | # 1. save raw data to OS # 2.transform data and save in mongo # insert_data(collection_name, current_file_parsed) # 1. save aurehal structures # 2. drop mongo #logger.debug(f'dropping {collection_name} collection before insertion') #myclient = pymongo.MongoClient('mongodb://mongo:27017/') #myclient['theses'][collection_name].drop() # 3. save publications # todo save by chunk ## mongo start #mycol.create_index('docid') ## mongo done | 2.335372 | 2 |
DQM/L1TMonitor/python/L1TGCT_cfi.py | ckamtsikis/cmssw | 852 | 8798 | import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
l1tGct = DQMEDAnalyzer('L1TGCT',
gctCentralJetsSource = cms.InputTag("gctDigis","cenJets"),
gctForwardJetsSource = cms.InputTag("gctDigis","forJets"),
gctTauJetsSource = cms.InputTag("gctDigis","tauJets"),
gctIsoTauJetsSource = cms.InputTag("gctDigis","fake"),
gctEnergySumsSource = cms.InputTag("gctDigis"),
gctIsoEmSource = cms.InputTag("gctDigis","isoEm"),
gctNonIsoEmSource = cms.InputTag("gctDigis","nonIsoEm"),
monitorDir = cms.untracked.string("L1T/L1TGCT"),
verbose = cms.untracked.bool(False),
stage1_layer2_ = cms.bool(False),
DQMStore = cms.untracked.bool(True),
disableROOToutput = cms.untracked.bool(True),
filterTriggerType = cms.int32(1)
)
| import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
l1tGct = DQMEDAnalyzer('L1TGCT',
gctCentralJetsSource = cms.InputTag("gctDigis","cenJets"),
gctForwardJetsSource = cms.InputTag("gctDigis","forJets"),
gctTauJetsSource = cms.InputTag("gctDigis","tauJets"),
gctIsoTauJetsSource = cms.InputTag("gctDigis","fake"),
gctEnergySumsSource = cms.InputTag("gctDigis"),
gctIsoEmSource = cms.InputTag("gctDigis","isoEm"),
gctNonIsoEmSource = cms.InputTag("gctDigis","nonIsoEm"),
monitorDir = cms.untracked.string("L1T/L1TGCT"),
verbose = cms.untracked.bool(False),
stage1_layer2_ = cms.bool(False),
DQMStore = cms.untracked.bool(True),
disableROOToutput = cms.untracked.bool(True),
filterTriggerType = cms.int32(1)
)
| none | 1 | 1.445069 | 1 |
|
utilities.py | gandhiy/lipMIP | 11 | 8799 | <reponame>gandhiy/lipMIP
""" General all-purpose utilities """
import sys
import torch
import torch.nn.functional as F
import numpy as np
import gurobipy as gb
import matplotlib.pyplot as plt
import io
import contextlib
import tempfile
import time
import re
import pickle
import inspect
import glob
import os
COMPLETED_JOB_DIR = os.path.join(os.path.dirname(__file__), 'jobs', 'completed')
# ===============================================================================
# = Helpful all-purpose functions =
# ===============================================================================
class ParameterObject:
def __init__(self, **kwargs):
self.attr_list = []
assert 'attr_list' not in kwargs
for k,v in kwargs.items():
setattr(self, k, v)
self.attr_list.append(k)
def change_attrs(self, **kwargs):
new_kwargs = {}
for attr in self.attr_list:
if attr in kwargs:
new_kwargs[attr] = kwargs[attr]
else:
new_kwargs[attr] = getattr(self, attr)
return self.__class__(**new_kwargs)
class Factory(ParameterObject):
def __init__(self, constructor, **kwargs):
self.constructor = constructor
super(Factory, self).__init__(**kwargs)
def __call__(self, **kwargs):
cons_args = inspect.getfullargspec(self.constructor).args
# Make default args from attributes
args = {k: getattr(self, k) for k in self.attr_list if k in cons_args}
# Update the default args
for k,v in kwargs.items():
if k in cons_args:
args[k] = v
# Build object
return self.constructor(**args)
def __repr__(self):
return '<Factory: %s>' % self.constructor.__self__.__name__
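# Usage sketch (illustrative, not part of the original module). `make_box` is a
# made-up constructor; Factory stores default kwargs and fills in any
# constructor argument not supplied at call time:
#   def make_box(center, radius):
#       return (center - radius, center + radius)
#   box_factory = Factory(make_box, center=0.0, radius=1.0)
#   box_factory()            # -> (-1.0, 1.0)
#   box_factory(radius=2.0)  # -> (-2.0, 2.0)
# (__repr__ assumes `constructor` is a method bound to a class, e.g. SomeClass.builder)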
class DoEvery:
@classmethod
def dummy(cls, *args, **kwargs):
pass
def __init__(self, func, freq):
""" Simple class that holds onto a function and it returns
this function every freq iterations
ARGS:
func: function object to be returned every freq iterations
freq: int - how often to return the function
"""
self.func = func
self.freq = freq
self.i = 0
def __call__(self, *args, **kwargs):
if self.i % self.freq == 0:
returner = self.func
else:
returner = self.dummy
self.i += 1
return returner(*args, **kwargs)
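# Usage sketch (illustrative, not part of the original module): wrap a callback
# so the real function only fires on every `freq`-th call; the other calls hit
# the no-op `dummy`.
#   log_every_100 = DoEvery(print, 100)
#   for step in range(1000):
#       log_every_100('step', step)   # actually prints on steps 0, 100, 200, ...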
class Timer:
def __init__(self, start_on_init=True):
if start_on_init:
self.start()
def start(self):
self.start_time = time.time()
def stop(self):
self.stop_time = time.time()
return self.stop_time - self.start_time
def reset(self):
self.start_time = self.stop_time = None
def cpufy(tensor_iter):
""" Takes a list of tensors and safely pushes them back onto the cpu"""
return [_.cpu() for _ in tensor_iter]
def cudafy(tensor_iter):
""" Takes a list of tensors and safely converts all of them to cuda"""
def safe_cuda(el):
try:
return el.cuda()
except AssertionError:
return el
return [safe_cuda(_) for _ in tensor_iter]
def prod(num_iter):
""" returns product of all elements in this iterator *'ed together"""
cumprod = 1
for el in num_iter:
cumprod *= el
return cumprod
def partition(n, m):
""" Given ints n > m, partitions n into an iterable where all
elements are m, except for the last one which is (n % m)
"""
count = 0
while count < n:
yield min([m, n - count])
count += m
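# Example (illustrative): partition 10 into chunks of at most 4.
#   list(partition(10, 4))   # -> [4, 4, 2]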
def flatten_list(lol):
""" Given list of lists, flattens it into a single list. """
output = []
for el in lol:
if not isinstance(el, list):
output.append(el)
continue
output.extend(flatten_list(el))
return output
def partition_by_suffix(iterable, func):
""" Given an iterable and a boolean-valued function which takes in
elements of that iterable, outputs a list of lists, where each list
ends in an element for which the func returns true, (except for the
last one)
e.g.
iterable := [1, 2, 3, 4, 5,5, 5]
func := lambda x: (x % 2) == 0
returns [[1,2], [3,4], [5, 5, 5]]
"""
output = []
sublist = []
for el in iterable:
sublist.append(el)
if func(el):
output.append(sublist)
sublist = []
if len(sublist) > 0:
output.append(sublist)
return output
def arraylike(obj):
return isinstance(obj, (torch.Tensor, np.ndarray))
def as_numpy(tensor_or_array):
""" If given a tensor or numpy array returns that object cast numpy array
"""
if isinstance(tensor_or_array, torch.Tensor):
tensor_or_array = tensor_or_array.cpu().detach().numpy()
return tensor_or_array
def two_col(l, r):
""" Takes two numpy arrays of size N and makes a numpy array of size Nx2
"""
return np.vstack([l, r]).T
def split_pos_neg(x):
if isinstance(x, torch.Tensor):
return split_tensor_pos_neg(x)
else:
return split_ndarray_pos_neg(x)
def split_tensor_pos_neg(x):
""" Splits a tensor into positive and negative components """
pos = F.relu(x)
neg = -F.relu(-x)
return pos, neg
def split_ndarray_pos_neg(x):
""" Splits a numpy ndarray into positive and negative components """
pos = x * (x >= 0)
neg = x * (x <= 0)
return pos, neg
def swap_axes(x, source, dest):
""" Swaps the dimensions of source <-> dest for torch/numpy
ARGS:
x : numpy array or tensor
source : int index
dest : int index
RETURNS
x' - object with same data as x, but with axes swapped
"""
if isinstance(x, torch.Tensor):
return x.transpose(source, dest)
else:
return np.moveaxis(x, source, dest)
def build_var_namer(k):
return lambda d: '%s[%s]' % (k, d)
@contextlib.contextmanager
def silent():
save_stdout = sys.stdout
temp = tempfile.TemporaryFile(mode='w')
sys.stdout = temp
yield
sys.stdout = save_stdout
temp.close()
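# Usage sketch (illustrative): suppress stdout of a noisy call (e.g. gurobi
# model construction) by redirecting it to a temporary file.
#   with silent():
#       print('this goes to a temp file instead of the console')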
def ia_mm(matrix, intervals, lohi_dim, matrix_or_vec='matrix'):
""" Interval analysis matrix(-vec) multiplication for torch/np intervals
ARGS:
matrix : tensor or numpy array of shape (m,n) -
intervals : tensor or numpy array with shape (n1, ..., 2, n_i, ...) -
"vector" of intervals to be multiplied by a matrix
one such n_i must be equal to n (from matrix shape)
lohi_dim : int - which dimension (index) of intervals corresponds
to the lo/hi split
matrix_or_vec : string - must be matrix or vec, corresponds to whether
intervals is to be treated as a matrix or a vector.
If a vec, a trailing singleton dimension is added before the multiply and removed afterwards.
RETURNS:
object of same type as intervals, but with the shape slightly
different: len(output[-1/-2]) == m
"""
# asserts for shapes and things
assert isinstance(matrix, torch.Tensor) # TENSOR ONLY FOR NOW
assert isinstance(intervals, torch.Tensor)
m, n = matrix.shape
assert intervals.shape[lohi_dim] == 2
assert matrix_or_vec in ['matrix', 'vec']
if matrix_or_vec == 'vec':
intervals = intervals.unsqueeze(-1)
assert lohi_dim != intervals.dim() - 2
assert intervals.shape[-2] == n
# define operators based on tensor/numpy case
matmul = lambda m, x: m.matmul(x)
stack = lambda a, b: torch.stack([a, b])
# now do IA stuff
intervals = swap_axes(intervals, 0, lohi_dim)
matrix_pos, matrix_neg = split_pos_neg(matrix)
los, his = intervals
new_los = matmul(matrix_pos, los) + matmul(matrix_neg, his)
new_his = matmul(matrix_pos, his) + matmul(matrix_neg, los)
intervals = swap_axes(stack(new_los, new_his), 0, lohi_dim)
if matrix_or_vec == 'vec':
intervals = intervals.squeeze(-1)
return intervals
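# Illustrative example (assumes the small fixes above; not from the original
# file). Propagate the box x in [0, 1]^2 through the linear map x1 - x2:
#   M   = torch.tensor([[1., -1.]])
#   box = torch.tensor([[0., 0.],    # lower bounds
#                       [1., 1.]])   # upper bounds
#   ia_mm(M, box, lohi_dim=0, matrix_or_vec='vec')
#   # -> tensor([[-1.], [1.]]), i.e. the output interval is [-1, 1]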
# =============================================================================
# = Image display functions =
# =============================================================================
def display_images(image_rows, figsize=(8, 8)):
""" Given either a tensor/np.array (or list of same), will display each
element in the row or tensor
ARGS:
image_rows: tensor or np.array or tensor[], np.array[] -
image or list of images to display
RETURNS: None, but displays images
"""
if not isinstance(image_rows, list):
image_rows = [image_rows]
np_rows = [as_numpy(row) for row in image_rows]
# Transpose channel to last dimension and stack to make rows
np_rows = [np.concatenate(_.transpose([0, 2, 3, 1]), axis=1)
for _ in np_rows]
# Now stack rows
full_image = np.concatenate(np_rows, axis=0)
# And then show image
imshow_kwargs = {}
if full_image.shape[-1] == 1:
full_image = full_image.squeeze()
imshow_kwargs['cmap'] = 'gray'
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot()
ax.axis('off')
ax.imshow(full_image, **imshow_kwargs)
plt.show()
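# Usage sketch (illustrative): show a row of four random RGB 32x32 images.
#   display_images(torch.rand(4, 3, 32, 32))
# or two rows:
#   display_images([torch.rand(4, 3, 32, 32), torch.rand(4, 3, 32, 32)])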
# ======================================================
# = Pytorch helpers =
# ======================================================
def seq_append(seq, module):
""" Takes a nn.sequential and a nn.module and creates a nn.sequential
with the module appended to it
ARGS:
        seq: nn.Sequential object
module: <inherits nn.Module>
RETURNS:
nn.Sequential object
"""
seq_modules = [seq[_] for _ in range(len(seq))] + [module]
return nn.Sequential(*seq_modules)
def cpufy(tensor_iter):
""" Takes a list of tensors and safely pushes them back onto the cpu"""
output = []
for el in tensor_iter:
if isinstance(el, tuple):
output.append(tuple(_.cpu() for _ in el))
else:
output.append(el.cpu())
return output
def cudafy(tensor_iter):
""" Takes a list of tensors and safely converts all of them to cuda"""
def safe_cuda(el):
try:
if isinstance(el, tuple):
return tuple(_.cuda() for _ in el)
else:
return el.cuda()
except AssertionError:
return el
return [safe_cuda(_) for _ in tensor_iter]
# =======================================
# = Polytope class =
# =======================================
class Polytope:
INPUT_KEY = 'input'
SLACK_KEY = 'slack'
def __init__(self, A, b):
""" Represents a polytope of the form {x | AX <= b}
(where everything is a numpy array)
"""
self.A = A
self.b = b
def _input_from_model(self, model):
var_namer = build_var_namer(self.INPUT_KEY)
return np.array([model.getVarByName(var_namer(i)).X
for i in range(self.A.shape[1])])
def _build_model(self, slack=False):
""" Builds a gurobi model of this object """
with silent():
model = gb.Model()
input_namer = build_var_namer(self.INPUT_KEY)
input_vars = [model.addVar(lb=-gb.GRB.INFINITY, ub=gb.GRB.INFINITY,
name=input_namer(i))
for i in range(self.A.shape[1])]
if slack == True:
slack_var = model.addVar(lb=0, ub=1.0, name=self.SLACK_KEY)
else:
slack_var = 0
for i, row in enumerate(self.A):
model.addConstr(gb.LinExpr(row, input_vars) + slack_var <= self.b[i])
model.update()
return model
def contains(self, x, tolerance=1e-6):
return all(self.A @ x <= self.b + tolerance)
def interior_point(self):
model = self._build_model(slack=True)
slack_var = model.getVarByName(self.SLACK_KEY)
model.setObjective(slack_var, gb.GRB.MAXIMIZE)
model.update()
model.optimize()
assert model.Status == 2
return self._input_from_model(model)
def intersects_hbox(self, hbox):
""" If this intersects a given hyperbox, returns a
point contained in both
"""
model = self._build_model(slack=True)
input_namer = build_var_namer(self.INPUT_KEY)
for i, (lb, ub) in enumerate(hbox):
var = model.getVarByName(input_namer(i))
            # add the two bounds as separate constraints rather than via a
            # chained comparison
            model.addConstr(var >= lb)
            model.addConstr(var <= ub)
slack_var = model.getVarByName(self.SLACK_KEY)
model.setObjective(slack_var, gb.GRB.MAXIMIZE)
model.update()
model.optimize()
assert model.Status == 2
return self._input_from_model(model)
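# --- Example sketch (added for illustration; not part of the original file) ---
# A 2-D box {x : |x_i| <= 1} written as {x | Ax <= b}. Polytope.contains only
# needs numpy; interior_point() and intersects_hbox() additionally require a
# working Gurobi installation and license. Names are illustrative only.
def _example_polytope():
    A = np.array([[ 1.,  0.],
                  [-1.,  0.],
                  [ 0.,  1.],
                  [ 0., -1.]])
    b = np.ones(4)
    box = Polytope(A, b)
    assert box.contains(np.zeros(2))
    assert not box.contains(np.array([2., 0.]))
    return box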
# =========================================================
# = experiment.Result object helpers =
# =========================================================
def filename_to_epoch(filename):
return int(re.search(r'_EPOCH\d{4}_', filename).group()[-5:-1])
def read_result_files(result_files):
output = []
for result_file in result_files:
try:
with open(result_file, 'rb') as f:
output.append((result_file, pickle.load(f)))
except Exception as err:
print("Failed on file: ", result_file, err)
return output
def job_out_series(job_outs, eval_style, method,
value_or_time='value', avg_stdev='avg'):
""" Takes in some result or resultList objects and
a 'method', and desired object, and returns these objects
in a list
ARGS:
results: Result[] or ResultList[], results to consider
eval_style: str - which method of Experiment we look at
method: str - which Lipschitz-estimation technique to consider
value_or_time: 'value' or 'time' - which number to return
avg_stdev: 'avg' or 'stdev' - for ResultList[], we can
get average or stdev values
RETURNS:
list of floats
"""
# check everything is the same type
assert value_or_time in ['value', 'time']
assert avg_stdev in ['avg', 'stdev']
assert eval_style in ['do_random_evals', 'do_unit_hypercube_eval',
'do_data_evals', 'do_large_radius_evals']
results = [job_out[eval_style] for job_out in job_outs]
output = []
for result in results:
try: #Result object case
if value_or_time == 'value':
output.append(result.values(method))
else:
output.append(result.compute_times(method))
        except AttributeError:  # ResultList case
triple = result.average_stdevs(value_or_time)[method]
if avg_stdev == 'avg':
output.append(triple[0])
else:
output.append(triple[1])
return output
def collect_result_outs(filematch):
""" Uses glob to collect and load result objects matching a series
ARGS:
filematch: string with *'s associated with it
e.g. 'NAME*SUBNAME*GLOBAL.result'
RESULTS:
list of (filename, experiment.Result) objects
"""
search_str = os.path.join(COMPLETED_JOB_DIR, filematch)
sorted_filenames = sorted(glob.glob(search_str))
return read_result_files(sorted_filenames)
def collect_epochs(filename_list):
""" Given a list of (filename) objects, converts
the filenames into integers, pulling the EPOCH attribute from
the filename
str[] -> int[]
"""
    def epoch_gleamer(filename):
        basename = os.path.basename(filename)
        return int(re.search(r'_EPOCH\d+_', basename).group()[6:-1])
return [epoch_gleamer(_) for _ in filename_list]
def data_from_results(result_iter, method, lip_estimator, time_or_value='value',
avg_or_stdev='avg'):
""" Given a list of experiment.Result or experiment.ResultList objects
will return the time/value for the lip_estimator of the method
for result (or avg/stdev if resultList objects)
        e.g., data_from_results(results, 'do_unit_hypercube_eval', 'LipMIP',
              'value') gets a list of values of the LipMIP over the
              unitHypercube domain
ARGS:
method: str - name of one of the experimental methods
lip_estimator : str - name of the class of lipschitz estimator to use
time_or_value : 'time' or 'value' - returning the time or value here
avg_or_stdev : 'avg' or 'stdev' - returning either avg or stdev of
results from ResultListObjects
"""
assert method in ['do_random_evals', 'do_data_evals',
'do_unit_hypercube_eval']
assert lip_estimator in ['LipMIP', 'FastLip', 'LipLP', 'CLEVER',
'LipSDP', 'NaiveUB', 'RandomLB', 'SeqLip']
assert time_or_value in ['time', 'value']
assert avg_or_stdev in ['avg', 'stdev']
def datum_getter(result_obj):
if not hasattr(result_obj, 'average_stdevs'):
if time_or_value == 'value':
return result_obj[method].values(lip_estimator)
else:
return result_obj[method].compute_times(lip_estimator)
else:
triple = result_obj.average_stdevs(time_or_value)
if avg_or_stdev == 'avg':
return triple[0]
else:
return triple[1]
return [datum_getter(_) for _ in result_iter]
| """ General all-purpose utilities """
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import gurobipy as gb
import matplotlib.pyplot as plt
import io
import contextlib
import tempfile
import time
import re
import pickle
import inspect
import glob
import os
COMPLETED_JOB_DIR = os.path.join(os.path.dirname(__file__), 'jobs', 'completed')
# ===============================================================================
# = Helpful all-purpose functions =
# ===============================================================================
class ParameterObject:
def __init__(self, **kwargs):
self.attr_list = []
assert 'attr_list' not in kwargs
for k,v in kwargs.items():
setattr(self, k, v)
self.attr_list.append(k)
def change_attrs(self, **kwargs):
new_kwargs = {}
for attr in self.attr_list:
if attr in kwargs:
new_kwargs[attr] = kwargs[attr]
else:
new_kwargs[attr] = getattr(self, attr)
return self.__class__(**new_kwargs)
class Factory(ParameterObject):
def __init__(self, constructor, **kwargs):
self.constructor = constructor
super(Factory, self).__init__(**kwargs)
def __call__(self, **kwargs):
cons_args = inspect.getfullargspec(self.constructor).args
# Make default args from attributes
args = {k: getattr(self, k) for k in self.attr_list if k in cons_args}
# Update the default args
for k,v in kwargs.items():
if k in cons_args:
args[k] = v
# Build object
return self.constructor(**args)
def __repr__(self):
        # classes and plain functions both expose __name__; fall back to repr()
        return '<Factory: %s>' % getattr(self.constructor, '__name__',
                                         repr(self.constructor))
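# --- Example sketch (added for illustration; not part of the original file) ---
# Factory stores default constructor kwargs; each call can override them.
# The Line class and names below are illustrative only.
def _example_factory():
    class Line:
        def __init__(self, slope, intercept=0.0):
            self.slope, self.intercept = slope, intercept
    line_factory = Factory(Line, slope=2.0, intercept=1.0)
    default_line = line_factory()            # slope=2.0, intercept=1.0
    steeper_line = line_factory(slope=5.0)   # override a single default
    return default_line, steeper_line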
class DoEvery:
@classmethod
def dummy(cls, *args, **kwargs):
pass
def __init__(self, func, freq):
""" Simple class that holds onto a function and it returns
this function every freq iterations
ARGS:
func: function object to be returned every freq iterations
freq: int - how often to return the function
"""
self.func = func
self.freq = freq
self.i = 0
def __call__(self, *args, **kwargs):
if self.i % self.freq == 0:
returner = self.func
else:
returner = self.dummy
self.i += 1
return returner(*args, **kwargs)
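# --- Example sketch (added for illustration; not part of the original file) ---
# DoEvery forwards only every freq-th call to the wrapped function and
# silently drops the rest. Names below are illustrative only.
def _example_do_every():
    collected = []
    every_third = DoEvery(collected.append, freq=3)
    for step in range(7):
        every_third(step)
    return collected   # [0, 3, 6]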
class Timer:
def __init__(self, start_on_init=True):
if start_on_init:
self.start()
def start(self):
self.start_time = time.time()
def stop(self):
self.stop_time = time.time()
return self.stop_time - self.start_time
def reset(self):
self.start_time = self.stop_time = None
def cpufy(tensor_iter):
""" Takes a list of tensors and safely pushes them back onto the cpu"""
return [_.cpu() for _ in tensor_iter]
def cudafy(tensor_iter):
""" Takes a list of tensors and safely converts all of them to cuda"""
def safe_cuda(el):
try:
return el.cuda()
except AssertionError:
return el
return [safe_cuda(_) for _ in tensor_iter]
def prod(num_iter):
""" returns product of all elements in this iterator *'ed together"""
cumprod = 1
for el in num_iter:
cumprod *= el
return cumprod
def partition(n, m):
""" Given ints n > m, partitions n into an iterable where all
elements are m, except for the last one which is (n % m)
"""
count = 0
while count < n:
yield min([m, n - count])
count += m
def flatten_list(lol):
""" Given list of lists, flattens it into a single list. """
output = []
for el in lol:
if not isinstance(el, list):
output.append(el)
continue
output.extend(flatten_list(el))
return output
def partition_by_suffix(iterable, func):
""" Given an iterable and a boolean-valued function which takes in
elements of that iterable, outputs a list of lists, where each list
ends in an element for which the func returns true, (except for the
last one)
e.g.
iterable := [1, 2, 3, 4, 5,5, 5]
func := lambda x: (x % 2) == 0
returns [[1,2], [3,4], [5, 5, 5]]
"""
output = []
sublist = []
for el in iterable:
sublist.append(el)
if func(el):
output.append(sublist)
sublist = []
if len(sublist) > 0:
output.append(sublist)
return output
def arraylike(obj):
return isinstance(obj, (torch.Tensor, np.ndarray))
def as_numpy(tensor_or_array):
""" If given a tensor or numpy array returns that object cast numpy array
"""
if isinstance(tensor_or_array, torch.Tensor):
tensor_or_array = tensor_or_array.cpu().detach().numpy()
return tensor_or_array
def two_col(l, r):
""" Takes two numpy arrays of size N and makes a numpy array of size Nx2
"""
return np.vstack([l, r]).T
def split_pos_neg(x):
if isinstance(x, torch.Tensor):
return split_tensor_pos_neg(x)
else:
return split_ndarray_pos_neg(x)
def split_tensor_pos_neg(x):
""" Splits a tensor into positive and negative components """
pos = F.relu(x)
neg = -F.relu(-x)
return pos, neg
def split_ndarray_pos_neg(x):
""" Splits a numpy ndarray into positive and negative components """
pos = x * (x >= 0)
neg = x * (x <= 0)
return pos, neg
def swap_axes(x, source, dest):
""" Swaps the dimensions of source <-> dest for torch/numpy
ARGS:
x : numpy array or tensor
source : int index
dest : int index
RETURNS
x' - object with same data as x, but with axes swapped
"""
if isinstance(x, torch.Tensor):
return x.transpose(source, dest)
else:
return np.moveaxis(x, source, dest)
def build_var_namer(k):
return lambda d: '%s[%s]' % (k, d)
@contextlib.contextmanager
def silent():
    """ Context manager that redirects stdout to a temp file and restores it
        even if the wrapped block raises """
    save_stdout = sys.stdout
    temp = tempfile.TemporaryFile(mode='w')
    sys.stdout = temp
    try:
        yield
    finally:
        sys.stdout = save_stdout
        temp.close()
def ia_mm(matrix, intervals, lohi_dim, matrix_or_vec='matrix'):
""" Interval analysis matrix(-vec) multiplication for torch/np intervals
ARGS:
matrix : tensor or numpy array of shape (m,n) -
intervals : tensor or numpy array with shape (n1, ..., 2, n_i, ...) -
"vector" of intervals to be multiplied by a matrix
one such n_i must be equal to n (from matrix shape)
lohi_dim : int - which dimension (index) of intervals corresponds
to the lo/hi split
matrix_or_vec : string - must be matrix or vec, corresponds to whether
intervals is to be treated as a matrix or a vector.
                        If a vec, it is unsqueezed to a trailing singleton
                        dimension for the multiply and squeezed back out
                        before returning.
RETURNS:
object of same type as intervals, but with the shape slightly
different: len(output[-1/-2]) == m
"""
# asserts for shapes and things
assert isinstance(matrix, torch.Tensor) # TENSOR ONLY FOR NOW
assert isinstance(intervals, torch.Tensor)
m, n = matrix.shape
assert intervals.shape[lohi_dim] == 2
assert matrix_or_vec in ['matrix', 'vec']
if matrix_or_vec == 'vec':
intervals = intervals.unsqueeze(-1)
assert lohi_dim != intervals.dim() - 2
        # the interval "vector" length must match the matrix's inner dimension
        assert intervals.shape[-2] == n
# define operators based on tensor/numpy case
matmul = lambda m, x: m.matmul(x)
stack = lambda a, b: torch.stack([a, b])
# now do IA stuff
intervals = swap_axes(intervals, 0, lohi_dim)
matrix_pos, matrix_neg = split_pos_neg(matrix)
los, his = intervals
new_los = matmul(matrix_pos, los) + matmul(matrix_neg, his)
new_his = matmul(matrix_pos, his) + matmul(matrix_neg, los)
intervals = swap_axes(stack(new_los, new_his), 0, lohi_dim)
if matrix_or_vec == 'vec':
        intervals = intervals.squeeze(-1)
return intervals
# =============================================================================
# = Image display functions =
# =============================================================================
def display_images(image_rows, figsize=(8, 8)):
""" Given either a tensor/np.array (or list of same), will display each
element in the row or tensor
ARGS:
        image_rows: tensor or np.array or tensor[], np.array[] -
                    batch of images with shape (N, C, H, W), or a list of
                    such batches (one displayed row per batch)
RETURNS: None, but displays images
"""
if not isinstance(image_rows, list):
image_rows = [image_rows]
np_rows = [as_numpy(row) for row in image_rows]
# Transpose channel to last dimension and stack to make rows
np_rows = [np.concatenate(_.transpose([0, 2, 3, 1]), axis=1)
for _ in np_rows]
# Now stack rows
full_image = np.concatenate(np_rows, axis=0)
# And then show image
imshow_kwargs = {}
if full_image.shape[-1] == 1:
full_image = full_image.squeeze()
imshow_kwargs['cmap'] = 'gray'
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot()
ax.axis('off')
ax.imshow(full_image, **imshow_kwargs)
plt.show()
# ======================================================
# = Pytorch helpers =
# ======================================================
def seq_append(seq, module):
""" Takes a nn.sequential and a nn.module and creates a nn.sequential
with the module appended to it
ARGS:
        seq: nn.Sequential object
module: <inherits nn.Module>
RETURNS:
nn.Sequential object
"""
seq_modules = [seq[_] for _ in range(len(seq))] + [module]
return nn.Sequential(*seq_modules)
def cpufy(tensor_iter):
""" Takes a list of tensors and safely pushes them back onto the cpu"""
output = []
for el in tensor_iter:
if isinstance(el, tuple):
output.append(tuple(_.cpu() for _ in el))
else:
output.append(el.cpu())
return output
def cudafy(tensor_iter):
""" Takes a list of tensors and safely converts all of them to cuda"""
def safe_cuda(el):
try:
if isinstance(el, tuple):
return tuple(_.cuda() for _ in el)
else:
return el.cuda()
except AssertionError:
return el
return [safe_cuda(_) for _ in tensor_iter]
# =======================================
# = Polytope class =
# =======================================
class Polytope:
INPUT_KEY = 'input'
SLACK_KEY = 'slack'
def __init__(self, A, b):
""" Represents a polytope of the form {x | AX <= b}
(where everything is a numpy array)
"""
self.A = A
self.b = b
def _input_from_model(self, model):
var_namer = build_var_namer(self.INPUT_KEY)
return np.array([model.getVarByName(var_namer(i)).X
for i in range(self.A.shape[1])])
def _build_model(self, slack=False):
""" Builds a gurobi model of this object """
with silent():
model = gb.Model()
input_namer = build_var_namer(self.INPUT_KEY)
input_vars = [model.addVar(lb=-gb.GRB.INFINITY, ub=gb.GRB.INFINITY,
name=input_namer(i))
for i in range(self.A.shape[1])]
if slack == True:
slack_var = model.addVar(lb=0, ub=1.0, name=self.SLACK_KEY)
else:
slack_var = 0
for i, row in enumerate(self.A):
model.addConstr(gb.LinExpr(row, input_vars) + slack_var <= self.b[i])
model.update()
return model
def contains(self, x, tolerance=1e-6):
return all(self.A @ x <= self.b + tolerance)
def interior_point(self):
model = self._build_model(slack=True)
slack_var = model.getVarByName(self.SLACK_KEY)
model.setObjective(slack_var, gb.GRB.MAXIMIZE)
model.update()
model.optimize()
assert model.Status == 2
return self._input_from_model(model)
def intersects_hbox(self, hbox):
""" If this intersects a given hyperbox, returns a
point contained in both
"""
model = self._build_model(slack=True)
input_namer = build_var_namer(self.INPUT_KEY)
for i, (lb, ub) in enumerate(hbox):
var = model.getVarByName(input_namer(i))
            # add the two bounds as separate constraints rather than via a
            # chained comparison
            model.addConstr(var >= lb)
            model.addConstr(var <= ub)
slack_var = model.getVarByName(self.SLACK_KEY)
model.setObjective(slack_var, gb.GRB.MAXIMIZE)
model.update()
model.optimize()
assert model.Status == 2
return self._input_from_model(model)
# =========================================================
# = experiment.Result object helpers =
# =========================================================
def filename_to_epoch(filename):
return int(re.search(r'_EPOCH\d{4}_', filename).group()[-5:-1])
def read_result_files(result_files):
output = []
for result_file in result_files:
try:
with open(result_file, 'rb') as f:
output.append((result_file, pickle.load(f)))
except Exception as err:
print("Failed on file: ", result_file, err)
return output
def job_out_series(job_outs, eval_style, method,
value_or_time='value', avg_stdev='avg'):
""" Takes in some result or resultList objects and
a 'method', and desired object, and returns these objects
in a list
ARGS:
results: Result[] or ResultList[], results to consider
eval_style: str - which method of Experiment we look at
method: str - which Lipschitz-estimation technique to consider
value_or_time: 'value' or 'time' - which number to return
avg_stdev: 'avg' or 'stdev' - for ResultList[], we can
get average or stdev values
RETURNS:
list of floats
"""
# check everything is the same type
assert value_or_time in ['value', 'time']
assert avg_stdev in ['avg', 'stdev']
assert eval_style in ['do_random_evals', 'do_unit_hypercube_eval',
'do_data_evals', 'do_large_radius_evals']
results = [job_out[eval_style] for job_out in job_outs]
output = []
for result in results:
try: #Result object case
if value_or_time == 'value':
output.append(result.values(method))
else:
output.append(result.compute_times(method))
        except AttributeError:  # ResultList case
triple = result.average_stdevs(value_or_time)[method]
if avg_stdev == 'avg':
output.append(triple[0])
else:
output.append(triple[1])
return output
def collect_result_outs(filematch):
""" Uses glob to collect and load result objects matching a series
ARGS:
filematch: string with *'s associated with it
e.g. 'NAME*SUBNAME*GLOBAL.result'
RESULTS:
list of (filename, experiment.Result) objects
"""
search_str = os.path.join(COMPLETED_JOB_DIR, filematch)
sorted_filenames = sorted(glob.glob(search_str))
return read_result_files(sorted_filenames)
def collect_epochs(filename_list):
""" Given a list of (filename) objects, converts
the filenames into integers, pulling the EPOCH attribute from
the filename
str[] -> int[]
"""
    def epoch_gleamer(filename):
        basename = os.path.basename(filename)
        return int(re.search(r'_EPOCH\d+_', basename).group()[6:-1])
return [epoch_gleamer(_) for _ in filename_list]
def data_from_results(result_iter, method, lip_estimator, time_or_value='value',
avg_or_stdev='avg'):
""" Given a list of experiment.Result or experiment.ResultList objects
will return the time/value for the lip_estimator of the method
for result (or avg/stdev if resultList objects)
        e.g., data_from_results(results, 'do_unit_hypercube_eval', 'LipMIP',
              'value') gets a list of values of the LipMIP over the
              unitHypercube domain
ARGS:
method: str - name of one of the experimental methods
lip_estimator : str - name of the class of lipschitz estimator to use
time_or_value : 'time' or 'value' - returning the time or value here
avg_or_stdev : 'avg' or 'stdev' - returning either avg or stdev of
results from ResultListObjects
"""
assert method in ['do_random_evals', 'do_data_evals',
'do_unit_hypercube_eval']
assert lip_estimator in ['LipMIP', 'FastLip', 'LipLP', 'CLEVER',
'LipSDP', 'NaiveUB', 'RandomLB', 'SeqLip']
assert time_or_value in ['time', 'value']
assert avg_or_stdev in ['avg', 'stdev']
def datum_getter(result_obj):
if not hasattr(result_obj, 'average_stdevs'):
if time_or_value == 'value':
return result_obj[method].values(lip_estimator)
else:
return result_obj[method].compute_times(lip_estimator)
else:
triple = result_obj.average_stdevs(time_or_value)
if avg_or_stdev == 'avg':
return triple[0]
else:
return triple[1]
return [datum_getter(_) for _ in result_iter] | en | 0.674301 | General all-purpose utilities # =============================================================================== # = Helpful all-purpose functions = # =============================================================================== # Make default args from attributes # Update the default args # Build object Simple class that holds onto a function and it returns this function every freq iterations ARGS: func: function object to be returned every freq iterations freq: int - how often to return the function Takes a list of tensors and safely pushes them back onto the cpu Takes a list of tensors and safely converts all of them to cuda returns product of all elements in this iterator *'ed together Given ints n > m, partitions n into an iterable where all elements are m, except for the last one which is (n % m) Given list of lists, flattens it into a single list. Given an iterable and a boolean-valued function which takes in elements of that iterable, outputs a list of lists, where each list ends in an element for which the func returns true, (except for the last one) e.g. iterable := [1, 2, 3, 4, 5,5, 5] func := lambda x: (x % 2) == 0 returns [[1,2], [3,4], [5, 5, 5]] If given a tensor or numpy array returns that object cast numpy array Takes two numpy arrays of size N and makes a numpy array of size Nx2 Splits a tensor into positive and negative components Splits a numpy ndarray into positive and negative components Swaps the dimensions of source <-> dest for torch/numpy ARGS: x : numpy array or tensor source : int index dest : int index RETURNS x' - object with same data as x, but with axes swapped Interval analysis matrix(-vec) multiplication for torch/np intervals ARGS: matrix : tensor or numpy array of shape (m,n) - intervals : tensor or numpy array with shape (n1, ..., 2, n_i, ...) - "vector" of intervals to be multiplied by a matrix one such n_i must be equal to n (from matrix shape) lohi_dim : int - which dimension (index) of intervals corresponds to the lo/hi split matrix_or_vec : string - must be matrix or vec, corresponds to whether intervals is to be treated as a matrix or a vector. 
If a v RETURNS: object of same type as intervals, but with the shape slightly different: len(output[-1/-2]) == m # asserts for shapes and things # TENSOR ONLY FOR NOW # define operators based on tensor/numpy case # now do IA stuff # ============================================================================= # = Image display functions = # ============================================================================= Given either a tensor/np.array (or list of same), will display each element in the row or tensor ARGS: image_rows: tensor or np.array or tensor[], np.array[] - image or list of images to display RETURNS: None, but displays images # Transpose channel to last dimension and stack to make rows # Now stack rows # And then show image # ====================================================== # = Pytorch helpers = # ====================================================== Takes a nn.sequential and a nn.module and creates a nn.sequential with the module appended to it ARGS: seq: nn.Sequntial object module: <inherits nn.Module> RETURNS: nn.Sequential object Takes a list of tensors and safely pushes them back onto the cpu Takes a list of tensors and safely converts all of them to cuda # ======================================= # = Polytope class = # ======================================= Represents a polytope of the form {x | AX <= b} (where everything is a numpy array) Builds a gurobi model of this object If this intersects a given hyperbox, returns a point contained in both # ========================================================= # = experiment.Result object helpers = # ========================================================= Takes in some result or resultList objects and a 'method', and desired object, and returns these objects in a list ARGS: results: Result[] or ResultList[], results to consider eval_style: str - which method of Experiment we look at method: str - which Lipschitz-estimation technique to consider value_or_time: 'value' or 'time' - which number to return avg_stdev: 'avg' or 'stdev' - for ResultList[], we can get average or stdev values RETURNS: list of floats # check everything is the same type #Result object case Uses glob to collect and load result objects matching a series ARGS: filematch: string with *'s associated with it e.g. 'NAME*SUBNAME*GLOBAL.result' RESULTS: list of (filename, experiment.Result) objects Given a list of (filename) objects, converts the filenames into integers, pulling the EPOCH attribute from the filename str[] -> int[] Given a list of experiment.Result or experiment.ResultList objects will return the time/value for the lip_estimator of the method for result (or avg/stdev if resultList objects) e.g., data_from_results('do_unit_hypercube_eval', 'LipMIP', 'value') gets a list of values of the LipMIP over the unitHypercube domain ARGS: method: str - name of one of the experimental methods lip_estimator : str - name of the class of lipschitz estimator to use time_or_value : 'time' or 'value' - returning the time or value here avg_or_stdev : 'avg' or 'stdev' - returning either avg or stdev of results from ResultListObjects | 2.123502 | 2 |