ext | sha | content
---|---|---|
py
|
1a5a1d247232406df7bbcd924fd1c0bd38712e44
|
from typing import TypeVar
from server.utils import snowflake
T = TypeVar("T")
class IdMixin:
def __eq__(self: T, other: T) -> bool:
return self.id == other.id
def __hash__(self: T):
return self.id >> 22
@property
def created_at(self: T):
return snowflake.get_timestamp(self.id)
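# A minimal usage sketch (illustrative only; `_ExampleModel` is hypothetical and
# assumes `id` is a snowflake-style integer whose upper bits encode a timestamp):
class _ExampleModel(IdMixin):
    def __init__(self, id: int) -> None:
        self.id = id
# Two instances with the same id compare equal and hash to the same bucket:
#   _ExampleModel(1 << 22) == _ExampleModel(1 << 22)  -> True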
|
py
|
1a5a1e481a439632dbe4863ccb2863cf666d16bd
|
import urllib.request
import json
from .models import News_Sources, News_Articles
apiKey = None
base_url = None
news_article_url = None
def configure_request(app):
global apiKey, base_url,news_article_url
apiKey = app.config['NEWS_API_KEY']
base_url = app.config['NEWS_SOURCE_API_BASE_URL']
news_article_url = app.config['NEWS_ARTICLE_API_BASE_URL']
def get_news(category):
"""
Function that gets the JSON response for our URL request
"""
get_news_url = base_url.format(category,apiKey)
with urllib.request.urlopen(get_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data)
news_results = None
if get_news_response['sources']:
news_results_list = get_news_response['sources']
news_results = process_news_sources(news_results_list)
return news_results
def process_news_sources(news_list):
"""
Function that processes the news source results and transforms them into a list of objects
Args:
news_list: A list of dictionaries that contain news source details
Returns:
news_results: A list of news source objects
"""
news_results=[]
for news_item in news_list:
id = news_item.get('id')
name= news_item.get('name')
description = news_item.get('description')
url = news_item.get('url')
category = news_item.get('category')
country = news_item.get('country')
news_object= News_Sources(id,name,description, url, category, country)
news_results.append(news_object)
return news_results
def get_articles(id):
'''
Function that gets the JSON response for the URL request
'''
get_articles_url = news_article_url.format(id,apiKey)
with urllib.request.urlopen(get_articles_url) as url:
get_articles_data = url.read()
get_articles_response = json.loads(get_articles_data)
news_articles_results = None
if get_articles_response['articles']:
articles_results_list = get_articles_response['articles']
news_articles_results = process_articles(articles_results_list)
return news_articles_results
def process_articles(article_list):
"""
Function that processes the news article results and transforms them into a list of objects
Args:
article_list: A list of dictionaries that contain news article details
Returns:
news_articles_results: A list of news article objects
"""
news_articles_results = []
for article_item in article_list:
title = article_item.get('title')
author = article_item.get('author')
description = article_item.get('description')
url= article_item.get('url')
urlToImage = article_item.get('urlToImage')
publishedAt = article_item.get('publishedAt')
articles_object = News_Articles(title,author,description, url, urlToImage, publishedAt)
news_articles_results.append(articles_object)
return news_articles_results
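# Illustrative usage sketch (hypothetical config values; the real API key and
# URL templates come from the Flask app's config, each template with two `{}` slots):
#
#   app.config['NEWS_API_KEY'] = 'YOUR_API_KEY'
#   app.config['NEWS_SOURCE_API_BASE_URL'] = 'https://example.com/sources?category={}&apiKey={}'
#   app.config['NEWS_ARTICLE_API_BASE_URL'] = 'https://example.com/articles?source={}&apiKey={}'
#   configure_request(app)
#   sources = get_news('business')          # -> list of News_Sources objects
#   articles = get_articles(sources[0].id)  # -> list of News_Articles objects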
|
py
|
1a5a1fea7128a63693701784e0c918e2eb9612cc
|
# -*- coding: utf-8 -*-
import unittest
import ExampleModule
class MyTest(unittest.TestCase):
def setUp(self):  # set-up / initialization work
pass
def tearDown(self):  # clean-up work on exit
pass
def test_sum(self):  # a concrete test case; the method name must start with "test"
self.assertEqual(ExampleModule.e_sum(1, 2), 3, 'test sum fail')
def test_sub(self):
self.assertEqual(ExampleModule.e_sub(2, 1), 1, 'test sub fail')
if __name__ == '__main__':
unittest.main()
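# ExampleModule itself is not shown here; a minimal sketch that would satisfy
# these tests (hypothetical, for illustration only) is:
#
#   # ExampleModule.py
#   def e_sum(a, b):
#       return a + b
#
#   def e_sub(a, b):
#       return a - b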
|
py
|
1a5a20c6ff2b466f0a83f5167f5f35d770dea59b
|
import _plotly_utils.basevalidators
class XbinsValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="xbins", parent_name="histogram2dcontour", **kwargs):
super(XbinsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "XBins"),
data_docs=kwargs.pop(
"data_docs",
"""
end
Sets the end value for the x axis bins. The
last bin may not end exactly at this value, we
increment the bin edge by `size` from `start`
until we reach or exceed `end`. Defaults to the
maximum data value. Like `start`, for dates use
a date string, and for category data `end` is
based on the category serial numbers.
size
Sets the size of each x axis bin. Default
behavior: If `nbinsx` is 0 or omitted, we
choose a nice round bin size such that the
number of bins is about the same as the typical
number of samples in each bin. If `nbinsx` is
provided, we choose a nice round bin size
giving no more than that many bins. For date
data, use milliseconds or "M<n>" for months, as
in `axis.dtick`. For category data, the number
of categories to bin together (always defaults
to 1).
start
Sets the starting value for the x axis bins.
Defaults to the minimum data value, shifted
down if necessary to make nice round values and
to remove ambiguous bin edges. For example, if
most of the data is integers we shift the bin
edges 0.5 down, so a `size` of 5 would have a
default `start` of -0.5, so it is clear that
0-4 are in the first bin, 5-9 in the second,
but continuous data gets a start of 0 and bins
[0,5), [5,10) etc. Dates behave similarly, and
`start` should be a date string. For category
data, `start` is based on the category serial
numbers, and defaults to -0.5.
""",
),
**kwargs
)
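# A short usage sketch (assumes plotly.graph_objects is installed; values are
# arbitrary). `xbins` takes the `start`, `size`, and `end` keys documented above:
#
#   import plotly.graph_objects as go
#   trace = go.Histogram2dContour(
#       x=[0, 1, 2, 6, 7, 8],
#       y=[0, 0, 1, 4, 5, 5],
#       xbins=dict(start=-0.5, size=5, end=9.5),
#   )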
|
py
|
1a5a21af782e333ff8cf23839a51fd419d069779
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoders for the speech model."""
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
from lingvo.core import spectrum_augmenter
from lingvo.tasks.asr import blocks
class AsrEncoder(base_layer.BaseLayer):
"""Speech encoder version 2."""
@classmethod
def Params(cls):
"""Configs for AsrEncoder."""
p = super().Params()
# spec-augment
p.Define('specaugment_network',
spectrum_augmenter.SpectrumAugmenter.Params(),
'Configs template for the augmentation network.')
p.Define('use_specaugment', False, 'Use SpecAugment or not.')
# temporal downsampling, use one of the two
p.Define('conv_subsampler', blocks.ConvolutionalDownsampler.Params(),
'Convolution subsampling layer params')
p.Define('stacking_subsampler', blocks.InputStackingDownsampler.Params(),
'Stacking subsampling layer params')
p.Define('use_conv_subsampler', False, 'Enable p.conv_subsampler')
p.Define('use_stacking_subsampler', False, 'Enable p.stacking_subsampler')
# actual encoding layers, use one of these
p.Define('lstm_block', blocks.LSTMBlock.Params(), 'LSTM layer params')
# p.Define('conformer_block', blocks.ConformerBlock.Params(), 'Conformer specs')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
##### Use specAugment or not ####
if p.use_specaugment:
self.CreateChild('specaugment', p.specaugment_network.Copy())
##### handle sub-sampling ####
assert not (p.use_conv_subsampler and p.use_stacking_subsampler), \
'Please use only one form of time subsampling'
if p.use_conv_subsampler:
self.CreateChild('sub', p.conv_subsampler.Copy())
else:
assert p.use_stacking_subsampler, 'Need one stacking module'
self.CreateChild('sub', p.stacking_subsampler.Copy())
stack_out_feats = self.sub.output_dim
##### handle encoding #####
if p.lstm_block is not None:
if p.lstm_block.input_feats is None:
p.lstm_block.input_feats = stack_out_feats
assert p.lstm_block.input_feats == stack_out_feats
self.CreateChildren('enc', p.lstm_block.Copy())
@property
def output_dim(self):
return self.enc.output_dim
@property
def _use_functional(self):
return True
@property
def supports_streaming(self):
return False
def zero_state(self, theta, batch_size):
return py_utils.NestedMap()
def FProp(self, theta, batch, state0=None):
"""Encodes source as represented by 'inputs' and 'paddings'.
Args:
theta: A NestedMap object containing weights' values of this
layer and its children layers.
batch: A NestedMap with fields:
- src_inputs - The inputs tensor. It is expected to be of shape [batch,
time, feature_dim, channels].
- paddings - The paddings tensor. It is expected to be of shape [batch,
time].
state0: Recurrent input state. Not supported/ignored by this encoder.
Returns:
A NestedMap containing
- 'encoded': a feature tensor of shape [time, batch, depth]
- 'padding': a 0/1 tensor of shape [time, batch]
- 'state': the updated recurrent state
"""
p = self.params
inputs, paddings = batch.src_inputs, batch.paddings
with tf.name_scope(p.name):
if p.use_specaugment and not self.do_eval:
inputs, paddings = self.specaugment.FProp(theta.specaugment, inputs,
paddings)
inputs, paddings = self.sub.FProp(theta.sub, inputs, paddings)
encoded, padding = self.enc.FProp(theta.enc, inputs, paddings)
return py_utils.NestedMap(encoded=encoded,
padding=padding,
state=py_utils.NestedMap())
|
py
|
1a5a229b71efc154b640949420db93f994dccdcb
|
import random
from colour import Color
import numpy as np
from manimlib.constants import PALETTE
from manimlib.constants import WHITE
from manimlib.utils.bezier import interpolate
from manimlib.utils.simple_functions import clip_in_place
from manimlib.utils.space_ops import normalize
def color_to_rgb(color):
if isinstance(color, str):
return hex_to_rgb(color)
elif isinstance(color, Color):
return np.array(color.get_rgb())
else:
raise Exception("Invalid color type")
def color_to_rgba(color, alpha=1):
return np.array([*color_to_rgb(color), alpha])
def rgb_to_color(rgb):
try:
return Color(rgb=rgb)
except:
return Color(WHITE)
def rgba_to_color(rgba):
return rgb_to_color(rgba[:3])
def rgb_to_hex(rgb):
return "#" + "".join('%02x' % int(255 * x) for x in rgb)
def hex_to_rgb(hex_code):
hex_part = hex_code[1:]
if len(hex_part) == 3:
"".join([2 * c for c in hex_part])
return np.array([
int(hex_part[i:i + 2], 16) / 255
for i in range(0, 6, 2)
])
def invert_color(color):
return rgb_to_color(1.0 - color_to_rgb(color))
def color_to_int_rgb(color):
return (255 * color_to_rgb(color)).astype('uint8')
def color_to_int_rgba(color, opacity=1.0):
alpha = int(255 * opacity)
return np.append(color_to_int_rgb(color), alpha)
def color_gradient(reference_colors, length_of_output):
if length_of_output == 0:
return reference_colors[0]
rgbs = list(map(color_to_rgb, reference_colors))
alphas = np.linspace(0, (len(rgbs) - 1), length_of_output)
floors = alphas.astype('int')
alphas_mod1 = alphas % 1
# End edge case
alphas_mod1[-1] = 1
floors[-1] = len(rgbs) - 2
return [
rgb_to_color(interpolate(rgbs[i], rgbs[i + 1], alpha))
for i, alpha in zip(floors, alphas_mod1)
]
def interpolate_color(color1, color2, alpha):
rgb = interpolate(color_to_rgb(color1), color_to_rgb(color2), alpha)
return rgb_to_color(rgb)
def average_color(*colors):
rgbs = np.array(list(map(color_to_rgb, colors)))
mean_rgb = np.apply_along_axis(np.mean, 0, rgbs)
return rgb_to_color(mean_rgb)
def random_bright_color():
color = random_color()
curr_rgb = color_to_rgb(color)
new_rgb = interpolate(
curr_rgb, np.ones(len(curr_rgb)), 0.5
)
return Color(rgb=new_rgb)
def random_color():
return random.choice(PALETTE)
def get_shaded_rgb(rgb, point, unit_normal_vect, light_source):
to_sun = normalize(light_source - point)
factor = 0.5 * np.dot(unit_normal_vect, to_sun)**3
if factor < 0:
factor *= 0.5
result = rgb + factor
clip_in_place(result, 0, 1)
return result
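# Illustrative examples of the helpers above (doctest-style, values arbitrary):
#   >>> rgb_to_hex(color_to_rgb("#FF0000"))
#   '#ff0000'
#   >>> len(color_gradient(["#FF0000", "#0000FF"], 5))
#   5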
|
py
|
1a5a22aa8804196dc6c582cbf3af352553dd54ff
|
import numpy as np
import cv2 as cv
import json
from collections import defaultdict
import os
def load_bbox(bbox_file):
img_to_bbox = {}
with open(bbox_file, 'rb') as f:
lines = f.readlines()
for line in lines[2:]:
flag = line.strip('\n').split(' ')
img_to_bbox[flag[0]] = [int(flag[-4]), int(flag[-3]), int(flag[-2]), int(flag[-1])]
return img_to_bbox
def get_trainset(data_path, name):
train_val = []
with open(data_path, 'rb') as f:
lines = f.readlines()
for line in lines[2:]:
flag = line.strip('\n').split(' ')
if flag[-1] == name:
train_val.append(flag[0])
print "num of %s is %d" % (name, len(train_val))
return train_val
def load_category(data_path, top_thresh, down_thresh, full_thresh):
img_to_category = {}
with open(data_path, 'rb') as f:
lines = f.readlines()
for line in lines[2:]:
flag = line.strip('\n').split(' ')
if int(flag[-1]) <= top_thresh:
img_to_category[flag[0]] = 1
elif int(flag[-1]) <= down_thresh:
img_to_category[flag[0]] = 2
else:
img_to_category[flag[0]] = 3
return img_to_category
def write_new_file(train_val, img_to_bbox, img_to_category, wtf_path):
with open(wtf_path, 'w') as f:
for idx, img in enumerate(train_val):
print "Processing %d/%d!!!!" % (idx+1, len(train_val))
category_id = img_to_category[img]
bbox = img_to_bbox[img]
f.write(img+' '+str(category_id)+' '+str(bbox[0])+\
' '+str(bbox[1])+' '+str(bbox[2])+' '+str(bbox[3])+'\n')
def sample_data(input_file, sample_num, output_file):
avg_num = sample_num / 3
cate_one = avg_num
cate_two = avg_num
cate_three = sample_num - avg_num*2
idx_one = []
idx_two = []
idx_three = []
with open(input_file, 'rb') as f:
lines = f.readlines()
for idx, line in enumerate(lines):
id_num = int(line.split(' ')[1])
if id_num == 1:
idx_one.append(idx)
elif id_num == 2:
idx_two.append(idx)
elif id_num == 3:
idx_three.append(idx)
else:
print 'error'
idx_one = np.array(idx_one)[np.random.permutation(len(idx_one))[:cate_one]]
idx_two = np.array(idx_two)[np.random.permutation(len(idx_two))[:cate_two]]
idx_three = np.array(idx_three)[np.random.permutation(len(idx_three))[:cate_three]]
with open(input_file, 'rb') as f_read, open(output_file, 'w') as f_write:
lines = f_read.readlines()
for idx, line in enumerate(lines):
if (idx in idx_one) or (idx in idx_two) or (idx in idx_three):
f_write.write(line)
def sta_aspect_ratio():
stas = defaultdict(list)
with open('/data/home/liuhuawei/clothing_train.txt', 'rb') as f:
lines = f.readlines()
for line in lines:
flag = line.split(' ')
cat_id = int(flag[1])
xmin = int(flag[2])
ymin = int(flag[3])
w = float(flag[4]) - xmin + 1
h = float(flag[5]) - ymin + 1
w_to_h = w / h
if cat_id == 2 and w_to_h >= 1:
stas[4].append(w_to_h)
# if cat_id == 3 and w_to_h >= 1.75:
# stas[5].append(w_to_h)
else:
stas[cat_id].append(w_to_h)
for key in stas.keys():
print key, len(stas[key]), sum(stas[key]) / len(stas[key])
def sta_gt_bbox_area():
aspect_ratio_stas = defaultdict(list)
bbox_area_stas = defaultdict(list)
infix = '/data/home/liuhuawei/clothing_data/Img/'
target_size = 600
max_size = 1000
with open('/data/home/liuhuawei/clothing_train.txt', 'rb') as f:
lines = f.readlines()
for idx, line in enumerate(lines):
print "Processing %d/%d!!!" % (idx+1, len(lines))
flag = line.split(' ')
file_name = flag[0]
cat_id = int(flag[1])
xmin = int(flag[2])
ymin = int(flag[3])
w = float(flag[4]) - xmin + 1
h = float(flag[5]) - ymin + 1
img = cv.imread(os.path.join(infix, file_name))
im_shape = img.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
target_w = im_scale * w
target_h = im_scale * h
w_to_h = w / h
if cat_id == 2 and w_to_h >= 1:
aspect_ratio_stas[4].append(w_to_h)
bbox_area_stas[4].append((target_w, target_h))
else:
aspect_ratio_stas[cat_id].append(w_to_h)
bbox_area_stas[cat_id].append((target_w, target_h))
for key in aspect_ratio_stas.keys():
print key, len(aspect_ratio_stas[key]), \
sum(aspect_ratio_stas[key]) / len(aspect_ratio_stas[key])
for key in bbox_area_stas.keys():
value = np.array(bbox_area_stas[key])
print key, value.shape[0], value.mean(axis=0)
def sta_cat_data():
with open('/data/home/liuhuawei/clothing_data/Anno/list_category_img.txt', 'rb') as f:
cat_res = np.zeros(50, dtype=np.int32)
lines = f.readlines()
for line in lines[2:]:
## 0-index based
id_num = int(line.split(' ')[-1]) - 1
cat_res[id_num] += 1
print cat_res
print np.where(cat_res>500)[0]
print np.where(cat_res>1000)[0]
print np.sum(cat_res)
np.savetxt('/data/home/liuhuawei/clothing_data/Anno/cat_stas.txt', cat_res, fmt='%d', delimiter=',')
def sta_attr_data():
with open('/data/home/liuhuawei/clothing_data/Anno/list_attr_img.txt', 'rb') as f:
attr_res = np.zeros(1000, dtype=np.int32)
lines = f.readlines()
# flag = lines[2].strip().split(' ')
# print len(flag)
# print flag
# print flag[-1]
# print flag[-1000]
# print flag[720:750]
# # print len(lines)
# # print lines[2]
for idx, line in enumerate(lines[2:]):
print "Processing %d/%d!!!" % (idx+1, len(lines))
flag = line.strip('\n').split(' ')
flag = [k for k in flag[1:] if k != '']
assert len(flag) == 1000, len(flag)
for i in xrange(1000):
id_str = flag[i]
id_num = int(id_str)
assert id_num == 1 or id_num == -1, id_num
attr_res[i] += id_num == 1
print np.where(attr_res>=1000)[0]
print len(np.where(attr_res>=1000)[0])
np.savetxt('/data/home/liuhuawei/clothing_data/Anno/tmp.txt', attr_res, fmt='%d', delimiter=',')
# print attr_res
print np.sum(attr_res)
def txt_to_json(txt_file, category_to_id, output_json_file):
infix = '/data/home/liuhuawei/clothing_data/Img/'
json_dict = {}
images = []
annotations = []
categories = []
for category in category_to_id:
cat_dict = {}
cat_dict['supercategory'] = 'none'
cat_dict['id'] = category_to_id[category]
cat_dict['name'] = category
categories.append(cat_dict)
with open(txt_file, 'rb') as f:
lines = f.readlines()
for idx, line in enumerate(lines):
print 'preprocessing %d/%d for json!!!' % (idx+1, len(lines))
flag = line.strip('\n').split(' ')
img_name_with_jpg = flag[0]
category_id = int(flag[1])
xmin = int(flag[2])
ymin = int(flag[3])
w = int(flag[4]) - xmin + 1
h = int(flag[5]) - ymin + 1
bbox = [xmin, ymin, w, h]
img_full_path = infix + img_name_with_jpg
# img_relative_path = img_name_with_jpg.split('.')[0]
img_relative_path = img_name_with_jpg.replace('/', '_')
img_data = cv.imread(img_full_path)
H, W, _ = img_data.shape
img_dict = {}
img_dict['img'] = img_relative_path
img_dict['file_name'] = img_full_path
img_dict['height'] = H
img_dict['width'] = W
img_dict['url'] = 'none'
img_dict['id'] = idx+1
images.append(img_dict)
anno_dict = {}
anno_dict['segmentation'] = 'none'
anno_dict['area'] = h * w
anno_dict['iscrowd'] = 0
anno_dict['image_id'] = idx + 1
anno_dict['bbox'] = bbox
anno_dict['source_id'] = img_relative_path
anno_dict['category_id'] = category_id
anno_dict['id'] = idx + 1
annotations.append(anno_dict)
json_dict['images'] = images
json_dict['annotations'] = annotations
json_dict['categories'] = categories
with open(output_json_file, 'wt') as f:
f.write(json.dumps(json_dict))
if __name__ == '__main__':
# # sta_aspect_ratio()
# # sta_gt_bbox_area()
# sta_cat_data()
# # sta_attr_data()
# # sta_data()
## Path to the partition file
file_path = '/data/home/liuhuawei/clothing_data/Eval/list_eval_partition.txt'
## Path to the bbox file
bbox_path = '/data/home/liuhuawei/clothing_data/Anno/list_bbox.txt'
## Path to category-img file; each image is labeled with a category in [1,2,3] for ['upbody', 'downbody', 'fullbody']
category_path = '/data/home/liuhuawei/clothing_data/Anno/list_category_img.txt'
## get dataset of "name"
name = 'train'
data = get_trainset(file_path, name)
##1-20
top_thresh = 20
##21-36
down_thresh = 36
##37-50
full_thresh = 50
# ## get bbox and category dicts for images
# img_to_bbox = load_bbox(bbox_path)
# img_to_category = load_category(category_path, top_thresh, down_thresh, full_thresh)
# ## write txt file with each row like 'img_name category_id xmin ymin xmax ymax'
wtf_path = '/data/home/liuhuawei/clothing_%s.txt' % name
# write_new_file(data, img_to_bbox, img_to_category, wtf_path)
## sample data in test set
sample_num = 30000
wtf_path_sample = '/data/home/liuhuawei/clothing_%s_sample_%d.txt' % (name, sample_num,)
sample_data(wtf_path, sample_num, wtf_path_sample)
## convert txt file to json file
output_json_file = '/data/home/liuhuawei/clothing_%s_sample_%d.json' % (name, sample_num,)
category_to_id = dict([('upbody',1) ,('downbody',2), ('fullbody', 3)])
txt_to_json(wtf_path_sample, category_to_id, output_json_file)
|
py
|
1a5a24ab4609720575f835a92c4c9cb4a74a1123
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Implements the adaptive form of the loss.
You should only use this function if 1) you want the loss to change its shape
during training (otherwise use general.py) or 2) you want to impose the loss on
a wavelet or DCT image representation, as only this function has easy support for
that.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from robust_loss import distribution
from robust_loss import util
from robust_loss import wavelet
def _check_scale(scale_lo, scale_init):
"""Helper function for checking `scale_lo` and `scale_init`."""
if not np.isscalar(scale_lo):
raise ValueError('`scale_lo` must be a scalar, but is of type {}'.format(
type(scale_lo)))
if not np.isscalar(scale_init):
raise ValueError('`scale_init` must be a scalar, but is of type {}'.format(
type(scale_init)))
if not scale_lo > 0:
raise ValueError('`scale_lo` must be > 0, but is {}'.format(scale_lo))
if not scale_init >= scale_lo:
raise ValueError('`scale_init` = {} must be >= `scale_lo` = {}'.format(
scale_init, scale_lo))
def _construct_scale(x, scale_lo, scale_init, float_dtype, var_suffix=''):
"""Helper function for constructing scale variables."""
if scale_lo == scale_init:
# If the difference between the minimum and initial scale is zero, then
# we just fix `scale` to be a constant.
scale = tf.tile(
tf.cast(scale_init, float_dtype)[tf.newaxis, tf.newaxis],
(1, x.shape[1]))
else:
# Otherwise we construct a "latent" scale variable and define `scale`
# As an affine function of a softplus on that latent variable.
latent_scale = tf.get_variable(
'LatentScale' + var_suffix, initializer=tf.zeros((1, x.shape[1]), float_dtype))
scale = util.affine_softplus(latent_scale, lo=scale_lo, ref=scale_init)
return scale
def lossfun(x,
alpha_lo=0.001,
alpha_hi=1.999,
alpha_init=None,
scale_lo=1e-5,
scale_init=1.,
var_suffix='',
**kwargs):
"""Computes the adaptive form of the robust loss on a matrix.
This function behaves differently from general.lossfun() and
distribution.nllfun(), which are "stateless", allow the caller to specify the
shape and scale of the loss, and allow for arbitrary sized inputs. This
function only allows for rank-2 inputs for the residual `x`, and expects that
`x` is of the form [batch_index, dimension_index]. This function then
constructs free parameters (TF variables) that define the alpha and scale
parameters for each dimension of `x`, such that all alphas are in
(`alpha_lo`, `alpha_hi`) and all scales are in (`scale_lo`, Infinity).
The assumption is that `x` is, say, a matrix where x[i,j] corresponds to a
pixel at location j for image i, with the idea being that all pixels at
location j should be modeled with the same shape and scale parameters across
all images in the batch. This function also returns handles to the scale and
shape parameters being optimized over, mostly for debugging and introspection.
If the user wants to fix alpha or scale to be a constant, this can be done by
setting alpha_lo=alpha_hi or scale_lo=scale_init respectively.
Args:
x: The residual for which the loss is being computed. Must be a rank-2
tensor, where the innermost dimension is the batch index, and the
outermost dimension corresponds to different "channels", where this
function will assign each channel its own variable shape (alpha) and scale
parameters that are constructed as TF variables and can be optimized over.
Must be a TF tensor or numpy array of single or double precision floats.
The precision of `x` will determine the precision of the latent variables
used to model scale and alpha internally.
alpha_lo: The lowest possible value for loss's alpha parameters, must be >=
0 and a scalar. Should probably be in (0, 2).
alpha_hi: The highest possible value for loss's alpha parameters, must be >=
alpha_lo and a scalar. Should probably be in (0, 2).
alpha_init: The value that the loss's alpha parameters will be initialized
to, must be in (`alpha_lo`, `alpha_hi`), unless `alpha_lo` == `alpha_hi`
in which case this will be ignored. Defaults to (`alpha_lo` + `alpha_hi`)
/ 2
scale_lo: The lowest possible value for the loss's scale parameters. Must be
> 0 and a scalar. This value may have more of an effect than you think, as
the loss is unbounded as scale approaches zero (say, at a delta function).
scale_init: The initial value used for the loss's scale parameters. This
also defines the zero-point of the latent representation of scales, so SGD
may cause optimization to gravitate towards producing scales near this
value.
**kwargs: Arguments to be passed to the underlying distribution.nllfun().
Returns:
A tuple of the form (`loss`, `alpha`, `scale`).
`loss`: a TF tensor of the same type and shape as input `x`, containing
the loss at each element of `x` as a function of `x`, `alpha`, and
`scale`. These "losses" are actually negative log-likelihoods (as produced
by distribution.nllfun()) and so they are not actually bounded from below
by zero. You'll probably want to minimize their sum or mean.
`scale`: a TF tensor of the same type as x, of size (1, x.shape[1]), as we
construct a scale variable for each dimension of `x` but not for each
batch element. This contains the current estimated scale parameter for
each dimension, and will change during optimization.
`alpha`: a TF tensor of the same type as x, of size (1, x.shape[1]), as we
construct an alpha variable for each dimension of `x` but not for each
batch element. This contains the current estimated alpha parameter for
each dimension, and will change during optimization.
Raises:
ValueError: If any of the arguments are invalid.
"""
_check_scale(scale_lo, scale_init)
if not np.isscalar(alpha_lo):
raise ValueError('`alpha_lo` must be a scalar, but is of type {}'.format(
type(alpha_lo)))
if not np.isscalar(alpha_hi):
raise ValueError('`alpha_hi` must be a scalar, but is of type {}'.format(
type(alpha_hi)))
if alpha_init is not None and not np.isscalar(alpha_init):
raise ValueError(
'`alpha_init` must be None or a scalar, but is of type {}'.format(
type(alpha_init)))
if not alpha_lo >= 0:
raise ValueError('`alpha_lo` must be >= 0, but is {}'.format(alpha_lo))
if not alpha_hi >= alpha_lo:
raise ValueError('`alpha_hi` = {} must be >= `alpha_lo` = {}'.format(
alpha_hi, alpha_lo))
if alpha_init is not None and alpha_lo != alpha_hi:
if not (alpha_init > alpha_lo and alpha_init < alpha_hi):
raise ValueError(
'`alpha_init` = {} must be in (`alpha_lo`, `alpha_hi`) = ({} {})'
.format(alpha_init, alpha_lo, alpha_hi))
float_dtype = x.dtype
assert_ops = [tf.Assert(tf.equal(tf.rank(x), 2), [tf.rank(x)])]
with tf.control_dependencies(assert_ops):
if alpha_lo == alpha_hi:
# If the range of alphas is a single item, then we just fix `alpha` to be
# a constant.
alpha = tf.tile(
tf.cast(alpha_lo, float_dtype)[tf.newaxis, tf.newaxis],
(1, x.shape[1]))
else:
# Otherwise we construct a "latent" alpha variable and define `alpha`
# As an affine function of a sigmoid on that latent variable, initialized
# such that `alpha` starts off as `alpha_init`.
if alpha_init is None:
alpha_init = (alpha_lo + alpha_hi) / 2.
latent_alpha_init = util.inv_affine_sigmoid(
alpha_init, lo=alpha_lo, hi=alpha_hi)
latent_alpha = tf.get_variable(
'LatentAlpha' + var_suffix,
initializer=tf.fill((1, x.shape[1]),
tf.cast(latent_alpha_init, dtype=float_dtype)))
alpha = util.affine_sigmoid(latent_alpha, lo=alpha_lo, hi=alpha_hi)
scale = _construct_scale(x, scale_lo, scale_init, float_dtype, var_suffix=var_suffix)
loss = distribution.nllfun(x, alpha, scale, **kwargs)
return loss, alpha, scale
def lossfun_students(x, scale_lo=1e-5, scale_init=1., var_suffix=''):
"""A variant of lossfun() that uses the NLL of a Student's t-distribution.
Args:
x: The residual for which the loss is being computed. Must be a rank-2
tensor, where the innermost dimension is the batch index, and the
outermost dimension corresponds to different "channels", where this
function will assign each channel its own variable shape (log-df) and
scale parameters that are constructed as TF variables and can be optimized
over. Must be a TF tensor or numpy array of single or double precision
floats. The precision of `x` will determine the precision of the latent
variables used to model scale and log-df internally.
scale_lo: The lowest possible value for the loss's scale parameters. Must be
> 0 and a scalar. This value may have more of an effect than you think, as
the loss is unbounded as scale approaches zero (say, at a delta function).
scale_init: The initial value used for the loss's scale parameters. This
also defines the zero-point of the latent representation of scales, so SGD
may cause optimization to gravitate towards producing scales near this
value.
Returns:
A tuple of the form (`loss`, `log_df`, `scale`).
`loss`: a TF tensor of the same type and shape as input `x`, containing
the loss at each element of `x` as a function of `x`, `log_df`, and
`scale`. These "losses" are actually negative log-likelihoods (as produced
by distribution.nllfun()) and so they are not actually bounded from below
by zero. You'll probably want to minimize their sum or mean.
`scale`: a TF tensor of the same type as x, of size (1, x.shape[1]), as we
construct a scale variable for each dimension of `x` but not for each
batch element. This contains the current estimated scale parameter for
each dimension, and will change during optimization.
`log_df`: a TF tensor of the same type as x, of size (1, x.shape[1]), as we
construct an log-DF variable for each dimension of `x` but not for each
batch element. This contains the current estimated log(degrees-of-freedom)
parameter for each dimension, and will change during optimization.
Raises:
ValueError: If any of the arguments are invalid.
"""
_check_scale(scale_lo, scale_init)
float_dtype = x.dtype
assert_ops = [tf.Assert(tf.equal(tf.rank(x), 2), [tf.rank(x)])]
with tf.control_dependencies(assert_ops):
log_df = tf.get_variable(
name='LogDf', initializer=tf.zeros((1, x.shape[1]), float_dtype))
scale = _construct_scale(x, scale_lo, scale_init, float_dtype, var_suffix=var_suffix)
loss = util.students_t_nll(x, tf.math.exp(log_df), scale)
return loss, log_df, scale
def image_lossfun(x,
color_space='YUV',
representation='CDF9/7',
wavelet_num_levels=5,
wavelet_scale_base=1,
use_students_t=False,
summarize_loss=True,
**kwargs):
"""Computes the adaptive form of the robust loss on a set of images.
This function is a wrapper around lossfun() above. Like lossfun(), this
function is not "stateless" --- it requires inputs of a specific shape and
size, and constructs TF variables describing each non-batch dimension in `x`.
`x` is expected to be the difference between sets of RGB images, and the other
arguments to this function allow for the color space and spatial
representation of `x` to be changed before the loss is imposed. By default,
this function uses a CDF9/7 wavelet decomposition in a YUV color space, which
often works well. This function also returns handles to the scale and
shape parameters (both in the shape of images) being optimized over,
and summarizes both parameters in TensorBoard.
Args:
x: A set of image residuals for which the loss is being computed. Must be a
rank-4 tensor of size (num_batches, width, height, color_channels). This
is assumed to be a set of differences between RGB images.
color_space: The color space that `x` will be transformed into before
computing the loss. Must be 'RGB' (in which case no transformation is
applied) or 'YUV' (in which case we actually use a volume-preserving
scaled YUV colorspace so that log-likelihoods still have meaning, see
util.rgb_to_syuv()). Note that changing this argument does not change the
assumption that `x` is the set of differences between RGB images, it just
changes what color space `x` is converted to from RGB when computing the
loss.
representation: The spatial image representation that `x` will be
transformed into after converting the color space and before computing the
loss. If this is a valid type of wavelet according to
wavelet.generate_filters() then that is what will be used, but we also
support setting this to 'DCT' which applies a 2D DCT to the images, and to
'PIXEL' which applies no transformation to the image, thereby causing the
loss to be imposed directly on pixels.
wavelet_num_levels: If `representation` is a kind of wavelet, this is the
number of levels used when constructing wavelet representations. Otherwise
this is ignored. Should probably be set to as large as possible a value
that is supported by the input resolution, such as that produced by
wavelet.get_max_num_levels().
wavelet_scale_base: If `representation` is a kind of wavelet, this is the
base of the scaling used when constructing wavelet representations.
Otherwise this is ignored. For image_lossfun() to be volume preserving (a
useful property when evaluating generative models) this value must be ==
1. If the goal of this loss isn't proper statistical modeling, then
modifying this value (say, setting it to 0.5 or 2) may significantly
improve performance.
use_students_t: If true, use the NLL of Student's T-distribution instead
of the adaptive loss. This causes all `alpha_*` inputs to be ignored.
summarize_loss: Whether or not to make TF summaries describing the latent
state of the loss function. True by default.
**kwargs: Arguments to be passed to the underlying lossfun().
Returns:
A tuple of the form (`loss`, `alpha`, `scale`). If use_students_t == True,
then `log(df)` is returned instead of `alpha`.
`loss`: a TF tensor of the same type and shape as input `x`, containing
the loss at each element of `x` as a function of `x`, `alpha`, and
`scale`. These "losses" are actually negative log-likelihoods (as produced
by distribution.nllfun()) and so they are not actually bounded from below
by zero. You'll probably want to minimize their sum or mean.
`scale`: a TF tensor of the same type as x, of size
(width, height, color_channels),
as we construct a scale variable for each spatial and color dimension of `x`
but not for each batch element. This contains the current estimated scale
parameter for each dimension, and will change during optimization.
`alpha`: a TF tensor of the same type as x, of size
(width, height, color_channels),
as we construct an alpha variable for each spatial and color dimension of
`x` but not for each batch element. This contains the current estimated
alpha parameter for each dimension, and will change during optimization.
Raises:
ValueError: if `color_space` or `representation` are unsupported color
spaces or image representations, respectively.
"""
color_spaces = ['RGB', 'YUV']
if color_space not in color_spaces:
raise ValueError('`color_space` must be in {}, but is {!r}'.format(
color_spaces, color_space))
representations = wavelet.generate_filters() + ['DCT', 'PIXEL']
if representation not in representations:
raise ValueError('`representation` must be in {}, but is {!r}'.format(
representations, representation))
assert_ops = [tf.Assert(tf.equal(tf.rank(x), 4), [tf.rank(x)])]
with tf.control_dependencies(assert_ops):
if color_space == 'YUV':
x = util.rgb_to_syuv(x)
# If `color_space` == 'RGB', do nothing.
# Reshape `x` from
# (num_batches, width, height, num_channels) to
# (num_batches * num_channels, width, height)
_, width, height, num_channels = x.shape.as_list()
x_stack = tf.reshape(tf.transpose(x, (0, 3, 1, 2)), (-1, width, height))
# Turn each channel in `x_stack` into the spatial representation specified
# by `representation`.
if representation in wavelet.generate_filters():
x_stack = wavelet.flatten(
wavelet.rescale(
wavelet.construct(x_stack, wavelet_num_levels, representation),
wavelet_scale_base))
elif representation == 'DCT':
x_stack = util.image_dct(x_stack)
# If `representation` == 'PIXEL', do nothing.
# Reshape `x_stack` from
# (num_batches * num_channels, width, height) to
# (num_batches, num_channels * width * height)
x_mat = tf.reshape(
tf.transpose(
tf.reshape(x_stack, [-1, num_channels, width, height]),
[0, 2, 3, 1]), [-1, width * height * num_channels])
# Set up the adaptive loss. Note, if `use_students_t` == True then
# `alpha_mat` actually contains "log(df)" values.
if use_students_t:
loss_mat, alpha_mat, scale_mat = lossfun_students(x_mat, **kwargs)
else:
loss_mat, alpha_mat, scale_mat = lossfun(x_mat, **kwargs)
# Reshape the loss function's outputs to have the shapes as the input.
loss = tf.reshape(loss_mat, [-1, width, height, num_channels])
alpha = tf.reshape(alpha_mat, [width, height, num_channels])
scale = tf.reshape(scale_mat, [width, height, num_channels])
if summarize_loss:
# Summarize the `alpha` and `scale` parameters as images (normalized to
# [0, 1]) and histograms.
# Note that these may look unintuitive unless the colorspace is 'RGB' and
# the image representation is 'PIXEL', as the image summaries (like most
# images) are rendered as RGB pixels.
alpha_min = tf.reduce_min(alpha)
alpha_max = tf.reduce_max(alpha)
tf.summary.image(
'robust/alpha',
(alpha[tf.newaxis] - alpha_min) / (alpha_max - alpha_min + 1e-10))
tf.summary.histogram('robust/alpha', alpha)
log_scale = tf.math.log(scale)
log_scale_min = tf.reduce_min(log_scale)
log_scale_max = tf.reduce_max(log_scale)
tf.summary.image('robust/log_scale',
(log_scale[tf.newaxis] - log_scale_min) /
(log_scale_max - log_scale_min + 1e-10))
tf.summary.histogram('robust/log_scale', log_scale)
return loss, alpha, scale
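# A minimal usage sketch (TF1 graph mode, matching this module's use of
# tf.get_variable; shapes and optimizer choice are illustrative, not prescriptive):
#
#   x = tf.placeholder(tf.float32, shape=[None, 16])   # rank-2 residuals
#   loss, alpha, scale = lossfun(x)                     # one (alpha, scale) per column
#   train_op = tf.train.AdamOptimizer(1e-3).minimize(tf.reduce_mean(loss))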
|
py
|
1a5a251667ec0718b3b699563df656fbdd88298c
|
import json
import os
import subprocess
import functools
import sys
from urllib.parse import urlparse
from pathlib import Path, PurePath
import web3
import pkg_resources
# import grpc
# from grpc_tools.protoc import main as protoc
RESOURCES_PATH = PurePath(os.path.realpath(
__file__)).parent.joinpath("resources")
class DefaultAttributeObject(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
if v is not None:
setattr(self, k, v)
def getstring(self, item):
return getattr(self, item)
def getint(self, item):
if getattr(self, item) is None:
return None
return int(getattr(self, item))
def getfloat(self, item):
if getattr(self, item) is None:
return None
return float(getattr(self, item))
def getboolean(self, item):
if getattr(self, item) is None:
return None
i = self.getstring(item)
if i in ["yes", "on", "true", "True", "1"]:
return True
return False
def __getattr__(self, item):
return self.__dict__.get(item, None)
def __repr__(self):
return self.__dict__.__repr__()
def __str__(self):
return self.__dict__.__str__()
def get_web3(rpc_endpoint):
if rpc_endpoint.startswith("ws:"):
provider = web3.WebsocketProvider(rpc_endpoint)
else:
provider = web3.HTTPProvider(rpc_endpoint)
return web3.Web3(provider)
def serializable(o):
if isinstance(o, bytes):
return o.hex()
else:
return o.__dict__
def safe_address_converter(a):
if not web3.eth.is_checksum_address(a):
raise Exception(
"%s is not a valid Ethereum checksum address" % a)
return a
def type_converter(t):
if t.endswith("[]"):
return lambda x: list(map(type_converter(t.replace("[]", "")), json.loads(x)))
else:
if "int" in t:
return lambda x: web3.Web3.toInt(text=x)
elif "bytes32" in t:
return lambda x: web3.Web3.toBytes(text=x).ljust(32, b"\0") if not x.startswith("0x") else web3.Web3.toBytes(hexstr=x).ljust(32, b"\0")
elif "byte" in t:
return lambda x: web3.Web3.toBytes(text=x) if not x.startswith("0x") else web3.Web3.toBytes(hexstr=x)
elif "address" in t:
return safe_address_converter
else:
return str
def bytes32_to_str(b):
return b.rstrip(b"\0").decode("utf-8")
def _add_next_paths(path, entry_path, seen_paths, next_paths):
with open(path) as f:
for line in f:
if line.strip().startswith("import"):
import_statement = "".join(line.split('"')[1::2])
if not import_statement.startswith("google/protobuf"):
import_statement_path = Path(
path.parent.joinpath(import_statement)).resolve()
if entry_path.parent in path.parents:
if import_statement_path not in seen_paths:
seen_paths.add(import_statement_path)
next_paths.append(import_statement_path)
else:
raise ValueError(
"Path must not be a parent of entry path")
def walk_imports(entry_path):
seen_paths = set()
next_paths = []
for file_path in os.listdir(entry_path):
if file_path.endswith(".proto"):
file_path = entry_path.joinpath(file_path)
seen_paths.add(file_path)
next_paths.append(file_path)
while next_paths:
path = next_paths.pop()
if os.path.isfile(path):
_add_next_paths(path, entry_path, seen_paths, next_paths)
else:
raise IOError("Import path must be a valid file: {}".format(path))
return seen_paths
def get_contract_def(contract_name, contract_artifacts_root=RESOURCES_PATH.joinpath("contracts")):
contract_def = {}
with open(Path(__file__).absolute().parent.joinpath(contract_artifacts_root, "abi", "{}.json".format(contract_name))) as f:
contract_def["abi"] = json.load(f)
if os.path.isfile(Path(__file__).absolute().parent.joinpath(contract_artifacts_root, "networks", "{}.json".format(contract_name))):
with open(Path(__file__).absolute().parent.joinpath(contract_artifacts_root, "networks", "{}.json".format(contract_name))) as f:
contract_def["networks"] = json.load(f)
return contract_def
def read_temp_tar(f):
f.flush()
f.seek(0)
return f
def get_cli_version():
return pkg_resources.get_distribution("snet-cli").version
# def compile_proto(entry_path, codegen_dir, proto_file=None, target_language="python"):
# try:
# if not os.path.exists(codegen_dir):
# os.makedirs(codegen_dir)
# proto_include = pkg_resources.resource_filename('grpc_tools', '_proto')
#
# compiler_args = [
# "-I{}".format(entry_path),
# "-I{}".format(proto_include)
# ]
#
# if target_language == "python":
# compiler_args.insert(0, "protoc")
# compiler_args.append("--python_out={}".format(codegen_dir))
# compiler_args.append("--grpc_python_out={}".format(codegen_dir))
# compiler = protoc
# elif target_language == "nodejs":
# protoc_node_compiler_path = Path(RESOURCES_PATH.joinpath("node_modules").joinpath("grpc-tools").joinpath("bin").joinpath("protoc.js")).absolute()
# grpc_node_plugin_path = Path(RESOURCES_PATH.joinpath("node_modules").joinpath("grpc-tools").joinpath("bin").joinpath("grpc_node_plugin")).resolve()
# if not os.path.isfile(protoc_node_compiler_path) or not os.path.isfile(grpc_node_plugin_path):
# print("Missing required node.js protoc compiler. Retrieving from npm...")
# subprocess.run(["npm", "install"], cwd=RESOURCES_PATH)
# compiler_args.append("--js_out=import_style=commonjs,binary:{}".format(codegen_dir))
# compiler_args.append("--grpc_out={}".format(codegen_dir))
# compiler_args.append("--plugin=protoc-gen-grpc={}".format(grpc_node_plugin_path))
# compiler = lambda args: subprocess.run([str(protoc_node_compiler_path)] + args)
#
# if proto_file:
# compiler_args.append(str(proto_file))
# else:
# compiler_args.extend([str(p) for p in entry_path.glob("**/*.proto")])
#
# if not compiler(compiler_args):
# return True
# else:
# return False
#
# except Exception as e:
# print(e)
# return False
def abi_get_element_by_name(abi, name):
""" Return element of abi (return None if fails to find) """
if (abi and "abi" in abi):
for a in abi["abi"]:
if ("name" in a and a["name"] == name):
return a
return None
def abi_decode_struct_to_dict(abi, struct_list):
return {el_abi["name"]: el for el_abi, el in zip(abi["outputs"], struct_list)}
def int4bytes_big(b):
return int.from_bytes(b, byteorder='big')
def is_valid_endpoint(url):
"""
Just ensures the url has a scheme (http/https), and a net location (IP or domain name).
Can make more advanced or do on-network tests if needed, but this is really just to catch obvious errors.
>>> is_valid_endpoint("https://34.216.72.29:6206")
True
>>> is_valid_endpoint("blahblah")
False
>>> is_valid_endpoint("blah://34.216.72.29")
False
>>> is_valid_endpoint("http://34.216.72.29:%%%")
False
>>> is_valid_endpoint("http://192.168.0.2:9999")
True
"""
try:
result = urlparse(url)
if result.port:
_port = int(result.port)
return (
all([result.scheme, result.netloc]) and
result.scheme in ['http', 'https']
)
except ValueError:
return False
def remove_http_https_prefix(endpoint):
"""remove http:// or https:// prefix if presented in endpoint"""
endpoint = endpoint.replace("https://", "")
endpoint = endpoint.replace("http://", "")
return endpoint
# def open_grpc_channel(endpoint):
# """
# open grpc channel:
# - for http:// we open insecure_channel
# - for https:// we open secure_channel (with default credentials)
# - without prefix we open insecure_channel
# """
# if (endpoint.startswith("https://")):
# return grpc.secure_channel(remove_http_https_prefix(endpoint), grpc.ssl_channel_credentials())
# return grpc.insecure_channel(remove_http_https_prefix(endpoint))
def rgetattr(obj, attr):
"""
>>> from types import SimpleNamespace
>>> args = SimpleNamespace(a=1, b=SimpleNamespace(c=2, d='e'))
>>> rgetattr(args, "a")
1
>>> rgetattr(args, "b.c")
2
"""
return functools.reduce(getattr, [obj] + attr.split('.'))
def get_contract_object(w3, contract_file):
with open(RESOURCES_PATH.joinpath("contracts", "abi", contract_file)) as f:
abi = json.load(f)
with open(RESOURCES_PATH.joinpath("contracts", "networks", contract_file)) as f:
networks = json.load(f)
address = w3.toChecksumAddress(networks[w3.version.network]["address"])
return w3.eth.contract(abi=abi, address=address)
def get_contract_deployment_block(w3, contract_file):
with open(RESOURCES_PATH.joinpath("contracts", "networks", contract_file)) as f:
networks = json.load(f)
txn_hash = networks[w3.version.network]["transactionHash"]
return w3.eth.getTransactionReceipt(txn_hash).blockNumber
def normalize_private_key(private_key):
if private_key.startswith("0x"):
private_key = bytes(bytearray.fromhex(private_key[2:]))
else:
private_key = bytes(bytearray.fromhex(private_key))
return private_key
def get_address_from_private(private_key):
return web3.eth.Account.privateKeyToAccount(private_key).address
class add_to_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
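# Illustrative behaviour of DefaultAttributeObject (doctest-style):
#   >>> args = DefaultAttributeObject(a="1", b=None)
#   >>> args.getint("a")
#   1
#   >>> args.b is None        # unset / None kwargs fall back to None
#   True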
|
py
|
1a5a26b4460ccfb4a2c7271af83dfc4d45ef1b3d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Detect IP v4 or v6 addresses the system uses to talk to outside world.
Original code from
https://github.com/vincentbernat/puppet-workstation/blob/master/modules/system/templates/network/ddns-updater.erb
Refactored/modified by Thomas Waldmann to just detect the IP.
"""
from __future__ import print_function
import errno
import socket
IPV4 = "ipv4"
IPV6_ANY = "ipv6"
IPV6_PUBLIC = "ipv6_public"
IPV6_TMP = "ipv6_tmp"
# reserved IPs for documentation/example purposes
OUTSIDE_IPV4 = "192.0.2.1"
OUTSIDE_IPV6 = "2001:db8::1"
# Not everything is available in Python
if not hasattr(socket, "IPV6_ADDR_PREFERENCES"):
socket.IPV6_ADDR_PREFERENCES = 72
if not hasattr(socket, "IPV6_PREFER_SRC_TMP"):
socket.IPV6_PREFER_SRC_TMP = 1
if not hasattr(socket, "IPV6_PREFER_SRC_PUBLIC"):
socket.IPV6_PREFER_SRC_PUBLIC = 2
class GetIpException(Exception):
"""Generic base class for all exceptions raised here."""
def detect_ip(kind):
"""
Detect IP address.
kind can be:
IPV4 - returns IPv4 address
IPV6_ANY - returns any IPv6 address (no preference)
IPV6_PUBLIC - returns public IPv6 address
IPV6_TMP - returns temporary IPV6 address (privacy extensions)
This function either returns an IP address (str) or
raises a GetIpException.
"""
if kind not in (IPV4, IPV6_PUBLIC, IPV6_TMP, IPV6_ANY):
raise ValueError("invalid kind specified")
# We create a UDP socket and connect it to a public host.
# We query the OS to know what our address is.
# No packet will really be sent since we are using UDP.
af = socket.AF_INET if kind == IPV4 else socket.AF_INET6
s = socket.socket(af, socket.SOCK_DGRAM)
try:
if kind in [IPV6_PUBLIC, IPV6_TMP, ]:
# caller wants some specific kind of IPv6 address (not IPV6_ANY)
try:
if kind == IPV6_PUBLIC:
preference = socket.IPV6_PREFER_SRC_PUBLIC
elif kind == IPV6_TMP:
preference = socket.IPV6_PREFER_SRC_TMP
s.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_ADDR_PREFERENCES, preference)
except socket.error as e:
if e.errno == errno.ENOPROTOOPT:
raise GetIpException("Kernel doesn't support IPv6 address preference")
else:
raise GetIpException("Unable to set IPv6 address preference: %s" % e)
try:
outside_ip = OUTSIDE_IPV4 if kind == IPV4 else OUTSIDE_IPV6
s.connect((outside_ip, 9))
except (socket.error, socket.gaierror) as e:
raise GetIpException(str(e))
ip = s.getsockname()[0]
finally:
s.close()
return ip
if __name__ == "__main__":
print("IP v4:", detect_ip(IPV4)) # noqa
print("IP v6 public:", detect_ip(IPV6_PUBLIC)) # noqa
print("IP v6 tmp:", detect_ip(IPV6_TMP)) # noqa
print("IP v6 any:", detect_ip(IPV6_ANY)) # noqa
|
py
|
1a5a271f7cef74a4565e8d6807a9bda84f66464c
|
#-*- coding:utf-8 -*-
import json
import copy
from flask import render_template, abort, request, url_for, redirect, g
from flask.ext.babel import gettext
import time
import datetime
from rrd import app
from rrd.model.screen import DashboardScreen
from rrd.model.graph import DashboardGraph
from rrd.model.endpoint import Endpoint, EndpointCounter  # EndpointCounter is used in dash_graph_add below
from rrd import consts
from rrd.utils.graph_urls import generate_graph_urls
from rrd import config
@app.route("/screen", methods=["GET", "POST"])
def dash_screens():
top_screens = DashboardScreen.gets_by_pid(pid='0') or []
top_screens = sorted(top_screens, key=lambda x: x.name)
return render_template("screen/index.html", **locals())
@app.route("/screen/<int:sid>/delete")
def dash_screen_delete(sid):
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no such screen")
DashboardScreen.remove(sid)
return redirect("/screen")
@app.route("/screen/<int:sid>/edit", methods=["GET", "POST"])
def dash_screen_edit(sid):
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no such screen")
if request.method == "POST":
screen_name = request.form.get("screen_name")
screen.update(name=screen_name)
return redirect("/screen/%s" % screen.id)
else:
return render_template("screen/edit.html", **locals())
@app.route("/screen/<int:sid>/clone", methods=["GET", "POST"])
def dash_screen_clone(sid):
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no such screen")
if request.method == "POST":
screen_name = request.form.get("screen_name")
with_graph = request.form.get("with_graph")
new_s = DashboardScreen.add(screen.pid, screen_name)
if not new_s:
abort(404, gettext("screen create fail"))
if with_graph:
old_graphs = DashboardGraph.gets_by_screen_id(sid)
for o in old_graphs:
DashboardGraph.add(o.title, o.hosts, o.counters, new_s.id,
o.timespan, o.graph_type, o.method, o.position)
return redirect("/screen/%s" % new_s.id)
else:
return render_template("screen/clone.html", **locals())
@app.route("/graph/<int:gid>/delete")
def dash_graph_delete(gid):
graph = DashboardGraph.get(gid)
if not graph:
abort(404, "no such graph")
DashboardGraph.remove(gid)
return redirect("/screen/" + graph.screen_id)
@app.route("/screen/<int:sid>")
def dash_screen(sid):
start = request.args.get("start")
end = request.args.get("end")
top_screens = DashboardScreen.gets_by_pid(pid=0)
top_screens = sorted(top_screens, key=lambda x: x.name)
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no screen")
if str(screen.pid) == '0':
sub_screens = DashboardScreen.gets_by_pid(pid=sid)
sub_screens = sorted(sub_screens, key=lambda x: x.name)
return render_template("screen/top_screen.html", **locals())
pscreen = DashboardScreen.get(screen.pid)
sub_screens = DashboardScreen.gets_by_pid(pid=screen.pid)
sub_screens = sorted(sub_screens, key=lambda x: x.name)
graphs = DashboardGraph.gets_by_screen_id(screen.id)
all_graphs = []
for graph in graphs:
all_graphs.extend(generate_graph_urls(graph, start, end) or [])
all_graphs = sorted(all_graphs, key=lambda x: (x.position, x.id))
return render_template("screen/screen.html", **locals())
@app.route("/screen/embed/<int:sid>")
def dash_screen_embed(sid):
start = request.args.get("start")
end = request.args.get("end")
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no screen")
if screen.pid == '0':
abort(404, "top screen")
graphs = DashboardGraph.gets_by_screen_id(screen.id)
all_graphs = []
for graph in graphs:
all_graphs.extend(generate_graph_urls(graph, start, end) or [])
all_graphs = sorted(all_graphs, key=lambda x: (x.position, x.id))
return render_template("screen/screen_embed.html", **locals())
@app.route("/screen/add", methods=["GET", "POST"])
def dash_screen_add():
if request.method == "POST":
name = request.form.get("screen_name")
pid = request.form.get("pid", '0')
screen = DashboardScreen.add(pid, name)
return redirect("/screen/%s" % screen.id)
else:
pid = request.args.get("pid", '0')
try:
screen = DashboardScreen.get(pid)
except:
screen = None
return render_template("screen/add.html", **locals())
@app.route("/screen/<int:sid>/graph", methods=["GET", "POST"])
def dash_graph_add(sid):
all_screens = DashboardScreen.gets_all()
top_screens = [x for x in all_screens if x.pid == '0']
children = []
for t in top_screens:
children.append([x for x in all_screens if x.pid == t.id])
screen = DashboardScreen.get(sid)
if not screen:
abort(404, "no screen")
pscreen = DashboardScreen.get(screen.pid)
if request.method == "POST":
title = request.form.get("title")
hosts = request.form.get("hosts", "").strip()
hosts = hosts and hosts.split("\n") or []
hosts = [x.strip() for x in hosts]
counters = request.form.get("counters", "").strip()
counters = counters and counters.split("\n") or []
counters = [x.strip() for x in counters]
timespan = int(request.form.get("timespan", 3600))
graph_type = request.form.get("graph_type", 'h')
method = request.form.get("method", '').upper()
position = int(request.form.get("position", 0))
graph = DashboardGraph.add(title, hosts, counters, sid,
timespan, graph_type, method, position)
return redirect("/screen/%s" % sid)
else:
limit = 10000
gid = request.args.get("gid")
graph = gid and DashboardGraph.get(gid)
options = {}
options['hosts'] = Endpoint.search(''.split(), limit=limit)
ids = []
for ep in options['hosts']:
ids.append(ep.id)
options['counters'] = EndpointCounter.gets_by_endpoint_ids(ids[0:1], limit=limit)
return render_template("screen/graph_add.html", config=config, **locals())
@app.route("/graph/<int:gid>/edit", methods=["GET", "POST"])
def dash_graph_edit(gid):
error = ""
graph = DashboardGraph.get(gid)
if not graph:
abort(404, "no graph")
all_screens = DashboardScreen.gets_all()
top_screens = [x for x in all_screens if x.pid == '0']
children = []
for t in top_screens:
children.append([x for x in all_screens if x.pid == t.id])
screen = DashboardScreen.get(graph.screen_id)
if not screen:
abort(404, "no screen")
pscreen = DashboardScreen.get(screen.pid)
if request.method == "POST":
ajax = request.form.get("ajax", "")
screen_id = request.form.get("screen_id")
title = request.form.get("title", "").strip()
hosts = request.form.get("hosts", "").strip()
hosts = hosts and hosts.split("\n") or []
hosts = [x.strip() for x in hosts]
counters = request.form.get("counters", "").strip()
counters = counters and counters.split("\n") or []
counters = [x.strip() for x in counters]
timespan = request.form.get("timespan", 3600)
graph_type = request.form.get("graph_type", 'h')
method = request.form.get("method", '').upper()
position = request.form.get("position", 0)
graph = graph.update(title, hosts, counters, screen_id,
timespan, graph_type, method, position)
error = gettext("edit successful")
if not ajax:
return render_template("screen/graph_edit.html", config=config, **locals())
else:
return "ok"
else:
ajax = request.args.get("ajax", "")
return render_template("screen/graph_edit.html", **locals())
@app.route("/graph/multi_edit", methods=["GET", "POST"])
def dash_graph_multi_edit():
ret = {
"ok": False,
"msg": "",
"data": [],
}
if request.method == "POST":
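# The POST body is expected to be a JSON list of graph updates; a hypothetical example
# (values are illustrative only): [{"id": 3, "endpoints": ["host-a", "host-b"], "counters": ["cpu.idle"]}]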
d = request.data
try:
jdata = json.loads(d)
except ValueError:
jdata = None
if not jdata:
return json.dumps({
"ok": False,
"msg": "no_data_post",
})
rows = []
for x in jdata:
rows.append({"id": x["id"], "hosts": x["endpoints"], "counters": x["counters"]})
DashboardGraph.update_multi(rows)
return json.dumps({
"ok": True,
"msg": "",
})
elif request.method == "GET":
sid = request.args.get("sid")
if not sid or not DashboardScreen.get(sid):
ret["msg"] = "no_screen"
return json.dumps(ret)
ret["ok"] = True
graphs = DashboardGraph.gets_by_screen_id(sid)
ret['data'] = [{"id": x.id, "title": x.title, "endpoints": x.hosts, "counters": x.counters} for x in graphs]
return json.dumps(ret)
|
py
|
1a5a27281f00b41c613831b2ac22fb392b650796
|
import asyncio
import functools
import traceback
import unittest
from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.locks import Event
from tornado.log import gen_log, app_log
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.template import DictLoader
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.web import Application, RequestHandler
try:
import tornado.websocket # noqa: F401
from tornado.util import _websocket_mask_python
except ImportError:
# The unittest module presents misleading errors on ImportError
# (it acts as if websocket_test could not be found, hiding the underlying
# error). If we get an ImportError here (which could happen due to
# TORNADO_EXTENSION=1), print some extra information before failing.
traceback.print_exc()
raise
from tornado.websocket import (
WebSocketHandler,
websocket_connect,
WebSocketError,
WebSocketClosedError,
)
try:
from tornado import speedups
except ImportError:
speedups = None # type: ignore
class TestWebSocketHandler(WebSocketHandler):
"""Base class for testing handlers that exposes the on_close event.
This allows for tests to see the close code and reason on the
server side.
"""
def initialize(self, close_future=None, compression_options=None):
self.close_future = close_future
self.compression_options = compression_options
def get_compression_options(self):
return self.compression_options
def on_close(self):
if self.close_future is not None:
self.close_future.set_result((self.close_code, self.close_reason))
class EchoHandler(TestWebSocketHandler):
@gen.coroutine
def on_message(self, message):
try:
yield self.write_message(message, isinstance(message, bytes))
except asyncio.CancelledError:
pass
except WebSocketClosedError:
pass
class ErrorInOnMessageHandler(TestWebSocketHandler):
def on_message(self, message):
1 / 0
class HeaderHandler(TestWebSocketHandler):
def open(self):
methods_to_test = [
functools.partial(self.write, "This should not work"),
functools.partial(self.redirect, "http://localhost/elsewhere"),
functools.partial(self.set_header, "X-Test", ""),
functools.partial(self.set_cookie, "Chocolate", "Chip"),
functools.partial(self.set_status, 503),
self.flush,
self.finish,
]
for method in methods_to_test:
try:
# In a websocket context, many RequestHandler methods
# raise RuntimeErrors.
method()
raise Exception("did not get expected exception")
except RuntimeError:
pass
self.write_message(self.request.headers.get("X-Test", ""))
class HeaderEchoHandler(TestWebSocketHandler):
def set_default_headers(self):
self.set_header("X-Extra-Response-Header", "Extra-Response-Value")
def prepare(self):
for k, v in self.request.headers.get_all():
if k.lower().startswith("x-test"):
self.set_header(k, v)
class NonWebSocketHandler(RequestHandler):
def get(self):
self.write("ok")
class CloseReasonHandler(TestWebSocketHandler):
def open(self):
self.on_close_called = False
self.close(1001, "goodbye")
class AsyncPrepareHandler(TestWebSocketHandler):
@gen.coroutine
def prepare(self):
yield gen.moment
def on_message(self, message):
self.write_message(message)
class PathArgsHandler(TestWebSocketHandler):
def open(self, arg):
self.write_message(arg)
class CoroutineOnMessageHandler(TestWebSocketHandler):
def initialize(self, **kwargs):
super(CoroutineOnMessageHandler, self).initialize(**kwargs)
self.sleeping = 0
@gen.coroutine
def on_message(self, message):
if self.sleeping > 0:
self.write_message("another coroutine is already sleeping")
self.sleeping += 1
yield gen.sleep(0.01)
self.sleeping -= 1
self.write_message(message)
class RenderMessageHandler(TestWebSocketHandler):
def on_message(self, message):
self.write_message(self.render_string("message.html", message=message))
class SubprotocolHandler(TestWebSocketHandler):
def initialize(self, **kwargs):
super(SubprotocolHandler, self).initialize(**kwargs)
self.select_subprotocol_called = False
def select_subprotocol(self, subprotocols):
if self.select_subprotocol_called:
raise Exception("select_subprotocol called twice")
self.select_subprotocol_called = True
if "goodproto" in subprotocols:
return "goodproto"
return None
def open(self):
if not self.select_subprotocol_called:
raise Exception("select_subprotocol not called")
self.write_message("subprotocol=%s" % self.selected_subprotocol)
class OpenCoroutineHandler(TestWebSocketHandler):
def initialize(self, test, **kwargs):
super(OpenCoroutineHandler, self).initialize(**kwargs)
self.test = test
self.open_finished = False
@gen.coroutine
def open(self):
yield self.test.message_sent.wait()
yield gen.sleep(0.010)
self.open_finished = True
def on_message(self, message):
if not self.open_finished:
raise Exception("on_message called before open finished")
self.write_message("ok")
class WebSocketBaseTestCase(AsyncHTTPTestCase):
@gen.coroutine
def ws_connect(self, path, **kwargs):
ws = yield websocket_connect(
"ws://127.0.0.1:%d%s" % (self.get_http_port(), path), **kwargs
)
raise gen.Return(ws)
class WebSocketTest(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future() # type: Future[None]
return Application(
[
("/echo", EchoHandler, dict(close_future=self.close_future)),
("/non_ws", NonWebSocketHandler),
("/header", HeaderHandler, dict(close_future=self.close_future)),
(
"/header_echo",
HeaderEchoHandler,
dict(close_future=self.close_future),
),
(
"/close_reason",
CloseReasonHandler,
dict(close_future=self.close_future),
),
(
"/error_in_on_message",
ErrorInOnMessageHandler,
dict(close_future=self.close_future),
),
(
"/async_prepare",
AsyncPrepareHandler,
dict(close_future=self.close_future),
),
(
"/path_args/(.*)",
PathArgsHandler,
dict(close_future=self.close_future),
),
(
"/coroutine",
CoroutineOnMessageHandler,
dict(close_future=self.close_future),
),
("/render", RenderMessageHandler, dict(close_future=self.close_future)),
(
"/subprotocol",
SubprotocolHandler,
dict(close_future=self.close_future),
),
(
"/open_coroutine",
OpenCoroutineHandler,
dict(close_future=self.close_future, test=self),
),
],
template_loader=DictLoader({"message.html": "<b>{{ message }}</b>"}),
)
def get_http_client(self):
# These tests require HTTP/1; force the use of SimpleAsyncHTTPClient.
return SimpleAsyncHTTPClient()
def tearDown(self):
super(WebSocketTest, self).tearDown()
RequestHandler._template_loaders.clear()
def test_http_request(self):
# WS server, HTTP client.
response = self.fetch("/echo")
self.assertEqual(response.code, 400)
def test_missing_websocket_key(self):
response = self.fetch(
"/echo",
headers={
"Connection": "Upgrade",
"Upgrade": "WebSocket",
"Sec-WebSocket-Version": "13",
},
)
self.assertEqual(response.code, 400)
def test_bad_websocket_version(self):
response = self.fetch(
"/echo",
headers={
"Connection": "Upgrade",
"Upgrade": "WebSocket",
"Sec-WebSocket-Version": "12",
},
)
self.assertEqual(response.code, 426)
@gen_test
def test_websocket_gen(self):
ws = yield self.ws_connect("/echo")
yield ws.write_message("hello")
response = yield ws.read_message()
self.assertEqual(response, "hello")
def test_websocket_callbacks(self):
websocket_connect(
"ws://127.0.0.1:%d/echo" % self.get_http_port(), callback=self.stop
)
ws = self.wait().result()
ws.write_message("hello")
ws.read_message(self.stop)
response = self.wait().result()
self.assertEqual(response, "hello")
self.close_future.add_done_callback(lambda f: self.stop())
ws.close()
self.wait()
@gen_test
def test_binary_message(self):
ws = yield self.ws_connect("/echo")
ws.write_message(b"hello \xe9", binary=True)
response = yield ws.read_message()
self.assertEqual(response, b"hello \xe9")
@gen_test
def test_unicode_message(self):
ws = yield self.ws_connect("/echo")
ws.write_message(u"hello \u00e9")
response = yield ws.read_message()
self.assertEqual(response, u"hello \u00e9")
@gen_test
def test_render_message(self):
ws = yield self.ws_connect("/render")
ws.write_message("hello")
response = yield ws.read_message()
self.assertEqual(response, "<b>hello</b>")
@gen_test
def test_error_in_on_message(self):
ws = yield self.ws_connect("/error_in_on_message")
ws.write_message("hello")
with ExpectLog(app_log, "Uncaught exception"):
response = yield ws.read_message()
self.assertIs(response, None)
@gen_test
def test_websocket_http_fail(self):
with self.assertRaises(HTTPError) as cm:
yield self.ws_connect("/notfound")
self.assertEqual(cm.exception.code, 404)
@gen_test
def test_websocket_http_success(self):
with self.assertRaises(WebSocketError):
yield self.ws_connect("/non_ws")
@gen_test
def test_websocket_network_fail(self):
sock, port = bind_unused_port()
sock.close()
with self.assertRaises(IOError):
with ExpectLog(gen_log, ".*"):
yield websocket_connect(
"ws://127.0.0.1:%d/" % port, connect_timeout=3600
)
@gen_test
def test_websocket_close_buffered_data(self):
ws = yield websocket_connect("ws://127.0.0.1:%d/echo" % self.get_http_port())
ws.write_message("hello")
ws.write_message("world")
# Close the underlying stream.
ws.stream.close()
@gen_test
def test_websocket_headers(self):
# Ensure that arbitrary headers can be passed through websocket_connect.
ws = yield websocket_connect(
HTTPRequest(
"ws://127.0.0.1:%d/header" % self.get_http_port(),
headers={"X-Test": "hello"},
)
)
response = yield ws.read_message()
self.assertEqual(response, "hello")
@gen_test
def test_websocket_header_echo(self):
# Ensure that headers can be returned in the response.
# Specifically, that arbitrary headers passed through websocket_connect
# can be returned.
ws = yield websocket_connect(
HTTPRequest(
"ws://127.0.0.1:%d/header_echo" % self.get_http_port(),
headers={"X-Test-Hello": "hello"},
)
)
self.assertEqual(ws.headers.get("X-Test-Hello"), "hello")
self.assertEqual(
ws.headers.get("X-Extra-Response-Header"), "Extra-Response-Value"
)
@gen_test
def test_server_close_reason(self):
ws = yield self.ws_connect("/close_reason")
msg = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(msg, None)
self.assertEqual(ws.close_code, 1001)
self.assertEqual(ws.close_reason, "goodbye")
# The on_close callback is called no matter which side closed.
code, reason = yield self.close_future
# The client echoed the close code it received to the server,
# so the server's close code (returned via close_future) is
# the same.
self.assertEqual(code, 1001)
@gen_test
def test_client_close_reason(self):
ws = yield self.ws_connect("/echo")
ws.close(1001, "goodbye")
code, reason = yield self.close_future
self.assertEqual(code, 1001)
self.assertEqual(reason, "goodbye")
@gen_test
def test_write_after_close(self):
ws = yield self.ws_connect("/close_reason")
msg = yield ws.read_message()
self.assertIs(msg, None)
with self.assertRaises(WebSocketClosedError):
ws.write_message("hello")
@gen_test
def test_async_prepare(self):
# Previously, an async prepare method triggered a bug that would
# result in a timeout on test shutdown (and a memory leak).
ws = yield self.ws_connect("/async_prepare")
ws.write_message("hello")
res = yield ws.read_message()
self.assertEqual(res, "hello")
@gen_test
def test_path_args(self):
ws = yield self.ws_connect("/path_args/hello")
res = yield ws.read_message()
self.assertEqual(res, "hello")
@gen_test
def test_coroutine(self):
ws = yield self.ws_connect("/coroutine")
# Send both messages immediately, coroutine must process one at a time.
yield ws.write_message("hello1")
yield ws.write_message("hello2")
res = yield ws.read_message()
self.assertEqual(res, "hello1")
res = yield ws.read_message()
self.assertEqual(res, "hello2")
@gen_test
def test_check_origin_valid_no_path(self):
port = self.get_http_port()
url = "ws://127.0.0.1:%d/echo" % port
headers = {"Origin": "http://127.0.0.1:%d" % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers))
ws.write_message("hello")
response = yield ws.read_message()
self.assertEqual(response, "hello")
@gen_test
def test_check_origin_valid_with_path(self):
port = self.get_http_port()
url = "ws://127.0.0.1:%d/echo" % port
headers = {"Origin": "http://127.0.0.1:%d/something" % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers))
ws.write_message("hello")
response = yield ws.read_message()
self.assertEqual(response, "hello")
@gen_test
def test_check_origin_invalid_partial_url(self):
port = self.get_http_port()
url = "ws://127.0.0.1:%d/echo" % port
headers = {"Origin": "127.0.0.1:%d" % port}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid(self):
port = self.get_http_port()
url = "ws://127.0.0.1:%d/echo" % port
# Host is 127.0.0.1, which should not be accessible from some other
# domain
headers = {"Origin": "http://somewhereelse.com"}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid_subdomains(self):
port = self.get_http_port()
url = "ws://localhost:%d/echo" % port
# Subdomains should be disallowed by default. If we could pass a
# resolver to websocket_connect we could test sibling domains as well.
headers = {"Origin": "http://subtenant.localhost"}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers))
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_subprotocols(self):
ws = yield self.ws_connect(
"/subprotocol", subprotocols=["badproto", "goodproto"]
)
self.assertEqual(ws.selected_subprotocol, "goodproto")
res = yield ws.read_message()
self.assertEqual(res, "subprotocol=goodproto")
@gen_test
def test_subprotocols_not_offered(self):
ws = yield self.ws_connect("/subprotocol")
self.assertIs(ws.selected_subprotocol, None)
res = yield ws.read_message()
self.assertEqual(res, "subprotocol=None")
@gen_test
def test_open_coroutine(self):
self.message_sent = Event()
ws = yield self.ws_connect("/open_coroutine")
yield ws.write_message("hello")
self.message_sent.set()
res = yield ws.read_message()
self.assertEqual(res, "ok")
class NativeCoroutineOnMessageHandler(TestWebSocketHandler):
def initialize(self, **kwargs):
super().initialize(**kwargs)
self.sleeping = 0
async def on_message(self, message):
if self.sleeping > 0:
self.write_message("another coroutine is already sleeping")
self.sleeping += 1
await gen.sleep(0.01)
self.sleeping -= 1
self.write_message(message)
class WebSocketNativeCoroutineTest(WebSocketBaseTestCase):
def get_app(self):
return Application([("/native", NativeCoroutineOnMessageHandler)])
@gen_test
def test_native_coroutine(self):
ws = yield self.ws_connect("/native")
# Send both messages immediately, coroutine must process one at a time.
yield ws.write_message("hello1")
yield ws.write_message("hello2")
res = yield ws.read_message()
self.assertEqual(res, "hello1")
res = yield ws.read_message()
self.assertEqual(res, "hello2")
class CompressionTestMixin(object):
MESSAGE = "Hello world. Testing 123 123"
def get_app(self):
class LimitedHandler(TestWebSocketHandler):
@property
def max_message_size(self):
return 1024
def on_message(self, message):
self.write_message(str(len(message)))
return Application(
[
(
"/echo",
EchoHandler,
dict(compression_options=self.get_server_compression_options()),
),
(
"/limited",
LimitedHandler,
dict(compression_options=self.get_server_compression_options()),
),
]
)
def get_server_compression_options(self):
return None
def get_client_compression_options(self):
return None
@gen_test
def test_message_sizes(self):
ws = yield self.ws_connect(
"/echo", compression_options=self.get_client_compression_options()
)
# Send the same message three times so we can measure the
# effect of the context_takeover options.
for i in range(3):
ws.write_message(self.MESSAGE)
response = yield ws.read_message()
self.assertEqual(response, self.MESSAGE)
self.assertEqual(ws.protocol._message_bytes_out, len(self.MESSAGE) * 3)
self.assertEqual(ws.protocol._message_bytes_in, len(self.MESSAGE) * 3)
self.verify_wire_bytes(ws.protocol._wire_bytes_in, ws.protocol._wire_bytes_out)
@gen_test
def test_size_limit(self):
ws = yield self.ws_connect(
"/limited", compression_options=self.get_client_compression_options()
)
# Small messages pass through.
ws.write_message("a" * 128)
response = yield ws.read_message()
self.assertEqual(response, "128")
# This message is too big after decompression, but it compresses
# down to a size that will pass the initial checks.
ws.write_message("a" * 2048)
response = yield ws.read_message()
self.assertIsNone(response)
class UncompressedTestMixin(CompressionTestMixin):
"""Specialization of CompressionTestMixin when we expect no compression."""
def verify_wire_bytes(self, bytes_in, bytes_out):
# Bytes out includes the 4-byte mask key per message.
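# Each text frame here is a 2-byte header plus the payload; outgoing client frames
# additionally carry a 4-byte masking key, hence len+6 out versus len+2 in.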
self.assertEqual(bytes_out, 3 * (len(self.MESSAGE) + 6))
self.assertEqual(bytes_in, 3 * (len(self.MESSAGE) + 2))
class NoCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
pass
# If only one side tries to compress, the extension is not negotiated.
class ServerOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
class ClientOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_client_compression_options(self):
return {}
class DefaultCompressionTest(CompressionTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
def get_client_compression_options(self):
return {}
def verify_wire_bytes(self, bytes_in, bytes_out):
self.assertLess(bytes_out, 3 * (len(self.MESSAGE) + 6))
self.assertLess(bytes_in, 3 * (len(self.MESSAGE) + 2))
# Bytes out includes the 4-byte mask key per message (3 messages x 4 bytes = 12 extra outgoing bytes).
self.assertEqual(bytes_out, bytes_in + 12)
class MaskFunctionMixin(object):
# Subclasses should define self.mask(mask, data)
def test_mask(self):
self.assertEqual(self.mask(b"abcd", b""), b"")
self.assertEqual(self.mask(b"abcd", b"b"), b"\x03")
self.assertEqual(self.mask(b"abcd", b"54321"), b"TVPVP")
self.assertEqual(self.mask(b"ZXCV", b"98765432"), b"c`t`olpd")
# Include test cases with \x00 bytes (to ensure that the C
# extension isn't depending on null-terminated strings) and
# bytes with the high bit set (to smoke out signedness issues).
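# For reference, the mask is applied byte-wise as data[i] XOR mask[i % 4]; e.g.
# ord('b') ^ ord('a') == 0x62 ^ 0x61 == 0x03, which matches the b"\x03" case above.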
self.assertEqual(
self.mask(b"\x00\x01\x02\x03", b"\xff\xfb\xfd\xfc\xfe\xfa"),
b"\xff\xfa\xff\xff\xfe\xfb",
)
self.assertEqual(
self.mask(b"\xff\xfb\xfd\xfc", b"\x00\x01\x02\x03\x04\x05"),
b"\xff\xfa\xff\xff\xfb\xfe",
)
class PythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return _websocket_mask_python(mask, data)
@unittest.skipIf(speedups is None, "tornado.speedups module not present")
class CythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return speedups.websocket_mask(mask, data)
class ServerPeriodicPingTest(WebSocketBaseTestCase):
def get_app(self):
class PingHandler(TestWebSocketHandler):
def on_pong(self, data):
self.write_message("got pong")
return Application([("/", PingHandler)], websocket_ping_interval=0.01)
@gen_test
def test_server_ping(self):
ws = yield self.ws_connect("/")
for i in range(3):
response = yield ws.read_message()
self.assertEqual(response, "got pong")
# TODO: test that the connection gets closed if ping responses stop.
class ClientPeriodicPingTest(WebSocketBaseTestCase):
def get_app(self):
class PingHandler(TestWebSocketHandler):
def on_ping(self, data):
self.write_message("got ping")
return Application([("/", PingHandler)])
@gen_test
def test_client_ping(self):
ws = yield self.ws_connect("/", ping_interval=0.01)
for i in range(3):
response = yield ws.read_message()
self.assertEqual(response, "got ping")
# TODO: test that the connection gets closed if ping responses stop.
class ManualPingTest(WebSocketBaseTestCase):
def get_app(self):
class PingHandler(TestWebSocketHandler):
def on_ping(self, data):
self.write_message(data, binary=isinstance(data, bytes))
return Application([("/", PingHandler)])
@gen_test
def test_manual_ping(self):
ws = yield self.ws_connect("/")
self.assertRaises(ValueError, ws.ping, "a" * 126)
ws.ping("hello")
resp = yield ws.read_message()
# on_ping always sees bytes.
self.assertEqual(resp, b"hello")
ws.ping(b"binary hello")
resp = yield ws.read_message()
self.assertEqual(resp, b"binary hello")
class MaxMessageSizeTest(WebSocketBaseTestCase):
def get_app(self):
return Application([("/", EchoHandler)], websocket_max_message_size=1024)
@gen_test
def test_large_message(self):
ws = yield self.ws_connect("/")
# Write a message that is allowed.
msg = "a" * 1024
ws.write_message(msg)
resp = yield ws.read_message()
self.assertEqual(resp, msg)
# Write a message that is too large.
ws.write_message(msg + "b")
resp = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(resp, None)
self.assertEqual(ws.close_code, 1009)
self.assertEqual(ws.close_reason, "message too big")
# TODO: Needs tests of messages split over multiple
# continuation frames.
|
py
|
1a5a27b5cb7e1dff3bda3187a5cf81dd81d37b92
|
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the qubit parameter-shift QubitParamShiftTape"""
import pytest
import numpy as np
import pennylane as qml
from pennylane import numpy as pnp
from pennylane.tape import QubitParamShiftTape
from pennylane.tape.qubit_param_shift import _get_operation_recipe
from pennylane.operation import (
Operation,
OperatorPropertyUndefined,
ParameterFrequenciesUndefinedError,
)
class TestGetOperationRecipe:
"""Tests special cases for the _get_operation_recipe
copy in qubit_param_shift.py; the original is in
gradients/parameter_shift.py
"""
class DummyOp0(Operation):
num_params = 1
num_wires = 1
grad_recipe = (None,)
class DummyOp1(Operation):
num_params = 1
num_wires = 1
grad_recipe = (None,)
@property
def parameter_frequencies(self):
raise ParameterFrequenciesUndefinedError
@pytest.mark.parametrize("Op", [DummyOp0, DummyOp1])
def test_error_no_grad_info(self, Op):
op = Op(0.1, wires=0)
with pytest.raises(
OperatorPropertyUndefined,
match=f"The operation {op.name} does not have a grad_recipe,",
):
_get_operation_recipe(op, 0, None)
class TestGradMethod:
"""Tests for parameter gradient methods"""
def test_non_differentiable(self):
"""Test that a non-differentiable parameter is
correctly marked"""
psi = np.array([1, 0, 1, 0]) / np.sqrt(2)
with QubitParamShiftTape() as tape:
qml.QubitStateVector(psi, wires=[0, 1])
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[1])
qml.CNOT(wires=[0, 1])
qml.probs(wires=[0, 1])
assert tape._grad_method(0) is None
assert tape._grad_method(1) == "A"
assert tape._grad_method(2) == "A"
tape._update_gradient_info()
assert tape._par_info[0]["grad_method"] is None
assert tape._par_info[1]["grad_method"] == "A"
assert tape._par_info[2]["grad_method"] == "A"
def test_independent(self):
"""Test that an independent variable is properly marked
as having a zero gradient"""
with QubitParamShiftTape() as tape:
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[1])
qml.expval(qml.PauliY(0))
assert tape._grad_method(0) == "A"
assert tape._grad_method(1) == "0"
tape._update_gradient_info()
assert tape._par_info[0]["grad_method"] == "A"
assert tape._par_info[1]["grad_method"] == "0"
# in non-graph mode, it is impossible to determine
# if a parameter is independent or not
tape._graph = None
assert tape._grad_method(1, use_graph=False) == "A"
def test_finite_diff(self, monkeypatch):
"""If an op has grad_method=F, this should be respected
by the QubitParamShiftTape"""
monkeypatch.setattr(qml.RX, "grad_method", "F")
psi = np.array([1, 0, 1, 0]) / np.sqrt(2)
with QubitParamShiftTape() as tape:
qml.QubitStateVector(psi, wires=[0, 1])
qml.RX(0.543, wires=[0])
qml.RY(-0.654, wires=[1])
qml.CNOT(wires=[0, 1])
qml.probs(wires=[0, 1])
assert tape._grad_method(0) is None
assert tape._grad_method(1) == "F"
assert tape._grad_method(2) == "A"
def test_specs_num_parameter_shift_executions():
"""Tests specs has the correct number of parameter-shift executions"""
dev = qml.device("default.qubit", wires=3)
x = 0.543
y = -0.654
with qml.tape.QubitParamShiftTape() as tape:
qml.CRX(x, wires=[0, 1])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.RY(0.12345, wires=2)
qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
num_exec = tape.specs["num_parameter_shift_executions"]
assert num_exec == 7
jac = tape.jacobian(dev)
assert num_exec == (dev.num_executions + 1)
class TestParameterShiftRule:
"""Tests for the parameter shift implementation"""
@pytest.mark.parametrize("theta", np.linspace(-2 * np.pi, 2 * np.pi, 7))
@pytest.mark.parametrize("shift", [np.pi / 2, 0.3, np.sqrt(2)])
@pytest.mark.parametrize("G", [qml.RX, qml.RY, qml.RZ, qml.PhaseShift])
def test_pauli_rotation_gradient(self, mocker, G, theta, shift, tol):
"""Tests that the automatic gradients of Pauli rotations are correct."""
spy = mocker.spy(QubitParamShiftTape, "parameter_shift")
dev = qml.device("default.qubit", wires=1)
with QubitParamShiftTape() as tape:
qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
G(theta, wires=[0])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {1}
autograd_val = tape.jacobian(dev, shift=shift, method="analytic")
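# Two-term parameter-shift rule for Pauli rotations:
# df/dtheta = [f(theta + pi/2) - f(theta - pi/2)] / 2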
manualgrad_val = (
tape.execute(dev, params=[theta + np.pi / 2])
- tape.execute(dev, params=[theta - np.pi / 2])
) / 2
assert np.allclose(autograd_val, manualgrad_val, atol=tol, rtol=0)
assert spy.call_args[1]["shift"] == shift
# compare to finite differences
numeric_val = tape.jacobian(dev, shift=shift, method="numeric")
assert np.allclose(autograd_val, numeric_val, atol=tol, rtol=0)
@pytest.mark.parametrize("theta", np.linspace(-2 * np.pi, 2 * np.pi, 7))
@pytest.mark.parametrize("shift", [np.pi / 2, 0.3, np.sqrt(2)])
def test_Rot_gradient(self, mocker, theta, shift, tol):
"""Tests that the automatic gradient of a arbitrary Euler-angle-parameterized gate is correct."""
spy = mocker.spy(QubitParamShiftTape, "parameter_shift")
dev = qml.device("default.qubit", wires=1)
params = np.array([theta, theta**3, np.sqrt(2) * theta])
with QubitParamShiftTape() as tape:
qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
qml.Rot(*params, wires=[0])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {1, 2, 3}
autograd_val = tape.jacobian(dev, shift=shift, method="analytic")
manualgrad_val = np.zeros_like(autograd_val)
for idx in list(np.ndindex(*params.shape)):
s = np.zeros_like(params)
s[idx] += np.pi / 2
forward = tape.execute(dev, params=params + s)
backward = tape.execute(dev, params=params - s)
manualgrad_val[0, idx] = (forward - backward) / 2
assert np.allclose(autograd_val, manualgrad_val, atol=tol, rtol=0)
assert spy.call_args[1]["shift"] == shift
# compare to finite differences
numeric_val = tape.jacobian(dev, shift=shift, method="numeric")
assert np.allclose(autograd_val, numeric_val, atol=tol, rtol=0)
@pytest.mark.parametrize("G", [qml.CRX, qml.CRY, qml.CRZ])
def test_controlled_rotation_gradient(self, G, tol):
"""Test gradient of controlled rotation gates"""
dev = qml.device("default.qubit", wires=2)
b = 0.123
with QubitParamShiftTape() as tape:
qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
G(b, wires=[0, 1])
qml.expval(qml.PauliX(0))
tape.trainable_params = {1}
res = tape.execute(dev)
assert np.allclose(res, -np.cos(b / 2), atol=tol, rtol=0)
grad = tape.jacobian(dev, method="analytic")
expected = np.sin(b / 2) / 2
assert np.allclose(grad, expected, atol=tol, rtol=0)
# compare to finite differences
numeric_val = tape.jacobian(dev, method="numeric")
assert np.allclose(grad, numeric_val, atol=tol, rtol=0)
@pytest.mark.parametrize("theta", np.linspace(-2 * np.pi, np.pi, 7))
def test_CRot_gradient(self, mocker, theta, tol):
"""Tests that the automatic gradient of an arbitrary controlled Euler-angle-parameterized
gate is correct."""
spy = mocker.spy(QubitParamShiftTape, "parameter_shift")
dev = qml.device("default.qubit", wires=2)
a, b, c = np.array([theta, theta**3, np.sqrt(2) * theta])
with QubitParamShiftTape() as tape:
qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
qml.CRot(a, b, c, wires=[0, 1])
qml.expval(qml.PauliX(0))
tape.trainable_params = {1, 2, 3}
res = tape.execute(dev)
expected = -np.cos(b / 2) * np.cos(0.5 * (a + c))
assert np.allclose(res, expected, atol=tol, rtol=0)
grad = tape.jacobian(dev, method="analytic")
expected = np.array(
[
[
0.5 * np.cos(b / 2) * np.sin(0.5 * (a + c)),
0.5 * np.sin(b / 2) * np.cos(0.5 * (a + c)),
0.5 * np.cos(b / 2) * np.sin(0.5 * (a + c)),
]
]
)
assert np.allclose(grad, expected, atol=tol, rtol=0)
# compare to finite differences
numeric_val = tape.jacobian(dev, method="numeric")
assert np.allclose(grad, numeric_val, atol=tol, rtol=0)
def test_gradients_agree_finite_differences(self, mocker, tol):
"""Tests that the parameter-shift rule agrees with the first and second
order finite differences"""
params = np.array([0.1, -1.6, np.pi / 5])
with QubitParamShiftTape() as tape:
qml.RX(params[0], wires=[0])
qml.CNOT(wires=[0, 1])
qml.RY(-1.6, wires=[0])
qml.RY(params[1], wires=[1])
qml.CNOT(wires=[1, 0])
qml.RX(params[2], wires=[0])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {0, 2, 3}
dev = qml.device("default.qubit", wires=2)
spy_numeric = mocker.spy(tape, "numeric_pd")
spy_analytic = mocker.spy(tape, "analytic_pd")
grad_F1 = tape.jacobian(dev, method="numeric", order=1)
grad_F2 = tape.jacobian(dev, method="numeric", order=2)
spy_numeric.assert_called()
spy_analytic.assert_not_called()
grad_A = tape.jacobian(dev, method="analytic")
spy_analytic.assert_called()
# gradients computed with different methods must agree
assert np.allclose(grad_A, grad_F1, atol=tol, rtol=0)
assert np.allclose(grad_A, grad_F2, atol=tol, rtol=0)
def test_variance_gradients_agree_finite_differences(self, mocker, tol):
"""Tests that the variance parameter-shift rule agrees with the first and second
order finite differences"""
params = np.array([0.1, -1.6, np.pi / 5])
with QubitParamShiftTape() as tape:
qml.RX(params[0], wires=[0])
qml.CNOT(wires=[0, 1])
qml.RY(-1.6, wires=[0])
qml.RY(params[1], wires=[1])
qml.CNOT(wires=[1, 0])
qml.RX(params[2], wires=[0])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0)), qml.var(qml.PauliX(1))
tape.trainable_params = {0, 2, 3}
dev = qml.device("default.qubit", wires=2)
spy_numeric = mocker.spy(tape, "numeric_pd")
spy_analytic = mocker.spy(tape, "analytic_pd")
grad_F1 = tape.jacobian(dev, method="numeric", order=1)
grad_F2 = tape.jacobian(dev, method="numeric", order=2)
spy_numeric.assert_called()
spy_analytic.assert_not_called()
grad_A = tape.jacobian(dev, method="analytic")
spy_analytic.assert_called()
# gradients computed with different methods must agree
assert np.allclose(grad_A, grad_F1, atol=tol, rtol=0)
assert np.allclose(grad_A, grad_F2, atol=tol, rtol=0)
def test_processing_function_torch(self, mocker, tol):
"""Tests the processing function that is created when using the
parameter_shift method returns a numpy array.
This is a unit test specifically aimed at checking an edge case
discovered related to default.qubit.torch.
"""
torch = pytest.importorskip("torch")
results = [
torch.tensor([0.4342], dtype=torch.float64),
torch.tensor([-0.4342], dtype=torch.float64),
]
theta = torch.tensor(0.543, dtype=torch.float64)
phi = torch.tensor(-0.234, dtype=torch.float64)
pars = torch.tensor([theta, phi], dtype=torch.float64)
with qml.tape.QubitParamShiftTape() as tape:
qml.RY(pars[0], wires=[0])
qml.RX(pars[1], wires=[0])
qml.expval(qml.PauliZ(0))
tapes, func = tape.parameter_shift(0, pars)
assert type(func(results)) == np.ndarray
class TestJacobianIntegration:
"""Tests for general Jacobian integration"""
def test_single_expectation_value(self, tol):
"""Tests correct output shape and evaluation for a tape
with a single expval output"""
dev = qml.device("default.qubit", wires=2)
x = 0.543
y = -0.654
with QubitParamShiftTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0) @ qml.PauliX(1))
res = tape.jacobian(dev, method="analytic")
assert res.shape == (1, 2)
expected = np.array([[-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)]])
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_multiple_expectation_values(self, tol):
"""Tests correct output shape and evaluation for a tape
with multiple expval outputs"""
dev = qml.device("default.qubit", wires=2)
x = 0.543
y = -0.654
with QubitParamShiftTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.expval(qml.PauliX(1))
res = tape.jacobian(dev, method="analytic")
assert res.shape == (2, 2)
expected = np.array([[-np.sin(x), 0], [0, np.cos(y)]])
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_var_expectation_values(self, tol):
"""Tests correct output shape and evaluation for a tape
with expval and var outputs"""
dev = qml.device("default.qubit", wires=2)
x = 0.543
y = -0.654
with QubitParamShiftTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.var(qml.PauliX(1))
res = tape.jacobian(dev, method="analytic")
assert res.shape == (2, 2)
expected = np.array([[-np.sin(x), 0], [0, -2 * np.cos(y) * np.sin(y)]])
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_prob_expectation_values(self, tol):
"""Tests correct output shape and evaluation for a tape
with prob and expval outputs"""
dev = qml.device("default.qubit", wires=2)
x = 0.543
y = -0.654
with QubitParamShiftTape() as tape:
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
qml.probs(wires=[0, 1])
res = tape.jacobian(dev, method="analytic")
assert res.shape == (5, 2)
expected = (
np.array(
[
[-2 * np.sin(x), 0],
[
-(np.cos(y / 2) ** 2 * np.sin(x)),
-(np.cos(x / 2) ** 2 * np.sin(y)),
],
[
-(np.sin(x) * np.sin(y / 2) ** 2),
(np.cos(x / 2) ** 2 * np.sin(y)),
],
[
(np.sin(x) * np.sin(y / 2) ** 2),
(np.sin(x / 2) ** 2 * np.sin(y)),
],
[
(np.cos(y / 2) ** 2 * np.sin(x)),
-(np.sin(x / 2) ** 2 * np.sin(y)),
],
]
)
/ 2
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_involutory_variance(self, mocker, tol):
"""Tests qubit observable that are involutory"""
dev = qml.device("default.qubit", wires=1)
a = 0.54
spy_analytic_var = mocker.spy(QubitParamShiftTape, "parameter_shift_var")
spy_numeric = mocker.spy(QubitParamShiftTape, "numeric_pd")
spy_execute = mocker.spy(dev, "execute")
with QubitParamShiftTape() as tape:
qml.RX(a, wires=0)
qml.var(qml.PauliZ(0))
res = tape.execute(dev)
expected = 1 - np.cos(a) ** 2
assert np.allclose(res, expected, atol=tol, rtol=0)
spy_execute.call_args_list = []
# circuit jacobians
gradA = tape.jacobian(dev, method="analytic")
spy_analytic_var.assert_called()
spy_numeric.assert_not_called()
assert len(spy_execute.call_args_list) == 1 + 2 * 1
spy_execute.call_args_list = []
gradF = tape.jacobian(dev, method="numeric")
spy_numeric.assert_called()
assert len(spy_execute.call_args_list) == 2
expected = 2 * np.sin(a) * np.cos(a)
assert gradF == pytest.approx(expected, abs=tol)
assert gradA == pytest.approx(expected, abs=tol)
def test_non_involutory_variance(self, mocker, tol):
"""Tests a qubit Hermitian observable that is not involutory"""
dev = qml.device("default.qubit", wires=1)
A = np.array([[4, -1 + 6j], [-1 - 6j, 2]])
a = 0.54
spy_analytic_var = mocker.spy(QubitParamShiftTape, "parameter_shift_var")
spy_numeric = mocker.spy(QubitParamShiftTape, "numeric_pd")
spy_execute = mocker.spy(dev, "execute")
with QubitParamShiftTape() as tape:
qml.RX(a, wires=0)
qml.var(qml.Hermitian(A, 0))
tape.trainable_params = {0}
res = tape.execute(dev)
expected = (39 / 2) - 6 * np.sin(2 * a) + (35 / 2) * np.cos(2 * a)
assert np.allclose(res, expected, atol=tol, rtol=0)
spy_execute.call_args_list = []
# circuit jacobians
gradA = tape.jacobian(dev, method="analytic")
spy_analytic_var.assert_called()
spy_numeric.assert_not_called()
assert len(spy_execute.call_args_list) == 1 + 4 * 1
spy_execute.call_args_list = []
gradF = tape.jacobian(dev, method="numeric")
spy_numeric.assert_called()
assert len(spy_execute.call_args_list) == 2
expected = -35 * np.sin(2 * a) - 12 * np.cos(2 * a)
assert gradA == pytest.approx(expected, abs=tol)
assert gradF == pytest.approx(expected, abs=tol)
def test_involutory_and_noninvolutory_variance(self, mocker, tol):
"""Tests a qubit Hermitian observable that is not involutory alongside
a involutory observable."""
dev = qml.device("default.qubit", wires=2)
A = np.array([[4, -1 + 6j], [-1 - 6j, 2]])
a = 0.54
spy_analytic_var = mocker.spy(QubitParamShiftTape, "parameter_shift_var")
spy_numeric = mocker.spy(QubitParamShiftTape, "numeric_pd")
spy_execute = mocker.spy(dev, "execute")
with QubitParamShiftTape() as tape:
qml.RX(a, wires=0)
qml.RX(a, wires=1)
qml.var(qml.PauliZ(0))
qml.var(qml.Hermitian(A, 1))
tape.trainable_params = {0, 1}
res = tape.execute(dev)
expected = [1 - np.cos(a) ** 2, (39 / 2) - 6 * np.sin(2 * a) + (35 / 2) * np.cos(2 * a)]
assert np.allclose(res, expected, atol=tol, rtol=0)
spy_execute.call_args_list = []
# circuit jacobians
gradA = tape.jacobian(dev, method="analytic")
spy_analytic_var.assert_called()
spy_numeric.assert_not_called()
assert len(spy_execute.call_args_list) == 1 + 2 * 4
spy_execute.call_args_list = []
gradF = tape.jacobian(dev, method="numeric")
spy_numeric.assert_called()
assert len(spy_execute.call_args_list) == 1 + 2
expected = [2 * np.sin(a) * np.cos(a), -35 * np.sin(2 * a) - 12 * np.cos(2 * a)]
assert np.diag(gradA) == pytest.approx(expected, abs=tol)
assert np.diag(gradF) == pytest.approx(expected, abs=tol)
def test_expval_and_variance(self, tol):
"""Test that the qnode works for a combination of expectation
values and variances"""
dev = qml.device("default.qubit", wires=3)
a = 0.54
b = -0.423
c = 0.123
with QubitParamShiftTape() as tape:
qml.RX(a, wires=0)
qml.RY(b, wires=1)
qml.CNOT(wires=[1, 2])
qml.RX(c, wires=2)
qml.CNOT(wires=[0, 1])
qml.var(qml.PauliZ(0))
qml.expval(qml.PauliZ(1))
qml.var(qml.PauliZ(2))
res = tape.execute(dev)
expected = np.array(
[
np.sin(a) ** 2,
np.cos(a) * np.cos(b),
0.25 * (3 - 2 * np.cos(b) ** 2 * np.cos(2 * c) - np.cos(2 * b)),
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
# # circuit jacobians
gradA = tape.jacobian(dev, method="analytic")
gradF = tape.jacobian(dev, method="numeric")
expected = np.array(
[
[2 * np.cos(a) * np.sin(a), -np.cos(b) * np.sin(a), 0],
[
0,
-np.cos(a) * np.sin(b),
0.5 * (2 * np.cos(b) * np.cos(2 * c) * np.sin(b) + np.sin(2 * b)),
],
[0, 0, np.cos(b) ** 2 * np.sin(2 * c)],
]
).T
assert gradA == pytest.approx(expected, abs=tol)
assert gradF == pytest.approx(expected, abs=tol)
class TestHessian:
"""Tests for parameter Hessian method"""
@pytest.mark.parametrize("s1", [np.pi / 2, np.pi / 4, 2])
@pytest.mark.parametrize("s2", [np.pi / 2, np.pi / 4, 3])
@pytest.mark.parametrize("G", [qml.RX, qml.RY, qml.RZ, qml.PhaseShift])
def test_pauli_rotation_hessian(self, s1, s2, G, tol):
"""Tests that the automatic Hessian of Pauli rotations are correct."""
theta = np.array([0.234, 2.443])
dev = qml.device("default.qubit", wires=2)
with QubitParamShiftTape() as tape:
qml.QubitStateVector(np.array([1.0, -1.0, 1.0, -1.0]) / np.sqrt(4), wires=[0, 1])
G(theta[0], wires=[0])
G(theta[1], wires=[1])
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {1, 2}
autograd_val = tape.hessian(dev, s1=s1, s2=s2)
assert autograd_val.shape == (len(theta), len(theta))
shift = np.eye(len(theta))
manualgrad_val = np.zeros((len(theta), len(theta)))
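# Second-order parameter-shift rule:
# d2f/(dtheta_i dtheta_j) = [f(+,+) - f(-,+) - f(+,-) + f(-,-)] / (4 * sin(s1) * sin(s2))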
for i in range(len(theta)):
for j in range(len(theta)):
manualgrad_val[i, j] = (
tape.execute(dev, params=theta + s1 * shift[i] + s2 * shift[j])
- tape.execute(dev, params=theta - s1 * shift[i] + s2 * shift[j])
- tape.execute(dev, params=theta + s1 * shift[i] - s2 * shift[j])
+ tape.execute(dev, params=theta - s1 * shift[i] - s2 * shift[j])
) / (4 * np.sin(s1) * np.sin(s2))
assert np.allclose(autograd_val, manualgrad_val, atol=tol, rtol=0)
def test_vector_output(self, tol):
"""Tests that a vector valued output tape has a hessian with the proper result."""
dev = qml.device("default.qubit", wires=1)
x = np.array([1.0, 2.0])
with QubitParamShiftTape() as tape:
qml.RY(x[0], wires=0)
qml.RX(x[1], wires=0)
qml.probs(wires=[0])
hess = tape.hessian(dev)
expected_hess = np.array(
[
[
[-0.5 * np.cos(x[0]) * np.cos(x[1]), 0.5 * np.cos(x[0]) * np.cos(x[1])],
[0.5 * np.sin(x[0]) * np.sin(x[1]), -0.5 * np.sin(x[0]) * np.sin(x[1])],
],
[
[0.5 * np.sin(x[0]) * np.sin(x[1]), -0.5 * np.sin(x[0]) * np.sin(x[1])],
[-0.5 * np.cos(x[0]) * np.cos(x[1]), 0.5 * np.cos(x[0]) * np.cos(x[1])],
],
]
)
assert np.allclose(hess, expected_hess, atol=tol, rtol=0)
def test_no_trainable_params_hessian(self):
"""Test that an empty Hessian is returned when there are no trainable
parameters."""
dev = qml.device("default.qubit", wires=1)
with QubitParamShiftTape() as tape:
qml.RX(0.224, wires=[0])
qml.expval(qml.PauliZ(0))
tape.trainable_params = {}
hessian = tape.hessian(dev)
assert hessian.shape == (0, 0)
@pytest.mark.parametrize("G", [qml.CRX, qml.CRY, qml.CRZ])
def test_controlled_rotation_error(self, G, tol):
"""Test that attempting to perform the parameter-shift rule on the controlled rotation gates
results in an error."""
dev = qml.device("default.qubit", wires=2)
b = 0.123
with QubitParamShiftTape() as tape:
qml.QubitStateVector(np.array([1.0, -1.0]) / np.sqrt(2), wires=0)
G(b, wires=[0, 1])
qml.expval(qml.PauliX(0))
tape.trainable_params = {1}
res = tape.execute(dev)
assert np.allclose(res, -np.cos(b / 2), atol=tol, rtol=0)
with pytest.raises(ValueError, match="not supported for the parameter-shift Hessian"):
tape.hessian(dev, method="analytic")
@pytest.mark.parametrize("G", [qml.CRX, qml.CRY, qml.CRZ])
def test_controlled_rotation_second_derivative(self, G, tol):
"""Test that the controlled rotation gates return the correct
second derivative if first decomposed."""
dev = qml.device("default.qubit", wires=2)
init_state = qml.numpy.array([1.0, -1.0], requires_grad=False) / np.sqrt(2)
@qml.qnode(dev)
def circuit(b):
qml.QubitStateVector(init_state, wires=0)
G(b, wires=[0, 1])
return qml.expval(qml.PauliX(0))
b = pnp.array(0.123, requires_grad=True)
res = circuit(b)
assert np.allclose(res, -np.cos(b / 2), atol=tol, rtol=0)
grad = qml.grad(qml.grad(circuit))(b)
expected = np.cos(b / 2) / 4
assert np.allclose(grad, expected, atol=tol, rtol=0)
def test_non_differentiable_controlled_rotation(self, tol):
"""Tests that a non-differentiable controlled operation does not affect
the Hessian computation."""
dev = qml.device("default.qubit", wires=2)
x = 0.6
with QubitParamShiftTape() as tape:
qml.RY(x, wires=0)
qml.CRY(np.pi / 2, wires=[0, 1])
qml.expval(qml.PauliX(0))
tape.trainable_params = {0}
hess = tape.hessian(dev)
expected_hess = np.array([-np.sin(x) / np.sqrt(2)])
assert np.allclose(hess, expected_hess, atol=tol, rtol=0)
|
py
|
1a5a281e7f45f1c9c7ef4f7836be9a3116fa0cd2
|
import os
import random
import argparse
import traceback
import numpy as np
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from tokenizers import SentencePieceBPETokenizer
from transformers import GPT2Config, GPT2LMHeadModel
from tqdm import tqdm
from data import CustomDataset, dynamic_padding_collate_fn, load_dataset
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=-1)
parser.add_argument('--batch_size', type=int, default=-1)
parser.add_argument('--save', type=str, default='./checkpoint/')
parser.add_argument('--load', type=str, default='./checkpoint/kogpt2_subject_epoch.ckpt')
parser.add_argument('--train_dataset', type=str, default='./dataset/none_train.json', required=True)
parser.add_argument('--valid_dataset', type=str, default='./dataset/none_valid.json')
args = parser.parse_args()
if args.epoch == -1:
args.epoch = 10
if args.batch_size == -1:
args.batch_size = 1
seed = random.randint(0, 2147483647)
np.random.seed(seed)
torch.random.manual_seed(seed)
torch.cuda.manual_seed(seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"{device} is used for training")
subject = 'subject'
if args.load != './checkpoint/kogpt2_subject_epoch.ckpt':
checkpoint = torch.load(args.load, map_location=device)
subject = args.load.split('_')[1]
args.load = None
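# args.load is cleared here as a flag: "not args.load" below means a checkpoint was loaded from disk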
tokenizer = SentencePieceBPETokenizer.from_file(
vocab_filename="./tokenizer/tokenizers_vocab.json",
merges_filename="./tokenizer/tokenizers_merges.txt",
add_prefix_space=False
)
train_dataset = None
try:
pairs = load_dataset(args.train_dataset)
train_dataset = CustomDataset(pairs, tokenizer)
print("loading dataset succeeds")
except Exception as e:
print("loading dataset fails")
traceback.print_exc()
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, collate_fn=dynamic_padding_collate_fn)
if args.valid_dataset == './dataset/none_valid.json':
valid_flag = False
else:
valid_flag = True
if valid_flag:
valid_dataset = None
try:
pairs = load_dataset(args.valid_dataset)
valid_dataset = CustomDataset(pairs, tokenizer)
print("loading valid dataset succeeds")
except Exception as e:
print("loading valid dataset fails")
traceback.print_exc()
valid_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=True, collate_fn=dynamic_padding_collate_fn)
model = GPT2LMHeadModel.from_pretrained(pretrained_model_name_or_path="taeminlee/kogpt2")
if not args.load:
model.load_state_dict(checkpoint['model_state_dict'])
model.to(device)
model.eval()
loss = 0
epoch = 1
learning_rate = 3e-5
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
# criterion = torch.nn.CrossEntropyLoss()
if not args.load:
epoch = checkpoint['epoch']
# learning_rate = checkpoint['learning_rate']
# optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
print("KoGPT2 Training Starts")
for epoch in range(epoch, args.epoch + 1):
best_epoch = 0
best_loss = 10000
average_train_loss = (0.0, 0.0)
model.train()
for step, batch in tqdm(enumerate(train_dataloader), desc=f"[TRAIN] Epoch: {epoch}", total=len(train_dataloader)):
optimizer.zero_grad()
input_ids, attention_mask, labels = tuple(value.to(device) for value in batch)
outputs = model.forward(input_ids, attention_mask=attention_mask, labels=labels, return_dict=True)
loss = outputs.loss.item()
outputs.loss.backward()
optimizer.step()
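# exponentially weighted running average of the loss: numerator and denominator both decay by 0.99 each step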
average_train_loss = (average_train_loss[0] * 0.99 + loss, average_train_loss[1] * 0.99 + 1)
if best_loss > average_train_loss[0] / average_train_loss[1]:
best_loss = average_train_loss[0] / average_train_loss[1]
best_epoch = epoch
if step % 10 == 0:
print("[Epoch {0}: {1}] Loss = {2:.5f} Average Train loss = {3:.5f}".format(epoch, step, loss, average_train_loss[0] / average_train_loss[1]))
# scheduler.step(average_loss[0] / average_loss[1])
print("[Epoch {0}] Best Epcoh {1} Best loss = {2:.5f}".format(epoch, best_epoch, best_loss))
if epoch % 2 == 0:
try:
if not os.path.exists(args.save):
os.mkdir(args.save)
torch.save({
'epoch': epoch,
# 'learning_rate': learning_rate,
'model_state_dict': model.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict()
# 'scheduler_state_dict': scheduler.state_dict()
}, args.save + 'kogpt2_' + subject + '_' + str(epoch) + '.ckpt')
print("saving model succeeds")
except Exception as e:
traceback.print_exc()
print("saving model fails")
torch.save({
'epoch': epoch,
# 'learning_rate': learning_rate,
'model_state_dict': model.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict()
# 'scheduler_state_dict': scheduler.state_dict()
}, args.save + 'kogpt2_' + subject + '_' + str(args.epoch + 1) + '.ckpt')
|
py
|
1a5a294f858dedbc0b93ecea4e53def47c66163c
|
from .storage import (
_fips_filter,
_from_db,
_fipstable,
store_ltdb,
store_ncdb,
store_census,
store_blocks_2000,
store_blocks_2010,
)
from .util import convert_gdf, get_lehd, adjust_inflation
|
py
|
1a5a2989139fc9e699200e62b9d455a77beec250
|
from .CCDefaults import getDefault, getLocality
from .CCConstants import YES, NO
from .solar import SolarPotential
from .naturalGas import NatGasFootprint
def EvalHotWaterAssessment(inputs):
#hot_water_assessment,water_heater_type,water_heater_age
explanation = "Didn't sign up for a hot water assessment."
locality = getLocality(inputs)
points = cost = savings = 0
if inputs.get('hot_water_assessment', YES) == YES:
co2_per_kwh = getDefault(locality,"elec_lbs_co2_per_kwh",0.75) # lbs CO2 per kwh
gallons_per_person = getDefault(locality,'water_hw_gal_per_person', 13.)
default_size = float(getDefault(locality,'general_household_size_default', 4.))
people = inputs.get("household_members",default_size)
daily_hw_use = people * gallons_per_person
wh_set_temp = getDefault(locality,'water_heater_settemp', 120.)
wh_input_water_temp = getDefault(locality,'water_input_temp', 50.)
water_deltaT = wh_set_temp - wh_input_water_temp
water_specific_heat = 8.34 # BTU/gal/degF
BTU_per_kwh = 3414
co2_per_btu = co2_per_kwh / BTU_per_kwh
# calculations for heat pump water heater
wh_efficiency = getDefault(locality,'water_heater_heatpump_efficiency', 3.)
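# Annual heater input energy (BTU) = gallons/day * 8.34 BTU/(gal*degF) * deltaT (degF) * 365 days / efficiency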
btu = daily_hw_use * water_specific_heat * water_deltaT * 365/ wh_efficiency
co2_hp = btu * co2_per_btu
wh_type = inputs.get('water_heater_type','Not sure')
if wh_type == "Electric":
wh_efficiency = getDefault(locality,'water_heater_electric_efficiency', 0.9)
btu = daily_hw_use * water_specific_heat * water_deltaT * 365/ wh_efficiency
co2_old = btu * co2_per_btu
elif wh_type == "Propane":
co2_per_gal = getDefault(locality, "propane_co2_per_gallon", 12.7) # https://www.eia.gov/environment/emissions/co2_vol_mass.php
btu_per_gal = getDefault(locality, "propane_btu_per_gallon", 91333.) # https://www.eia.gov/energyexplained/units-and-calculators/
wh_efficiency = getDefault(locality,'water_heater_propane_efficiency', 0.7)
btu = daily_hw_use * water_specific_heat * water_deltaT * 365/ wh_efficiency
co2_old = btu * co2_per_gal / btu_per_gal
elif wh_type == "Fuel Oil":
co2_per_gal = getDefault(locality,"fueloil_co2_per_gallon", 22.4) # https://www.eia.gov/environment/emissions/co2_vol_mass.php
btu_per_gal = getDefault(locality,"fueloil_btu_per_gallon", 137619.) # https://www.eia.gov/energyexplained/units-and-calculators/
wh_efficiency = getDefault(locality,'water_heater_fueloil_efficiency', 0.55)
btu = daily_hw_use * water_specific_heat * water_deltaT * 365/ wh_efficiency
co2_old = btu * co2_per_gal / btu_per_gal
elif wh_type == "Nat Gas" or wh_type == "Not sure":
co2_per_therm = NatGasFootprint(locality)
btu_per_therm = 100000.
wh_efficiency = getDefault(locality,'water_heater_natgas_efficiency', 0.7)
btu = daily_hw_use * water_specific_heat * water_deltaT * 365/ wh_efficiency
co2_old = btu * co2_per_therm / btu_per_therm
else:
explanation = "No need for a hot water assessment with a %s water heater." % wh_type
return points, cost, savings, explanation
explanation = "Getting a hot water assessment could save money and emissions, depending on several factors."
conversion_rate = getDefault(locality, 'water_hw_assessment_conversion_rate', 0.2)
points = (co2_old - co2_hp) * conversion_rate
return points, cost, savings, explanation
def EvalHeatPumpWaterHeater(inputs):
#replace_water_heater,water_heater_type,water_heater_age
# using methods from Alan Whitney's spreadsheet
explanation = "Didn't choose to install a HP Water Heater."
locality = getLocality(inputs)
points = cost = savings = 0
if inputs.get('replace_water_heater', YES) == YES:
co2_per_kwh = getDefault(locality,"elec_lbs_co2_per_kwh",0.75) # lbs CO2 per kwh
kwh_price = getDefault(locality,"elec_price_per_kwh",0.2209) # Eversource current price
gallons_per_person = getDefault(locality,'water_hw_use_per_person', 13.)
default_size = float(getDefault(locality,'general_household_size_default', 4.))
people = inputs.get("household_members",default_size)
daily_hw_use = people * gallons_per_person
wh_set_temp = getDefault(locality,'water_heater_settemp', 120.)
wh_input_water_temp = getDefault(locality,'water_input_temp', 50.)
water_deltaT = wh_set_temp - wh_input_water_temp
water_specific_heat = 8.34 # BTU/gal/degF
BTU_per_kwh = 3414
co2_per_btu = co2_per_kwh / BTU_per_kwh
btu_price = kwh_price / BTU_per_kwh
# calculations for heat pump water heater
wh_efficiency = getDefault(locality,'water_heater_heatpump_efficiency', 2.5)
btu = daily_hw_use * water_specific_heat * water_deltaT * 365/ wh_efficiency
co2_hp = btu * co2_per_btu
cost_hp = btu * btu_price
wh_type = inputs.get('water_heater_type','Not sure')
if wh_type == "Electric":
wh_efficiency = getDefault(locality,'water_heater_electric_efficiency', 0.9)
btu = daily_hw_use * water_specific_heat * water_deltaT * 365/ wh_efficiency
co2_old = btu * co2_per_btu
cost_old = btu * btu_price
elif wh_type == "Propane":
gallon_price = getDefault(locality,"propane_price_per_gallon", 3.09)
co2_per_gal = getDefault(locality, "propane_co2_per_gallon", 12.7) # https://www.eia.gov/environment/emissions/co2_vol_mass.php
btu_per_gal = getDefault(locality, "propane_btu_per_gallon", 91333.) # https://www.eia.gov/energyexplained/units-and-calculators/
wh_efficiency = getDefault(locality,'water_heater_propane_efficiency', 0.7)
btu = daily_hw_use * water_specific_heat * water_deltaT * 365/ wh_efficiency
co2_old = btu * co2_per_gal / btu_per_gal
cost_old = btu * gallon_price / btu_per_gal
elif wh_type == "Fuel Oil":
gallon_price = getDefault(locality,"fueloil_price_per_gallon", 2.92)
co2_per_gal = getDefault(locality,"fueloil_co2_per_gallon", 22.4) # https://www.eia.gov/environment/emissions/co2_vol_mass.php
btu_per_gal = getDefault(locality,"fueloil_btu_per_gallon", 137619.) # https://www.eia.gov/energyexplained/units-and-calculators/
wh_efficiency = getDefault(locality,'water_heater_fueloil_efficiency', 0.55)
btu = daily_hw_use * water_specific_heat * water_deltaT * 365/ wh_efficiency
co2_old = btu * co2_per_gal / btu_per_gal
cost_old = btu * gallon_price / btu_per_gal
elif wh_type == "Nat Gas" or wh_type == "Not sure":
therm_price = getDefault(locality,"natgas_price_per_therm", 1.25)
co2_per_therm = NatGasFootprint(locality)
btu_per_therm = 100000.
wh_efficiency = getDefault(locality,'water_heater_natgas_efficiency', 0.7)
btu = daily_hw_use * water_specific_heat * water_deltaT * 365/ wh_efficiency
co2_old = btu * co2_per_therm / btu_per_therm
cost_old = btu * therm_price / btu_per_therm
else:
explanation = "Not recommended to replace %s water heater with heat pump." % wh_type
return points, cost, savings, explanation
points = co2_old - co2_hp
savings = cost_old - cost_hp
cost = getDefault(locality,'water_hpwh_installed_price', 2500.)
rebate = getDefault(locality, 'water_hpwh_rebate', 750.)
cost = cost - rebate
decent_payback = getDefault(locality,'general_decent_home_investment_payback',10.)
payback = int(cost/savings) + 1
if (payback < decent_payback and payback > 0):
explanation = "installing a heat pump water heater would pay back in about %d years and save %.1f tons of CO2 over 10 years." % (payback, points/200.)
else:
explanation = "installing a heat pump water heater could pay back in over %d years but save %.1f tons of CO2 over 10 years." % (decent_payback, points/200.)
return points, cost, savings, explanation
def EvalSolarHW(inputs):
#install_solar_hw,solar_potential
explanation = "Didn't choose to install solar HW."
locality = getLocality(inputs)
points = cost = savings = 0
if inputs.get('install_solar_hw', YES) == YES:
co2_per_kwh = getDefault(locality,"elec_lbs_co2_per_kwh",0.75) # lbs CO2 per kwh
kwh_price = getDefault(locality,"elec_price_per_kwh",0.2209) # Eversource current price
gallons_per_person = getDefault(locality,'water_hw_use_per_person', 13.)
default_size = float(getDefault(locality,'general_household_size_default', 4.))
people = inputs.get("household_members",default_size)
daily_hw_use = people * gallons_per_person
wh_set_temp = getDefault(locality,'water_heater_settemp', 120.)
wh_input_water_temp = getDefault(locality,'water_input_temp', 50.)
water_deltaT = wh_set_temp - wh_input_water_temp
water_specific_heat = 8.34 # BTU/gal/degF
BTU_per_kwh = 3414
co2_per_btu = co2_per_kwh / BTU_per_kwh
btu_price = kwh_price / BTU_per_kwh
wh_type = inputs.get('water_heater_type','Not sure')
potential = SolarPotential(inputs)
if potential<0.5:
explanation = "installing solar hot water doesn't make sense with your homes solar potential."
elif wh_type == "Heat pump" or wh_type == "Solar":
explanation = "Not recommended to replace %s water heater with solar hot water." % wh_type
else:
solar_fraction = getDefault(locality,'solar_wh_system_fraction',0.8)
# What are you replacing
wh_type = inputs.get('water_heater_type','Not sure')
if wh_type == "Electric":
wh_efficiency = getDefault(locality,'water_heater_electric_efficiency', 0.9)
btu = daily_hw_use * water_specific_heat * water_deltaT * 365/ wh_efficiency
co2_old = btu * co2_per_btu
cost_old = btu * btu_price
elif wh_type == "Propane":
gallon_price = getDefault(locality,"propane_price_per_gallon", 3.09)
co2_per_gal = getDefault(locality, "propane_co2_per_gallon", 12.7) # https://www.eia.gov/environment/emissions/co2_vol_mass.php
btu_per_gal = getDefault(locality, "propane_btu_per_gallon", 91333.) # https://www.eia.gov/energyexplained/units-and-calculators/
wh_efficiency = getDefault(locality,'water_heater_propane_efficiency', 0.7)
btu = daily_hw_use * water_specific_heat * water_deltaT * 365/ wh_efficiency
co2_old = btu * co2_per_gal / btu_per_gal
cost_old = btu * gallon_price / btu_per_gal
elif wh_type == "Fuel Oil":
gallon_price = getDefault(locality,"fueloil_price_per_gallon", 2.92)
co2_per_gal = getDefault(locality,"fueloil_co2_per_gallon", 22.4) # https://www.eia.gov/environment/emissions/co2_vol_mass.php
btu_per_gal = getDefault(locality,"fueloil_btu_per_gallon", 137619.) # https://www.eia.gov/energyexplained/units-and-calculators/
wh_efficiency = getDefault(locality,'water_heater_fueloil_efficiency', 0.55)
btu = daily_hw_use * water_specific_heat * water_deltaT * 365/ wh_efficiency
co2_old = btu * co2_per_gal / btu_per_gal
cost_old = btu * gallon_price / btu_per_gal
elif wh_type == "Nat Gas" or wh_type == "Not sure":
therm_price = getDefault(locality,"natgas_price_per_therm", 1.25)
co2_per_therm = NatGasFootprint(locality)
btu_per_therm = 100000
wh_efficiency = getDefault(locality,'water_heater_natgas_efficiency', 0.7)
btu = daily_hw_use * water_specific_heat * water_deltaT * 365/ wh_efficiency
co2_old = btu * co2_per_therm / btu_per_therm
cost_old = btu * therm_price / btu_per_therm
tax_credit = getDefault(locality,'solar_hw_federal_tax_credit',0.26) # for 2020
state_credit = getDefault(locality,'solar_hw_state_tax_credit',0.)
state_rebate = getDefault(locality,'solar_hw_state_rebate', 1000.)
utility_rebate = getDefault(locality,'solar_hw_utility_rebate', 0.)
points = solar_fraction * co2_old
savings = solar_fraction * cost_old
# too simplistic I think
system_cost = getDefault(locality,'solar_hw_system_average_cost', 9000.) / potential
cost = system_cost * (1 - tax_credit) * (1 - state_credit) - state_rebate - utility_rebate
decent_payback = getDefault(locality,'general_decent_home_investment_payback',10.)
payback = int(cost/savings) + 1
if (payback < decent_payback and payback > 0):
explanation = "installing solar hot water could pay back in about %d years and save %.1f tons of CO2 over 10 years." % (payback, points/200.)
else:
explanation = "installing solar hot water could pay back in over %d years but save %.1f tons of CO2 over 10 years." % (decent_payback, points/200.)
return points, cost, savings, explanation
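# Usage sketch (illustrative only): how EvalSolarHW might be called. The input
# keys mirror the ones read above; YES, getLocality, getDefault and SolarPotential
# are assumed to be defined earlier in this module, and the values shown are
# hypothetical.
#
# example_inputs = {
#     'install_solar_hw': YES,
#     'water_heater_type': 'Electric',
#     'household_members': 3,
# }
# points, cost, savings, explanation = EvalSolarHW(example_inputs)
# print(explanation)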
|
py
|
1a5a2b6793ecaeb432ede498c9e080b7e75dab1f
|
"""
Current version: |version| (:ref:`changelog`)
This library is designed to simplify adaptive signal
processing tasks within Python
(filtering, prediction, reconstruction, classification).
For code optimisation,
this library uses `Numpy <http://www.numpy.org/>`_ for array operations.
This library also presents some new methods
for adaptive signal processing.
The library is designed to be used with datasets and also with
real-time measuring (sample-after-sample feeding).
.. toctree::
:maxdepth: 2
index
License
===============
This project is under `MIT License <https://en.wikipedia.org/wiki/MIT_License>`_.
Installation
====================
With `pip <https://pypi.python.org/pypi/pip>`_ from terminal: ``$ pip install padasip``
Or you can download the source code from Github
(`link <https://github.com/matousc89/padasip>`_)
Tutorials
===============
All Padasip related tutorials are created as Jupyter notebooks. You can find
them in `Python Adaptive Signal Processing Handbook
<https://github.com/matousc89/Python-Adaptive-Signal-Processing-Handbook>`_.
The User Guide
=====================
If you need to know something that is not covered by the tutorials,
check the complete documentation here
.. toctree::
:maxdepth: 2
:titlesonly:
sources/preprocess
sources/filters
sources/ann
sources/detection
sources/misc
Contact
=====================
By email: [email protected]
Changelog
======================
For informations about versions and updates see :ref:`changelog`.
Indices and tables
===========================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
"""
#from padasip.preprocess import
import padasip.ann
import padasip.filters
import padasip.preprocess
import padasip.misc
import padasip.detection
# back compatibility with v0.5
from padasip.preprocess.standardize import standardize
from padasip.preprocess.standardize_back import standardize_back
from padasip.preprocess.input_from_history import input_from_history
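# Usage sketch (illustrative, not part of the package): the preprocessing helpers
# re-exported above can be used to prepare data for the adaptive filters. Exact
# signatures are documented in the tutorials referenced in the docstring.
#
# import numpy as np
# import padasip as pa
#
# x = np.random.normal(0, 1, 300)        # raw measurements
# x_std = pa.standardize(x)              # zero mean, unit variance
# X = pa.input_from_history(x_std, 4)    # sliding input windows of 4 samples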
|
py
|
1a5a2b828f3e3a0fece41864e3e0ab6841d33429
|
import unittest
import numpy as np
from limix.core.covar.zkz import ZKZCov
from limix.utils.check_grad import mcheck_grad
import scipy as sp
class TestZKZ(unittest.TestCase):
def setUp(self):
np.random.seed()
print('\n\n\n')
print(np.random.randn(1))
print('\n\n\n')
self._X = np.random.randn(10, 5)
tmp = np.random.randn(10, 20)
self.Kinship = tmp.dot(tmp.transpose())
self._cov = ZKZCov(self._X, self.Kinship, remove_diag=True)
def test_Kgrad(self):
def func(x, i):
self._cov.scale = x[i]
return self._cov.K()
def grad(x, i):
self._cov.scale = x[i]
return self._cov.K_grad_i(0)
x0 = np.array([self._cov.scale])
err = mcheck_grad(func, grad, x0)
np.testing.assert_almost_equal(err, 0, decimal=5)
def func(x, i):
self._cov.length = x[i]
return self._cov.K()
def grad(x, i):
self._cov.scale = x[i]
return self._cov.K_grad_i(1)
x0 = np.array([self._cov.scale])
err = mcheck_grad(func, grad, x0)
def test_penalty(self):
self._cov.setPenalty(10.,2.)
def func(x, i):
self._cov.scale = x[i]
return self._cov.K()
def grad(x, i):
self._cov.scale = x[i]
return self._cov.K_grad_i(0)
x0 = np.array([self._cov.scale])
err = mcheck_grad(func, grad, x0)
np.testing.assert_almost_equal(err, 0, decimal=5)
def func(x, i):
self._cov.length = x[i]
return self._cov.K()
def grad(x, i):
self._cov.scale = x[i]
return self._cov.K_grad_i(1)
x0 = np.array([self._cov.scale])
err = mcheck_grad(func, grad, x0)
if __name__ == '__main__':
unittest.main()
|
py
|
1a5a2cbb34ec852595e92be93e0e55a96d725d58
|
class NotDefinedYet:
@property
def ha(self) -> "NotDefinedYet":
"""
This property returns `self`.
It's fun because you can call it like `obj.ha.ha.ha.ha.ha.ha...`.
:return: self!
"""
return self
class ClassInitFunction:
def __init__(self, value: str, other=1) -> None:
"""
Initialize instance.
:param value: Value to store
:param int other: Other value with default
"""
self.value = value
self.other = other
class ClassWithFunction:
def thing(self, value: str, other=1) -> str:
"""
Concatenate an integer after a string.
:param value: Value to store
:param int other: Other value with default
:return: Concatenated result
"""
return f"{value}{other}"
|
py
|
1a5a2d4fb2d8e0e657f3e69a5cbe176bf8c30b48
|
#--------------------- Packages
import pandas as pd
import dash_table
#--------------------- Datatable
def datatable_asset(df):
"""Function to create a datatable which is used to return the tweets and sentiment."""
datatable = dash_table.DataTable(
id='typing_formatting_1',
data=df.to_dict('records'),
columns=[
{
'id': 'product_name',
'name': 'Auction',
'type': 'text'
},
{
'id': 'link',
'name': 'URL',
'type': 'text',
'presentation': 'markdown'
},
{
'id': 'user_feedback',
'name': 'Feedback',
'type': 'text'
},
{
'id': 'user_feedback_positive',
'name': 'Positive feedback (%)',
'type': 'text'
},
{
'id': 'price',
'name': 'Price ($)',
'type': 'numeric'
},
],
# Conditional styling of the data cells
style_data_conditional=[
# Fix columnd widths
{'if': {'column_id': 'product_name'},
'width': '20%'},
{'if': {'column_id': 'link'},
'width': '20%'},
{'if': {'column_id': 'user_feedback'},
'width': '20%'},
{'if': {'column_id': 'user_feedback_positive'},
'width': '20%'},
{'if': {'column_id': 'price'},
'width': '20%'},
],
# Formatting the data/headers cells
style_cell={'backgroundColor': '#f7f7f7', 'font-family': 'helvetica',
'fontColor': '#000000', 'fontSize': 24,
'textAlign': 'center'
},
style_data={'border': '1px solid LightPink', 'font-size': 24,
'font-family': 'helvetica', 'whiteSpace': 'normal',
},
style_header={'border': '1px solid LightPink', 'font-size': 28,
'font-family': 'helvetica', 'textAlign': 'center',
'fontWeight': 'bold'
},
css=[{
'selector': '.dash-spreadsheet td div',
'rule': '''
line-height: 35px;
max-height: 35px; min-height: 35px; height: 35px;
display: block;
overflow-y: hidden;
''',
}, {'selector': 'table', 'rule': 'table-layout: fixed'}
],
tooltip_data=[{
column: {'value': str(value), 'type': 'markdown'}
for column, value in row.items()
}
for row in df.to_dict('records')
],
tooltip_duration=None,
editable=True,
page_size=10,
filter_action="native",
sort_action="native",
sort_mode="multi",
column_selectable="single",
row_selectable="multi",
row_deletable=True,
selected_columns=[],
selected_rows=[],
page_action="native",
)
return datatable
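# Usage sketch (illustrative only): build a small DataFrame with the five columns
# referenced above and render it. The column values are hypothetical.
#
# demo_df = pd.DataFrame([{
#     'product_name': 'Vintage clock',
#     'link': '[listing](https://example.com)',
#     'user_feedback': 'Great seller',
#     'user_feedback_positive': 98,
#     'price': 45.0,
# }])
# table = datatable_asset(demo_df)  # returns a dash_table.DataTable for a layout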
|
py
|
1a5a2dfc69a19b80a5fc0a9cc4bd363fa4bf8370
|
from django.contrib import admin
# Register your models here.
from .models import Department, DepartmentImages
class DepartmentInline(admin.TabularInline):
model = DepartmentImages
@admin.register(Department)
class DepartmentAdmin(admin.ModelAdmin):
inlines = [
DepartmentInline,
]
|
py
|
1a5a2e12a81121521d9951547df03ee44b6c2043
|
# -*- coding:utf-8 -*-
# ! ./usr/bin/env python
# __author__ = 'zzp'
import cv2
import json
import glob
import numpy as np
from os.path import join
from os import listdir
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dir',type=str, default='./GOT_10k', help='your got_10k data dir')
args = parser.parse_args()
got10k_base_path = args.dir
sub_sets = sorted({'train_data', 'val_data'})
got10k = []
for sub_set in sub_sets:
sub_set_base_path = join(got10k_base_path, sub_set)
for video_set in sorted(listdir(sub_set_base_path)):
videos = sorted(listdir(join(sub_set_base_path, video_set)))
s = []
for vi, video in enumerate(videos):
print('subset: {}, video_set: {}, video id: {:04d} / {:04d}'.format(sub_set, video_set, vi, len(videos)))
v = dict()
v['base_path'] = join(sub_set, video_set, video)
v['frame'] = []
video_base_path = join(sub_set_base_path, video_set, video)
gts_path = join(video_base_path, 'groundtruth.txt')
# gts_file = open(gts_path, 'r')
# gts = gts_file.readlines()
gts = np.loadtxt(open(gts_path, "rb"), delimiter=',')
# get image size
im_path = join(video_base_path, '00000001.jpg')
im = cv2.imread(im_path)
size = im.shape # height, width
frame_sz = [size[1], size[0]] # width,height
# get all im name
jpgs = sorted(glob.glob(join(video_base_path, '*.jpg')))
f = dict()
for idx, img_path in enumerate(jpgs):
f['frame_sz'] = frame_sz
f['img_path'] = img_path.split('/')[-1]
gt = gts[idx]
bbox = [int(g) for g in gt] # (x,y,w,h)
f['bbox'] = bbox
v['frame'].append(f.copy())
s.append(v)
got10k.append(s)
print('save json (raw got10k info), please wait 1 min~')
json.dump(got10k, open('got10k.json', 'w'), indent=4, sort_keys=True)
print('got10k.json has been saved in ./')
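# Quick sanity check of the generated file (illustrative; the structure follows the
# dicts built above: one list per sub_set, one dict per video with 'base_path' and
# a 'frame' list of {'frame_sz', 'img_path', 'bbox'} entries).
#
# with open('got10k.json') as fh:
#     data = json.load(fh)
# print(len(data), 'sub-sets,', len(data[0]), 'videos in the first one')
# print(data[0][0]['base_path'], len(data[0][0]['frame']), 'frames')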
|
py
|
1a5a2ec630ac05b90d7d807866984f45771fc2d4
|
from functools import total_ordering
from typing import Dict, Union, Callable, Any
from cereal import log, car
import cereal.messaging as messaging
from common.realtime import DT_CTRL
from selfdrive.config import Conversions as CV
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
# Alert priorities
class Priority:
LOWEST = 0
LOWER = 1
LOW = 2
MID = 3
HIGH = 4
HIGHEST = 5
# Event types
class ET:
ENABLE = 'enable'
PRE_ENABLE = 'preEnable'
NO_ENTRY = 'noEntry'
WARNING = 'warning'
USER_DISABLE = 'userDisable'
SOFT_DISABLE = 'softDisable'
IMMEDIATE_DISABLE = 'immediateDisable'
PERMANENT = 'permanent'
# get event name from enum
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
def __init__(self):
self.events = []
self.static_events = []
self.events_prev = dict.fromkeys(EVENTS.keys(), 0)
@property
def names(self):
return self.events
def __len__(self):
return len(self.events)
def add(self, event_name, static=False):
if static:
self.static_events.append(event_name)
self.events.append(event_name)
def clear(self):
self.events_prev = {k: (v+1 if k in self.events else 0) for k, v in self.events_prev.items()}
self.events = self.static_events.copy()
def any(self, event_type):
for e in self.events:
if event_type in EVENTS.get(e, {}).keys():
return True
return False
def create_alerts(self, event_types, callback_args=None):
if callback_args is None:
callback_args = []
ret = []
for e in self.events:
types = EVENTS[e].keys()
for et in event_types:
if et in types:
alert = EVENTS[e][et]
if not isinstance(alert, Alert):
alert = alert(*callback_args)
if DT_CTRL * (self.events_prev[e] + 1) >= alert.creation_delay:
alert.alert_type = f"{EVENT_NAME[e]}/{et}"
ret.append(alert)
return ret
def add_from_msg(self, events):
for e in events:
self.events.append(e.name.raw)
def to_msg(self):
ret = []
for event_name in self.events:
event = car.CarEvent.new_message()
event.name = event_name
for event_type in EVENTS.get(event_name, {}).keys():
setattr(event, event_type , True)
ret.append(event)
return ret
@total_ordering
class Alert:
def __init__(self,
alert_text_1: str,
alert_text_2: str,
alert_status,
alert_size,
alert_priority,
visual_alert,
audible_alert,
duration_sound: float,
duration_hud_alert: float,
duration_text: float,
alert_rate: float = 0.,
creation_delay: float = 0.):
self.alert_type = ""
self.alert_text_1 = alert_text_1
self.alert_text_2 = alert_text_2
self.alert_status = alert_status
self.alert_size = alert_size
self.alert_priority = alert_priority
self.visual_alert = visual_alert
self.audible_alert = audible_alert
self.duration_sound = duration_sound
self.duration_hud_alert = duration_hud_alert
self.duration_text = duration_text
self.start_time = 0.
self.alert_rate = alert_rate
self.creation_delay = creation_delay
# typecheck that enums are valid on startup
tst = car.CarControl.new_message()
tst.hudControl.visualAlert = self.visual_alert
def __str__(self) -> str:
return self.alert_text_1 + "/" + self.alert_text_2 + " " + str(self.alert_priority) + " " + str(
self.visual_alert) + " " + str(self.audible_alert)
def __gt__(self, alert2) -> bool:
return self.alert_priority > alert2.alert_priority
def __eq__(self, alert2) -> bool:
return self.alert_priority == alert2.alert_priority
class NoEntryAlert(Alert):
def __init__(self, alert_text_2, audible_alert=AudibleAlert.chimeError,
visual_alert=VisualAlert.none, duration_hud_alert=2.):
super().__init__("openpilot Unavailable", alert_text_2, AlertStatus.normal,
AlertSize.mid, Priority.LOW, visual_alert,
audible_alert, .4, duration_hud_alert, 3.)
class SoftDisableAlert(Alert):
def __init__(self, alert_text_2):
super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
AlertStatus.critical, AlertSize.full,
Priority.MID, VisualAlert.steerRequired,
AudibleAlert.chimeWarningRepeat, .1, 2., 2.),
class ImmediateDisableAlert(Alert):
def __init__(self, alert_text_2, alert_text_1="TAKE CONTROL IMMEDIATELY"):
super().__init__(alert_text_1, alert_text_2,
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.steerRequired,
AudibleAlert.chimeWarningRepeat, 2.2, 3., 4.),
class EngagementAlert(Alert):
def __init__(self, audible_alert=True):
super().__init__("", "",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none,
audible_alert, 2.0, 0., 0.),
# ********** alert callback functions **********
def below_steer_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
speed = int(round(CP.minSteerSpeed * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH)))
unit = "km/h" if metric else "mph"
return Alert(
"TAKE CONTROL",
"Steer Unavailable Below %d %s" % (speed, unit),
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.none, 0., 0.4, .3)
def calibration_incomplete_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
speed = int(MIN_SPEED_FILTER * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH))
unit = "km/h" if metric else "mph"
return Alert(
"Calibration in Progress: %d%%" % sm['liveCalibration'].calPerc,
"Drive Above %d %s" % (speed, unit),
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2)
def no_gps_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
gps_integrated = sm['health'].hwType in [log.HealthData.HwType.uno, log.HealthData.HwType.dos]
return Alert(
"Poor GPS reception",
"If sky is visible, contact support" if gps_integrated else "Check GPS antenna placement",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=300.)
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
text = "Cruise Mode Disabled"
if CP.carName == "honda":
text = "Main Switch Off"
return NoEntryAlert(text, duration_hud_alert=0.)
def auto_lane_change_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool) -> Alert:
alc_timer = sm['pathPlan'].autoLaneChangeTimer
return Alert(
"Auto Lane Change starts in (%d)" % alc_timer,
"Monitor Other Vehicles",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.steerRequired, AudibleAlert.none, 0., .1, .1, alert_rate=0.75)
EVENTS: Dict[int, Dict[str, Union[Alert, Callable[[Any, messaging.SubMaster, bool], Alert]]]] = {
# ********** events with no alerts **********
# ********** events only containing alerts displayed in all states **********
EventName.debugAlert: {
ET.PERMANENT: Alert(
"DEBUG ALERT",
"",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, .1, .1),
},
EventName.startup: {
ET.PERMANENT: Alert(
"Be ready to take over at any time",
"Always keep hands on wheel and eyes on road",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
EventName.startupMaster: {
ET.PERMANENT: Alert(
"WARNING: This branch is not tested",
"Always keep hands on wheel and eyes on road",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
EventName.startupNoControl: {
ET.PERMANENT: Alert(
"Dashcam mode",
"Always keep hands on wheel and eyes on road",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
EventName.startupNoCar: {
ET.PERMANENT: Alert(
"Dashcam mode for unsupported car",
"Always keep hands on wheel and eyes on road",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
EventName.invalidGiraffeToyota: {
ET.PERMANENT: Alert(
"Unsupported Giraffe Configuration",
"Visit comma.ai/tg",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.whitePandaUnsupported: {
ET.PERMANENT: Alert(
"White Panda Is No Longer Supported",
"Upgrade to comma two or black panda",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("White panda is no longer supported"),
},
EventName.invalidLkasSetting: {
ET.PERMANENT: Alert(
"Stock LKAS is turned on",
"Turn off stock LKAS to engage",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.communityFeatureDisallowed: {
# LOW priority to overcome Cruise Error
ET.PERMANENT: Alert(
"Community Feature Detected",
"Enable Community Features in Developer Settings",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.carUnrecognized: {
ET.PERMANENT: Alert(
"Dashcam Mode",
"Car Unrecognized",
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.stockAeb: {
ET.PERMANENT: Alert(
"BRAKE!",
"Stock AEB: Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 1., 2., 2.),
},
EventName.stockFcw: {
ET.PERMANENT: Alert(
"BRAKE!",
"Stock FCW: Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 1., 2., 2.),
},
EventName.fcw: {
ET.PERMANENT: Alert(
"BRAKE!",
"Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.chimeWarningRepeat, 1., 2., 2.),
},
EventName.ldw: {
ET.PERMANENT: Alert(
"TAKE CONTROL",
"Lane Departure Detected",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimePrompt, 1., 2., 3.),
},
# ********** events only containing alerts that display while engaged **********
EventName.gasPressed: {
ET.PRE_ENABLE: Alert(
"openpilot will not brake while gas pressed",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .0, .0, .1, creation_delay=1.),
},
EventName.vehicleModelInvalid: {
ET.WARNING: Alert(
"Vehicle Parameter Identification Failed",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.steerRequired, AudibleAlert.none, .0, .0, .1),
},
EventName.steerTempUnavailableMute: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Steering Temporarily Unavailable",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2, .2, .2),
},
EventName.preDriverDistracted: {
ET.WARNING: Alert(
"KEEP EYES ON ROAD: Driver Distracted",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.promptDriverDistracted: {
ET.WARNING: Alert(
"KEEP EYES ON ROAD",
"Driver Appears Distracted",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.chimeWarning2Repeat, .1, .1, .1),
},
EventName.driverDistracted: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Was Distracted",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.chimeWarningRepeat, .1, .1, .1),
},
EventName.preDriverUnresponsive: {
ET.WARNING: Alert(
"TOUCH STEERING WHEEL: No Face Detected",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.promptDriverUnresponsive: {
ET.WARNING: Alert(
"TOUCH STEERING WHEEL",
"Driver Is Unresponsive",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.chimeWarning2Repeat, .1, .1, .1),
},
EventName.driverUnresponsive: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Was Unresponsive",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.chimeWarningRepeat, .1, .1, .1),
},
EventName.driverMonitorLowAcc: {
ET.WARNING: Alert(
"CHECK DRIVER FACE VISIBILITY",
"Driver Monitor Model Output Uncertain",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .4, 0., 1.5),
},
EventName.manualRestart: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Resume Driving Manually",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.resumeRequired: {
ET.WARNING: Alert(
"STOPPED",
"Press Resume to Move",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
},
EventName.belowSteerSpeed: {
ET.WARNING: below_steer_speed_alert,
},
EventName.preLaneChangeLeft: {
ET.WARNING: Alert(
"Steer Left to Start Lane Change",
"Monitor Other Vehicles",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.preLaneChangeRight: {
ET.WARNING: Alert(
"Steer Right to Start Lane Change",
"Monitor Other Vehicles",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1, alert_rate=0.75),
},
EventName.laneChangeBlocked: {
ET.WARNING: Alert(
"Car Detected in Blindspot",
"Monitor Other Vehicles",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1),
},
EventName.laneChange: {
ET.WARNING: Alert(
"Changing Lane",
"Monitor Other Vehicles",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .0, .1, .1),
},
EventName.steerSaturated: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Turn Exceeds Steering Limit",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimePrompt, 1., 2., 3.),
},
EventName.turningIndicatorOn: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Steer Unavailable while Turning",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .0, .0, .2),
},
EventName.lkasButtonOff: {
ET.WARNING: Alert(
"lkasButtonOff",
"LKAS button off",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .1),
},
EventName.autoLaneChange: {
ET.WARNING: auto_lane_change_alert,
},
# ********** events that affect controls state transitions **********
EventName.pcmEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.chimeEngage),
},
EventName.buttonEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.chimeEngage),
},
EventName.pcmDisable: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
},
EventName.buttonCancel: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
},
EventName.brakeHold: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.parkBrake: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("Park Brake Engaged"),
},
EventName.pedalPressed: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("Pedal Pressed During Attempt",
visual_alert=VisualAlert.brakePressed),
},
EventName.wrongCarMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: wrong_car_mode_alert,
},
EventName.wrongCruiseMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("Enable Adaptive Cruise"),
},
EventName.steerTempUnavailable: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Steering Temporarily Unavailable",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimeWarning1, .4, 2., 3.),
ET.NO_ENTRY: NoEntryAlert("Steering Temporarily Unavailable",
duration_hud_alert=0.),
},
EventName.focusRecoverActive: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Attempting Refocus: Camera Focus Invalid",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimeWarning1, .4, 2., 3., creation_delay=3.1),
},
EventName.outOfSpace: {
ET.NO_ENTRY: NoEntryAlert("Out of Storage Space",
duration_hud_alert=0.),
},
EventName.belowEngageSpeed: {
ET.NO_ENTRY: NoEntryAlert("Speed Too Low"),
},
EventName.neosUpdateRequired: {
ET.PERMANENT: Alert(
"NEOS Update Required",
"Please Wait for Update",
AlertStatus.normal, AlertSize.mid,
Priority.HIGHEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("NEOS Update Required"),
},
EventName.sensorDataInvalid: {
ET.PERMANENT: Alert(
"No Data from Device Sensors",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("No Data from Device Sensors"),
},
EventName.noGps: {
ET.PERMANENT: no_gps_alert,
},
EventName.soundsUnavailable: {
ET.PERMANENT: Alert(
"Speaker not found",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("Speaker not found"),
},
EventName.tooDistracted: {
ET.NO_ENTRY: NoEntryAlert("Distraction Level Too High"),
},
EventName.overheat: {
ET.SOFT_DISABLE: SoftDisableAlert("System Overheated"),
ET.NO_ENTRY: NoEntryAlert("System overheated"),
},
EventName.wrongGear: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.chimeDisengage),
ET.NO_ENTRY: NoEntryAlert("Gear not D"),
},
EventName.calibrationInvalid: {
ET.PERMANENT: Alert(
"Calibration Invalid",
"Reposition Device and Recalibrate",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.SOFT_DISABLE: SoftDisableAlert("Calibration Invalid: Reposition Device & Recalibrate"),
ET.NO_ENTRY: NoEntryAlert("Calibration Invalid: Reposition Device & Recalibrate"),
},
EventName.calibrationIncomplete: {
ET.SOFT_DISABLE: SoftDisableAlert("Calibration in Progress"),
ET.PERMANENT: calibration_incomplete_alert,
ET.NO_ENTRY: NoEntryAlert("Calibration in Progress"),
},
EventName.doorOpen: {
ET.SOFT_DISABLE: SoftDisableAlert("Door Open"),
ET.NO_ENTRY: NoEntryAlert("Door open"),
},
EventName.seatbeltNotLatched: {
ET.SOFT_DISABLE: SoftDisableAlert("Seatbelt Unlatched"),
ET.NO_ENTRY: NoEntryAlert("Seatbelt unlatched"),
},
EventName.espDisabled: {
ET.SOFT_DISABLE: SoftDisableAlert("ESP Off"),
ET.NO_ENTRY: NoEntryAlert("ESP Off"),
},
EventName.lowBattery: {
ET.SOFT_DISABLE: SoftDisableAlert("Low Battery"),
ET.NO_ENTRY: NoEntryAlert("Low Battery"),
},
EventName.commIssue: {
ET.SOFT_DISABLE: SoftDisableAlert("Communication Issue between Processes"),
ET.NO_ENTRY: NoEntryAlert("Communication Issue between Processes",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.radarCommIssue: {
ET.SOFT_DISABLE: SoftDisableAlert("Radar Communication Issue"),
ET.NO_ENTRY: NoEntryAlert("Radar Communication Issue",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.radarCanError: {
ET.SOFT_DISABLE: SoftDisableAlert("Radar Error: Restart the Car"),
ET.NO_ENTRY: NoEntryAlert("Radar Error: Restart the Car"),
},
EventName.radarFault: {
ET.SOFT_DISABLE: SoftDisableAlert("Radar Error: Restart the Car"),
ET.NO_ENTRY : NoEntryAlert("Radar Error: Restart the Car"),
},
EventName.modeldLagging: {
ET.SOFT_DISABLE: SoftDisableAlert("Driving model lagging"),
ET.NO_ENTRY : NoEntryAlert("Driving model lagging"),
},
EventName.posenetInvalid: {
ET.SOFT_DISABLE: SoftDisableAlert("Vision Model Output Uncertain"),
ET.NO_ENTRY: NoEntryAlert("Vision Model Output Uncertain"),
},
EventName.deviceFalling: {
ET.SOFT_DISABLE: SoftDisableAlert("Device Fell Off Mount"),
ET.NO_ENTRY: NoEntryAlert("Device Fell Off Mount"),
},
EventName.lowMemory: {
ET.SOFT_DISABLE: SoftDisableAlert("Low Memory: Reboot Your Device"),
ET.PERMANENT: Alert(
"RAM Critically Low",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY : NoEntryAlert("Low Memory: Reboot Your Device",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.controlsFailed: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Failed"),
ET.NO_ENTRY: NoEntryAlert("Controls Failed"),
},
EventName.controlsMismatch: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Mismatch"),
},
EventName.canError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN Error: Check Connections"),
ET.PERMANENT: Alert(
"CAN Error: Check Connections",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN Error: Check Connections"),
},
EventName.steerUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS Fault: Restart the Car"),
ET.PERMANENT: Alert(
"LKAS Fault: Restart the car to engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("LKAS Fault: Restart the Car"),
},
EventName.brakeUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
ET.PERMANENT: Alert(
"Cruise Fault: Restart the car to engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.gasUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Gas Fault: Restart the Car"),
ET.NO_ENTRY: NoEntryAlert("Gas Error: Restart the Car"),
},
EventName.reverseGear: {
ET.PERMANENT: Alert(
"Reverse\nGear",
"",
AlertStatus.normal, AlertSize.full,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, 0., 0., .2, creation_delay=0.5),
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Reverse Gear"),
ET.NO_ENTRY: NoEntryAlert("Reverse Gear"),
},
EventName.cruiseDisabled: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Is Off"),
},
EventName.plannerError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Planner Solution Error"),
ET.NO_ENTRY: NoEntryAlert("Planner Solution Error"),
},
EventName.relayMalfunction: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Harness Malfunction"),
ET.PERMANENT: Alert(
"Harness Malfunction",
"Please Check Hardware",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("Harness Malfunction"),
},
EventName.noTarget: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"No close lead car",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.chimeDisengage, .4, 2., 3.),
ET.NO_ENTRY : NoEntryAlert("No Close Lead Car"),
},
EventName.speedTooLow: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"Speed too low",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.chimeDisengage, .4, 2., 3.),
},
EventName.speedTooHigh: {
ET.WARNING: Alert(
"Speed Too High",
"Slow down to resume operation",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.chimeWarning2Repeat, 2.2, 3., 4.),
ET.NO_ENTRY: Alert(
"Speed Too High",
"Slow down to engage",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.chimeError, .4, 2., 3.),
},
EventName.internetConnectivityNeeded: {
ET.PERMANENT: Alert(
"Please connect to Internet",
"An Update Check Is Required to Engage",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("Please Connect to Internet",
audible_alert=AudibleAlert.chimeDisengage),
},
EventName.lowSpeedLockout: {
ET.PERMANENT: Alert(
"Cruise Fault: Restart the car to engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
}
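# Usage sketch (illustrative only): how the Events container and the EVENTS table
# above fit together. Event names come from cereal's EventName enum; create_alerts()
# resolves the table entries into Alert instances for the requested event types.
#
# events = Events()
# events.add(EventName.startup, static=True)
# events.add(EventName.doorOpen)
# for alert in events.create_alerts([ET.PERMANENT, ET.NO_ENTRY]):
#     print(alert.alert_type, alert.alert_text_1)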
|
py
|
1a5a30cf90b17aa86ed724f393c02991aa5f07db
|
"""This module defines custom management commands for the app admin."""
import asyncio
from asgiref.sync import sync_to_async
from typing import Dict, Optional, Union, List, Tuple
from decimal import Decimal
from django.core.management.base import BaseCommand
from django.db.models import Q
from stellar_sdk.exceptions import NotFoundError
from stellar_sdk.transaction import Transaction as HorizonTransaction
from stellar_sdk.transaction_envelope import TransactionEnvelope
from stellar_sdk.utils import from_xdr_amount
from stellar_sdk.xdr import (
PaymentResult,
PathPaymentStrictSendResult,
PathPaymentStrictReceiveResult,
OperationResult,
TransactionResult,
)
from stellar_sdk.operation import (
Operation,
Payment,
PathPaymentStrictReceive,
PathPaymentStrictSend,
)
from stellar_sdk.server_async import ServerAsync
from stellar_sdk.client.aiohttp_client import AiohttpClient
from polaris import settings
from polaris.models import Asset, Transaction
from polaris.utils import getLogger, maybe_make_callback_async
from polaris.integrations import registered_custody_integration as rci
logger = getLogger(__name__)
PaymentOpResult = Union[
PaymentResult, PathPaymentStrictSendResult, PathPaymentStrictReceiveResult
]
PaymentOp = Union[Payment, PathPaymentStrictReceive, PathPaymentStrictSend]
class Command(BaseCommand):
"""
Streams transactions to the :attr:`~polaris.models.Asset.distribution_account`
of each :class:`~polaris.models.Asset` in the DB.
Note that this command assumes Stellar payments are made to one distribution
account address per asset. Some third party custody service providers may not
use this scheme, in which case the custody integration class should provide
an alternative command for detecting incoming Stellar payments.
For every response from the server, attempts to find a matching transaction in
the database and updates the transaction's status to ``pending_anchor`` or
``pending_receiver`` depending on the protocol.
Then, the :mod:`~polaris.management.commands.execute_outgoing_transactions` process
will query for transactions in those statuses and provide the anchor an integration
function for executing the payment or withdrawal.
**Optional arguments:**
-h, --help show this help message and exit
"""
def handle(self, *_args, **_options): # pragma: no cover
try:
asyncio.run(self.watch_transactions())
except Exception as e:
# This is very likely a bug, so re-raise the error and crash.
# Heroku will restart the process unless it is repeatedly crashing,
# in which case restarting isn't of much use.
logger.exception("watch_transactions() threw an unexpected exception")
raise e
async def watch_transactions(self): # pragma: no cover
assets = await sync_to_async(list)(Asset.objects.all())
await asyncio.gather(
*[
self._for_account(rci.get_distribution_account(asset=asset))
for asset in assets
]
)
async def _for_account(self, account: str):
"""
Stream transactions for the server Stellar address.
"""
async with ServerAsync(settings.HORIZON_URI, client=AiohttpClient()) as server:
try:
# Ensure the distribution account actually exists
await server.load_account(account)
except NotFoundError:
# This exception will crash the process, but the anchor needs
# to provide valid accounts to watch.
raise RuntimeError(
"Stellar distribution account does not exist in horizon"
)
last_completed_transaction = await sync_to_async(
Transaction.objects.filter(
Q(kind=Transaction.KIND.withdrawal) | Q(kind=Transaction.KIND.send),
receiving_anchor_account=account,
status=Transaction.STATUS.completed,
)
.order_by("-completed_at")
.first
)()
cursor = "0"
if last_completed_transaction and last_completed_transaction.paging_token:
cursor = last_completed_transaction.paging_token
logger.info(
f"starting transaction stream for {account} with cursor {cursor}"
)
endpoint = server.transactions().for_account(account).cursor(cursor)
async for response in endpoint.stream():
await self.process_response(response, account)
@classmethod
async def process_response(cls, response, account):
# We should not match valid pending transactions with ones that were
# unsuccessful on the stellar network. If they were unsuccessful, the
# client is also aware of the failure and will likely attempt to
# resubmit it, in which case we should match the resubmitted transaction
if not response.get("successful"):
return
try:
_ = response["id"]
envelope_xdr = response["envelope_xdr"]
memo = response["memo"]
result_xdr = response["result_xdr"]
except KeyError:
return
# Query filters for SEP6 and 24
withdraw_filters = Q(
status=Transaction.STATUS.pending_user_transfer_start,
kind__in=[
Transaction.KIND.withdrawal,
getattr(Transaction.KIND, "withdrawal-exchange"),
],
)
# Query filters for SEP31
send_filters = Q(
status=Transaction.STATUS.pending_sender,
kind=Transaction.KIND.send,
)
transactions = await sync_to_async(list)(
Transaction.objects.filter(
withdraw_filters | send_filters,
memo=memo,
receiving_anchor_account=account,
)
.select_related("asset")
.all()
)
if not transactions:
logger.info(f"No match found for stellar transaction {response['id']}")
return
elif len(transactions) == 1:
transaction = transactions[0]
else:
# in the prior implementation of watch_transactions, the first transaction
# to have the same memo is matched, so we'll do the same in the refactored
# version.
logger.error(f"multiple Transaction objects returned for memo: {memo}")
transaction = transactions[0]
logger.info(
f"Matched transaction object {transaction.id} for stellar transaction {response['id']}"
)
op_results = TransactionResult.from_xdr(result_xdr).result.results
horizon_tx = TransactionEnvelope.from_xdr(
envelope_xdr,
network_passphrase=settings.STELLAR_NETWORK_PASSPHRASE,
).transaction
payment_data = await cls._find_matching_payment_data(
response, horizon_tx, op_results, transaction
)
if not payment_data:
logger.warning(f"Transaction matching memo {memo} has no payment operation")
return
# Transaction.amount_in is overwritten with the actual amount sent in the stellar
# transaction. This allows anchors to validate the actual amount sent in
# execute_outgoing_transactions() and handle invalid amounts appropriately.
transaction.amount_in = round(
Decimal(payment_data["amount"]),
transaction.asset.significant_decimals,
)
# The stellar transaction has been matched with an existing record in the DB.
# Now the anchor needs to initiate the off-chain transfer of the asset.
if transaction.protocol == Transaction.PROTOCOL.sep31:
# SEP-31 uses 'pending_receiver' status
transaction.status = Transaction.STATUS.pending_receiver
await sync_to_async(transaction.save)()
else:
# SEP-6 and 24 uses 'pending_anchor' status
transaction.status = Transaction.STATUS.pending_anchor
await sync_to_async(transaction.save)()
await maybe_make_callback_async(transaction)
return None
@classmethod
async def _find_matching_payment_data(
cls,
response: Dict,
horizon_tx: HorizonTransaction,
result_ops: List[OperationResult],
transaction: Transaction,
) -> Optional[Dict]:
matching_payment_data = None
ops = horizon_tx.operations
for idx, op_result in enumerate(result_ops):
op, op_result = cls._cast_operation_and_result(ops[idx], op_result)
if not op_result: # not a payment op
continue
maybe_payment_data = cls._check_for_payment_match(
op, op_result, transaction.asset, transaction
)
if maybe_payment_data:
if ops[idx].source:
source = ops[idx].source.account_muxed or ops[idx].source.account_id
else:
source = (
horizon_tx.source.account_muxed or horizon_tx.source.account_id
)
await cls._update_transaction_info(
transaction, response["id"], response["paging_token"], source
)
matching_payment_data = maybe_payment_data
break
return matching_payment_data
@classmethod
async def _update_transaction_info(
cls, transaction: Transaction, stellar_txid: str, paging_token: str, source: str
):
transaction.stellar_transaction_id = stellar_txid
transaction.from_address = source
transaction.paging_token = paging_token
await sync_to_async(transaction.save)()
@classmethod
def _check_for_payment_match(
cls,
operation: PaymentOp,
op_result: PaymentOpResult,
want_asset: Asset,
transaction: Transaction,
) -> Optional[Dict]:
payment_data = cls._get_payment_values(operation, op_result)
if (
payment_data["destination"] == transaction.receiving_anchor_account
and payment_data["code"] == want_asset.code
and payment_data["issuer"] == want_asset.issuer
):
return payment_data
else:
return None
@classmethod
def _cast_operation_and_result(
cls, operation: Operation, op_result: OperationResult
) -> Tuple[Optional[PaymentOp], Optional[PaymentOpResult]]:
op_xdr_obj = operation.to_xdr_object()
if isinstance(operation, Payment):
return (
Payment.from_xdr_object(op_xdr_obj),
op_result.tr.payment_result,
)
elif isinstance(operation, PathPaymentStrictSend):
return (
PathPaymentStrictSend.from_xdr_object(op_xdr_obj),
op_result.tr.path_payment_strict_send_result,
)
elif isinstance(operation, PathPaymentStrictReceive):
return (
PathPaymentStrictReceive.from_xdr_object(op_xdr_obj),
op_result.tr.path_payment_strict_receive_result,
)
else:
return None, None
@classmethod
def _get_payment_values(
cls, operation: PaymentOp, op_result: PaymentOpResult
) -> Dict:
values = {
"destination": operation.destination.account_id,
"amount": None,
"code": None,
"issuer": None,
}
if isinstance(operation, Payment):
values["amount"] = str(operation.amount)
values["code"] = operation.asset.code
values["issuer"] = operation.asset.issuer
elif isinstance(operation, PathPaymentStrictSend):
# since the dest amount is not specified in a strict-send op,
# we need to get the dest amount from the operation's result
#
# this method of fetching amounts gives the "raw" amount, so
# we need to divide by Operation._ONE: 10000000
# (Stellar uses 7 decimal places of precision)
values["amount"] = from_xdr_amount(op_result.success.last.amount.int64)
values["code"] = operation.dest_asset.code
values["issuer"] = operation.dest_asset.issuer
elif isinstance(operation, PathPaymentStrictReceive):
values["amount"] = str(operation.dest_amount)
values["code"] = operation.dest_asset.code
values["issuer"] = operation.dest_asset.issuer
else:
raise ValueError("Unexpected operation, expected payment or path payment")
return values
|
py
|
1a5a32e4bc5097ac77d91466e1339e5c0f6c540a
|
from JumpScale import j
import argparse
import sys
class ArgumentParser(argparse.ArgumentParser):
def exit(self, status=0, message=None):
if message:
self._print_message(message, sys.stderr)
if j.application.state == "RUNNING":
j.application.stop(status)
else:
sys.exit(status)
def processLogin(parser):
parser.add_argument("-l", '--login', help='login for grid, if not specified defaults to root')
parser.add_argument("-p", '--passwd', help='passwd for grid')
parser.add_argument(
"-a", '--addr', help='ip addr of master, if not specified will be the one as specified in local config')
opts = parser.parse_args()
if opts.login is None:
opts.login = "root"
# if opts.passwd==None and opts.login=="root":
# if j.application.config.exists("grid.master.superadminpasswd"):
# opts.passwd=j.application.config.get("grid.master.superadminpasswd")
# else:
# opts.passwd=j.tools.console.askString("please provide superadmin passwd for the grid.")
# if opts.addr==None:
# opts.addr=j.application.config.get("grid.master.ip")
return opts
def getProcess(parser=None):
parser = parser or ArgumentParser()
parser.add_argument('-d', '--domain', help='Process domain name')
parser.add_argument('-n', '--name', help='Process name')
return parser.parse_args()
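# Usage sketch (illustrative only): a caller typically parses its arguments and
# reads the optional domain/name values.
#
# opts = getProcess()
# print(opts.domain, opts.name)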
|
py
|
1a5a32e6b6a637a4a5aab57570f7c0a963c8aca1
|
#!/usr/bin/env python3
import unittest
from pico8.lua import lua
VALID_LUA_SHORT_LINES = [line + b'\n' for line in b'''-- short test
-- by dan
function foo()
return 999
end'''.split(b'\n')]
VALID_LUA_EVERY_NODE = [line + b'\n' for line in b'''
-- title comment
-- author comment
-- the code with the nodes
-- doesn't have to make sense
function f(arg1, ...)
local zzz = {}
zzz[arg1] = 999
zzz['extra'] = ...
return zzz
end
local function myprint(msg)
print(msg)
end
a = 1
f(a, a+1 , a + 2 )
beta, gamma = 2, 3
do
gamma = 4
break
end
while a < 10 do
-- increase a
a += 1
if a % 2 == 0 then
f(a)
elseif a > 5 then
f(a, 5)
else
f(a, 1)
beta *= 2
end
end
repeat
-- reduce a
a -= 1
f(a)
until a <= 0
for a=3, 10, 2 do
f(a)
end
for beta in vals() do
f(beta)
end
if a < 20 then
goto mylabel
end
a = -20 + 2 - .1
gamma = 9.999e-3
::mylabel::
if (a * 10 > 100) myprint('yup')
prefix = 'foo'
mytable = {
[prefix..'key'] = 111,
barkey= 222;
333
}
a=1; b=2; c=3
if ((x < 1) or (x > width) or (y < 1) or (y > height)) then
return 0
end
::draw::
goto draw
'''.split(b'\n')]
HIGH_CHARS_UNICODE = 'xx \x00 \x07 🐱 ◝ ⬇️░✽●♥☉웃⌂⬅️😐♪🅾️◆…➡️★⧗⬆️ˇ∧❎▤▥'
HIGH_CHARS_P8SCII = (
b'xx \x00 \x07 \x82 \xff \x83\x84\x85\x86\x87\x88\x89'
b'\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99')
class TestUnicodeConversion(unittest.TestCase):
def testP8SCIIToUnicode(self):
self.assertEqual(
HIGH_CHARS_UNICODE,
lua.p8scii_to_unicode(HIGH_CHARS_P8SCII))
def testUnicodeToP8SCII(self):
self.assertEqual(
HIGH_CHARS_P8SCII,
lua.unicode_to_p8scii(HIGH_CHARS_UNICODE))
class TestLua(unittest.TestCase):
def testInit(self):
result = lua.Lua(4)
self.assertEqual(4, result._lexer._version)
self.assertEqual(4, result._parser._version)
def testFromLines(self):
result = lua.Lua.from_lines(VALID_LUA_SHORT_LINES, 4)
self.assertEqual(17, len(result._lexer._tokens))
def testGetCharCount(self):
result = lua.Lua.from_lines(VALID_LUA_SHORT_LINES, 4)
self.assertEqual(sum(len(line) for line in VALID_LUA_SHORT_LINES),
result.get_char_count())
def testGetTokenCount(self):
result = lua.Lua.from_lines(VALID_LUA_SHORT_LINES, 4)
self.assertEqual(5, result.get_token_count())
def testGetTokenCountCarriageReturns(self):
result = lua.Lua.from_lines([
b'function foo()\r\n',
b' return 999\r\n',
b'end\r\n'
], 4)
self.assertEqual(5, result.get_token_count())
def testGetTitle(self):
result = lua.Lua.from_lines(VALID_LUA_SHORT_LINES, 4)
self.assertEqual(b'short test', result.get_title())
def testGetByline(self):
result = lua.Lua.from_lines(VALID_LUA_SHORT_LINES, 4)
self.assertEqual(b'by dan', result.get_byline())
def testBaseLuaWriterNotYetImplemented(self):
# coverage
self.assertRaises(NotImplementedError,
lua.BaseLuaWriter(None, None).to_lines)
class TestLuaEchoWriter(unittest.TestCase):
def testToLinesEchoWriter(self):
result = lua.Lua.from_lines(VALID_LUA_SHORT_LINES, 4)
lines = list(result.to_lines())
self.assertEqual(lines, VALID_LUA_SHORT_LINES)
def testToLinesEchoWriterLastCharIsntNewline(self):
result = lua.Lua.from_lines(VALID_LUA_SHORT_LINES + [b'break'], 4)
lines = list(result.to_lines())
self.assertEqual(lines, VALID_LUA_SHORT_LINES + [b'break'])
class TestLuaASTEchoWriter(unittest.TestCase):
def testToLinesASTEchoWriter(self):
result = lua.Lua.from_lines(VALID_LUA_SHORT_LINES, 4)
lines = list(result.to_lines(writer_cls=lua.LuaASTEchoWriter))
self.assertEqual(lines, VALID_LUA_SHORT_LINES)
def testToLinesASTEchoWriterEveryNode(self):
result = lua.Lua.from_lines(VALID_LUA_EVERY_NODE, 4)
lines = list(result.to_lines(writer_cls=lua.LuaASTEchoWriter))
self.assertEqual(lines, VALID_LUA_EVERY_NODE)
class TestLuaMinifyWriter(unittest.TestCase):
def testMinifiesNames(self):
result = lua.Lua.from_lines(VALID_LUA_SHORT_LINES, 4)
lines = list(result.to_lines(writer_cls=lua.LuaMinifyWriter))
txt = b''.join(lines)
self.assertIn(b'function a()', txt)
def testMinifiesNamesEveryNode(self):
result = lua.Lua.from_lines(VALID_LUA_EVERY_NODE, 4)
lines = list(result.to_lines(writer_cls=lua.LuaMinifyWriter))
txt = b''.join(lines)
self.assertIn(b'function a(b,', txt)
self.assertIn(b'local c', txt)
self.assertIn(b'c[b]', txt)
self.assertIn(b'return c', txt)
self.assertIn(b'local function d(e)', txt)
self.assertIn(b'print(e)', txt)
self.assertIn(b'::u::\ngoto u', txt)
def testMinifiesSpaces(self):
result = lua.Lua.from_lines(VALID_LUA_SHORT_LINES, 4)
lines = list(result.to_lines(writer_cls=lua.LuaMinifyWriter))
txt = b''.join(lines)
self.assertEqual(b'''function a()
return 999
end''', txt)
def testMinifiesSpacesEveryNode(self):
result = lua.Lua.from_lines(VALID_LUA_EVERY_NODE, 4)
lines = list(result.to_lines(writer_cls=lua.LuaMinifyWriter))
txt = b''.join(lines)
self.assertNotIn(b'-- the code with the nodes', txt)
self.assertIn(b'''while f < 10 do
f += 1
if f % 2 == 0 then
a(f)
elseif f > 5 then
a(f, 5)
else
a(f, 1)
g *= 2
end
end
''', txt)
self.assertIn(b'''for g in i() do
a(g)
end
''', txt)
self.assertIn(b'f=1 n=2 o=3', txt)
def testMinifyTokenWriterMinifiesSpacesEveryNode(self):
result = lua.Lua.from_lines(VALID_LUA_EVERY_NODE, 4)
lines = list(result.to_lines(writer_cls=lua.LuaMinifyTokenWriter))
txt = b''.join(lines)
self.assertIn(b'-- author comment', txt)
self.assertNotIn(b'-- the code with the nodes', txt)
self.assertIn(
b'while f<10 do\nf+=1\nif f%2==0 then\na(f)\nelseif f>5 then\n'
b'a(f,5)\nelse\na(f,1)\ng*=2\nend\nend', txt)
self.assertIn(b'for g in i() do\na(g)\nend', txt)
self.assertIn(b'f=1;n=2;o=3', txt)
class TestLuaFormatterWriter(unittest.TestCase):
def testNormalizesSpaceCharacters(self):
result = lua.Lua.from_lines([b'a\t=\tb\r\n'], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterWriter))
txt = b''.join(lines)
self.assertEqual(b'a = b\n', txt)
def testTrailingWhitespace(self):
result = lua.Lua.from_lines([b'a = b \nc = d \n'], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterWriter))
txt = b''.join(lines)
self.assertEqual(b'a = b\nc = d\n', txt)
def testCommentAtEndOfLine(self):
result = lua.Lua.from_lines([b'a = b -- comment\nc = d\n'], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterWriter))
txt = b''.join(lines)
self.assertEqual(b'a = b -- comment\nc = d\n', txt)
def testCommentOnOwnLine(self):
result = lua.Lua.from_lines([b'a = b\n -- comment\nc = d\n'], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterWriter))
txt = b''.join(lines)
self.assertEqual(b'a = b\n-- comment\nc = d\n', txt)
def testIndentZero(self):
result = lua.Lua.from_lines([b'\n a = b\n c = d\n'], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterWriter))
txt = b''.join(lines)
self.assertEqual(b'\na = b\nc = d\n', txt)
def testIndentBlock(self):
result = lua.Lua.from_lines([b'''
a = 1
do
b = 2
c = 3
end
d = 4
while foo do
e = 5
end
repeat
f = 6
g = 7
h = 8
until foo
if foo then
i = 9
elseif bar then
j = 10
else
k = 11
end
for x=1,10,2 do
l = 12
m = 13
end
for x in foo do
n = 14
o = 15
end
'''], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterWriter))
txt = b''.join(lines)
self.assertEqual(b'''
a = 1
do
b = 2
c = 3
end
d = 4
while foo do
e = 5
end
repeat
f = 6
g = 7
h = 8
until foo
if foo then
i = 9
elseif bar then
j = 10
else
k = 11
end
for x=1,10,2 do
l = 12
m = 13
end
for x in foo do
n = 14
o = 15
end
''', txt)
def testIndentMulti(self):
result = lua.Lua.from_lines([b'''
do
a = 1
while foo do
b = 2
if bar then
c = 3
elseif baz then
d = 4
repeat
e = 5
until bing
else
f = 6
end
end
g = 7
end
h = 8
'''], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterWriter))
txt = b''.join(lines)
self.assertEqual(b'''
do
a = 1
while foo do
b = 2
if bar then
c = 3
elseif baz then
d = 4
repeat
e = 5
until bing
else
f = 6
end
end
g = 7
end
h = 8
''', txt)
def testIndentCommentsAndStatements(self):
result = lua.Lua.from_lines([b'''
x += 1 -- increment x
do
-- do stuff in here
print "stuff happens"
x -= 1 -- decrement x
end
-- END
'''], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterWriter))
txt = b''.join(lines)
self.assertEqual(b'''
x += 1 -- increment x
do
-- do stuff in here
print "stuff happens"
x -= 1 -- decrement x
end
-- END
''', txt)
def testIndentTableConstructor(self):
result = lua.Lua.from_lines([b'''
obj = {
foo=function(arg)
a = 1
b = 2
c = 3
end,
bar=function(arg, arg, arg)
d = 4
e = 5
end,
baz=999
}
'''], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterWriter))
txt = b''.join(lines)
self.assertEqual(b'''
obj = {
foo=function(arg)
a = 1
b = 2
c = 3
end,
bar=function(arg, arg, arg)
d = 4
e = 5
end,
baz=999
}
''', txt)
def testTooManyNewlines(self):
result = lua.Lua.from_lines([b'''
a = 1
b = 2
c = 3
'''], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterWriter))
txt = b''.join(lines)
self.assertEqual(b'''
a = 1
b = 2
c = 3
''', txt)
def testAcceptsIndentWidthArg(self):
result = lua.Lua.from_lines([b'''
do
a = 1
while foo do
b = 2
if bar then
c = 3
elseif baz then
d = 4
repeat
e = 5
until bing
else
f = 6
end
end
g = 7
end
h = 8
'''], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterWriter,
writer_args={'indentwidth': 3}))
txt = b''.join(lines)
self.assertEqual(b'''
do
a = 1
while foo do
b = 2
if bar then
c = 3
elseif baz then
d = 4
repeat
e = 5
until bing
else
f = 6
end
end
g = 7
end
h = 8
''', txt)
class TestLuaFormatterTokenWriter(unittest.TestCase):
def testNormalizesSpaceCharacters(self):
result = lua.Lua.from_lines([b'a\t=\tb\r\n'], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterTokenWriter))
txt = b''.join(lines)
self.assertEqual(b'a = b\n', txt)
def testTrailingWhitespace(self):
result = lua.Lua.from_lines([b'a = b \nc = d \n'], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterTokenWriter))
txt = b''.join(lines)
self.assertEqual(b'a = b\nc = d\n', txt)
def testCommentAtEndOfLine(self):
result = lua.Lua.from_lines([b'a = b -- comment\nc = d\n'], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterTokenWriter))
txt = b''.join(lines)
self.assertEqual(b'a = b -- comment\nc = d\n', txt)
def testCommentOnOwnLine(self):
result = lua.Lua.from_lines([b'a = b\n -- comment\nc = d\n'], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterTokenWriter))
txt = b''.join(lines)
self.assertEqual(b'a = b\n-- comment\nc = d\n', txt)
def testIndentZero(self):
result = lua.Lua.from_lines([b'\n a = b\n c = d\n'], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterTokenWriter))
txt = b''.join(lines)
self.assertEqual(b'\na = b\nc = d\n', txt)
def testIndentBlock(self):
result = lua.Lua.from_lines([b'''
a = 1
do
b = 2
c = 3
end
d = 4
while foo do
e = 5
end
repeat
f = 6
g = 7
h = 8
until foo
if foo then
i = 9
elseif bar then
j = 10
else
k = 11
end
for x=1,10,2 do
l = 12
m = 13
end
for x in foo do
n = 14
o = 15
end
'''], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterTokenWriter))
txt = b''.join(lines)
self.assertEqual(b'''
a = 1
do
b = 2
c = 3
end
d = 4
while foo do
e = 5
end
repeat
f = 6
g = 7
h = 8
until foo
if foo then
i = 9
elseif bar then
j = 10
else
k = 11
end
for x = 1, 10, 2 do
l = 12
m = 13
end
for x in foo do
n = 14
o = 15
end
''', txt)
def testIndentMulti(self):
result = lua.Lua.from_lines([b'''
do
a = 1
while foo do
b = 2
if bar then
c = 3
elseif baz then
d = 4
repeat
e = 5
until bing
else
f = 6
end
end
g = 7
end
h = 8
'''], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterTokenWriter))
txt = b''.join(lines)
self.assertEqual(b'''
do
a = 1
while foo do
b = 2
if bar then
c = 3
elseif baz then
d = 4
repeat
e = 5
until bing
else
f = 6
end
end
g = 7
end
h = 8
''', txt)
def testIndentCommentsAndStatements(self):
result = lua.Lua.from_lines([b'''
x += 1 -- increment x
do
-- do stuff in here
print "stuff happens"
x -= 1 -- decrement x
end
-- END
'''], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterTokenWriter))
txt = b''.join(lines)
self.assertEqual(b'''
x += 1 -- increment x
do
-- do stuff in here
print "stuff happens"
x -= 1 -- decrement x
end
-- END
''', txt)
def testIndentTableConstructor(self):
result = lua.Lua.from_lines([b'''
obj = {
foo=function(arg)
a = 1
b = 2
c = 3
end,
bar=function(arg, arg, arg)
d = 4
e = 5
end,
baz=999
}
'''], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterTokenWriter))
txt = b''.join(lines)
self.assertEqual(b'''
obj = {
foo = function(arg)
a = 1
b = 2
c = 3
end,
bar = function(arg, arg, arg)
d = 4
e = 5
end,
baz = 999
}
''', txt)
def testTooManyNewlines(self):
result = lua.Lua.from_lines([b'''
a = 1
b = 2
c = 3
'''], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterTokenWriter))
txt = b''.join(lines)
self.assertEqual(b'''
a = 1
b = 2
c = 3
''', txt)
def testAcceptsIndentWidthArg(self):
result = lua.Lua.from_lines([b'''
do
a = 1
while foo do
b = 2
if bar then
c = 3
elseif baz then
d = 4
repeat
e = 5
until bing
else
f = 6
end
end
g = 7
end
h = 8
'''], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterTokenWriter,
writer_args={'indentwidth': 3}))
txt = b''.join(lines)
self.assertEqual(b'''
do
a = 1
while foo do
b = 2
if bar then
c = 3
elseif baz then
d = 4
repeat
e = 5
until bing
else
f = 6
end
end
g = 7
end
h = 8
''', txt)
def testEliminateSpaceAroundSomePunctuation(self):
result = lua.Lua.from_lines([b'a = { "x" , y, -3, 4+5*6}'], 4)
lines = list(result.to_lines(writer_cls=lua.LuaFormatterTokenWriter))
txt = b''.join(lines)
self.assertEqual(b'a = {"x", y, - 3, 4 + 5 * 6}\n', txt)
if __name__ == '__main__':
unittest.main()
|
py
|
1a5a3383d30918dff72aa0efec62c8cad5adb0c6
|
"""The :mod:`interpreter` module defines the ``PushInterpreter`` used to run Push programs."""
import traceback
from typing import Union
import time
from enum import Enum
from pyshgp.push.instruction import Instruction
from pyshgp.push.program import Program
from pyshgp.push.state import PushState
from pyshgp.push.instruction_set import InstructionSet
from pyshgp.push.atoms import Atom, Closer, Literal, InstructionMeta, CodeBlock, Input
from pyshgp.push.config import PushConfig
from pyshgp.tap import tap
from pyshgp.validation import PushError
class PushInterpreterStatus(Enum):
"""Enum class of all potential statuses of a PushInterpreter."""
normal = 1
step_limit_exceeded = 2
runtime_limit_exceeded = 3
growth_cap_exceeded = 4
class PushInterpreter:
"""An interpreter capable of running Push programs.
Parameters
----------
instruction_set : Union[InstructionSet, str], optional
The ``InstructionSet`` to use for executing programs. Default is "core"
which instantiates an ``InstructionSet`` using all the core instructions.
Attributes
----------
instruction_set : InstructionSet
The ``InstructionSet`` to use for executing programs.
state : PushState
The current ``PushState``. Contains one stack for each ``PushType``
mentioned by the instructions in the instruction set.
status : PushInterpreterStatus
An enum value denoting whether the interpreter has encountered a situation
where non-standard termination was required.
"""
def __init__(self,
instruction_set: Union[InstructionSet, str] = "core",
reset_on_run: bool = True):
self.reset_on_run = reset_on_run
# If no instruction set given, create one and register all instructions.
if instruction_set == "core":
self.instruction_set = InstructionSet(register_core=True)
else:
self.instruction_set = instruction_set
self.type_library = self.instruction_set.type_library
# Initialize the PushState and status
self.state: PushState = None
self.status: PushInterpreterStatus = None
self._validate()
def _validate(self):
library_type_names = set(self.type_library.keys())
required_stacks = self.instruction_set.required_stacks() - {"stdout", "exec", "untyped"}
if not required_stacks <= library_type_names:
raise ValueError(
"PushInterpreter instruction_set and type_library are incompatible. {iset} vs {tlib}. Diff: {d}".format(
iset=required_stacks,
tlib=library_type_names,
d=required_stacks - library_type_names,
))
def _evaluate_instruction(self, instruction: Instruction, config: PushConfig):
self.state = instruction.evaluate(self.state, config)
def untyped_to_typed(self):
"""Infer ``PushType`` of items on state's untyped queue and push to corresponding stacks."""
while len(self.state.untyped) > 0:
el = self.state.untyped.popleft()
push_type = self.type_library.push_type_of(el, error_on_not_found=True)
self.state[push_type.name].push(el)
@tap
def evaluate_atom(self, atom: Atom, config: PushConfig):
"""Evaluate an ``Atom``.
Parameters
----------
atom : Atom
The Atom (``Literal``, ``InstructionMeta``, ``Input``, or ``CodeBlock``) to
evaluate against the current ``PushState``.
config : PushConfig
The configuration of the Push program being run.
"""
try:
if isinstance(atom, InstructionMeta):
self._evaluate_instruction(self.instruction_set[atom.name], config)
elif isinstance(atom, Input):
input_value = self.state.inputs[atom.input_index]
self.state.untyped.append(input_value)
elif isinstance(atom, CodeBlock):
for a in atom[::-1]:
self.state["exec"].push(a)
elif isinstance(atom, Literal):
self.state[atom.push_type.name].push(atom.value)
elif isinstance(atom, Closer):
raise PushError("Closers should not be in push programs. Only genomes.")
else:
raise PushError("Cannot evaluate {t}, require a subclass of Atom".format(t=type(atom)))
self.untyped_to_typed()
except Exception as e:
err_type = type(e)
err_msg = str(e)
traceback.print_exc()
raise PushError(
"{t} raised while evaluating {atom}. Original message: \"{m}\"".format(
t=err_type.__name__,
atom=atom,
m=err_msg
))
@tap
def run(self,
program: Program,
inputs: list,
print_trace: bool = False) -> list:
"""Run a Push ``Program`` given some inputs and desired output ``PushTypes``.
The general flow of this method is:
1. Create a new push state
2. Load the program and inputs.
3. If the exec stack is empty, return the outputs.
4. Else, pop the exec stack and process the atom.
5. Return to step 3.
Parameters
----------
program : Program
Program to run.
inputs : list
A sequence of values to use as inputs to the push program.
print_trace : bool
If True, each step of program execution will be summarized in stdout.
Returns
-------
Sequence
A sequence of values pulled from the final push state. May contain
pyshgp.utils.Token.no_stack_item if output stacks are empty.
"""
push_config = program.signature.push_config
if self.reset_on_run or self.state is None:
self.state = PushState(self.type_library, push_config)
self.status = PushInterpreterStatus.normal
# Setup
self.state.load_code(program.code)
self.state.load_inputs(inputs)
stop_time = time.time() + push_config.runtime_limit
steps = 0
if print_trace:
print("Initial State:")
self.state.pretty_print()
# Iterate atom evaluation until entire program is evaluated.
while len(self.state["exec"]) > 0:
# Stopping conditions
if steps > push_config.step_limit:
self.status = PushInterpreterStatus.step_limit_exceeded
break
if time.time() > stop_time:
self.status = PushInterpreterStatus.runtime_limit_exceeded
break
# Next atom in the program to evaluate.
next_atom = self.state["exec"].pop()
if print_trace:
start = time.time()
print("\nCurrent Atom: " + str(next_atom))
# Evaluate atom.
old_size = self.state.size()
self.evaluate_atom(next_atom, push_config)
if self.state.size() > old_size + push_config.growth_cap:
self.status = PushInterpreterStatus.growth_cap_exceeded
break
if print_trace:
duration = time.time() - start
print("Current State (step {step}):".format(step=steps))
self.state.pretty_print()
print("Step duration:", duration)
steps += 1
if print_trace:
print("Finished program evaluation.")
return self.state.observe_stacks(program.signature.output_stacks)
|
py
|
1a5a350c860422757d1e8abb51b539d62f2bc43c
|
import random
from flask import jsonify
from .rpsls import RPSLS
# Fixed pick Game Strategy
def fixed_strategy(pick_value):
pick_RPSLS=pick_value
def pick():
return pick_RPSLS
return pick
# Random pick Game Strategy
def random_strategy():
def pick():
pick_RPSLS = random.choice(list(RPSLS))
return pick_RPSLS
return pick
# Iterative pick Game Strategy
def iterative_generator(value):
while True:
yield value
value += 1
value = value % len(RPSLS)
def iterative_strategy():
pick_generator = iterative_generator(0)
def pick():
pick_RPSLS = RPSLS(next(pick_generator))
return pick_RPSLS
return pick
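# Illustrative usage sketch (not part of the original module); it assumes only
# that RPSLS is an enum-like class with at least one member, no member names
# are assumed:
#
#   always_same = fixed_strategy(list(RPSLS)[0])  # always returns the same pick
#   randomized = random_strategy()                # uniform random pick
#   cycling = iterative_strategy()                # walks through RPSLS values in order
#   picks = [always_same(), randomized(), cycling(), cycling()]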
|
py
|
1a5a3541df064de3ef7dafc0f4f05547e8819e75
|
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from os import path
from config import INPUT_PATH, OUTPUT_PATH
def draw_response_times_plot(input_file, output_file):
sns.set_style("ticks", {"'xtick.major.size'": "0"})
response_times = pd.read_csv(path.join(INPUT_PATH, input_file), sep=';')
# colors
td_beacon_color = "#08519C"
td_no_beacon_color = "#6BAED6"
td_untrained_color = "#006D2C"
bottom_up_color = "#74C476"
flatui = [td_beacon_color, td_no_beacon_color, td_untrained_color, bottom_up_color]
# Draw a boxplot
boxplot = sns.boxplot(x="Condition", y="ResponseTime", data=response_times, palette=sns.color_palette(flatui))
# set axes dimensions & labels
boxplot.set(ylim=(0, 35000))
boxplot.set(ylabel='Response Time in msec')
# remove lines around graph
sns.despine(bottom=True, trim=True)
# save output as file, in a high resolution
fig = boxplot.get_figure()
fig.savefig(path.join(OUTPUT_PATH, output_file), dpi=300, transparent=False)
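# Illustrative call (not part of the original module); it assumes a
# semicolon-separated CSV with 'Condition' and 'ResponseTime' columns exists
# under INPUT_PATH:
#
#   draw_response_times_plot('response_times.csv', 'response_times.png')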
|
py
|
1a5a35de412aa69349a0418ccd5ea59f1e708e2d
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
from test_framework.test_framework import BitstockTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
)
class BlockchainTest(BitstockTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
#self._test_getblockchaininfo()
self._test_gettxoutsetinfo()
self._test_getblockheader()
#self._test_getdifficulty()
self.nodes[0].verifychain(0)
def _test_getblockchaininfo(self):
self.log.info("Test getblockchaininfo")
keys = [
'bestblockhash',
'blocks',
'chain',
'chainwork',
'difficulty',
'headers',
'verificationprogress',
'warnings',
]
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if manual pruning is enabled
assert_equal(sorted(res.keys()), sorted(keys))
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('50000.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
assert_equal(res['bytes_serialized'], 14073)
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized']), 64)
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_rpc_error(-5, "Block not found",
node.getblockheader, "nonsense")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
#assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
#assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
if __name__ == '__main__':
BlockchainTest().main()
|
py
|
1a5a3615c6c5c40b3b47741fcb7379b1f23402ec
|
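# Queries a Windows service over PowerShell, asking PowerShell itself to emit
# compressed JSON, and treats output that starts with '{' as a valid
# single-object result.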
import subprocess, json, re
command = "Get-Service -Name Audiosrv -ComputerName asl-ad04"
p = subprocess.Popen(
[
"powershell.exe",
"({}) | ConvertTo-Json -Compress".format(command)
],
stdout=subprocess.PIPE
)
result = (p.communicate()[0]).decode('cp1252')
if re.search("^{", result):
print("Valido")
print(result)
|
py
|
1a5a367fbdc63d1679c33f5e7334420e90d4fc7d
|
#!/usr/bin/env python -tt
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
#PHENOS
"""
"""
def check_directories():
"""
Ensure all expected directories (and set-up files) are present and correct.
Create any paths that are missing.
"""
expected_directories=["DAT files",
"Genotypes",
"Layouts",
"Logs",
"Plots",
"rQTL input",
"Stinger files"]
for ed in expected_directories:
if not os.path.exists(ed):
#logging.info("Directory '{}' not found.".format(ed))
os.mkdir(ed)
#logging.info("Directory '{}' created.".format(ed))
#check_directories()
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(name='phenos',
version='3.3.0',
description='tools for handling solid media phenotyping data',
long_description=readme,
author='David B. H. Barton',
author_email='[email protected]',
url='http://github.com/gact/phenos',
license=license,
install_requires=['numpy>=1.9.2',
'scipy>=0.16.0c1',
'matplotlib>=1.4.3',
'tables>=3.2.0',
'xlrd>=0.9.3',
'brewer2mpl>=1.4.1',
'win32com'],
packages=['phenos'])
|
py
|
1a5a38c176e82aab8c9c142def5575014a08346f
|
from setuptools import setup
import os
import glob
package_name = 'rastreator_simulation'
setup(
name=package_name,
version='0.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(os.path.join('share', package_name, 'param'),glob.glob('param/*.yaml')),
(os.path.join('share', package_name, 'launch'),glob.glob('launch/*.launch.py')),
(os.path.join('lib', package_name, 'utils'),glob.glob('utils/*.py'))
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='iggyrrieta',
maintainer_email='[email protected]',
description='rastreator_simulation: Simulation package',
license='Apache 2.0',
entry_points={
'console_scripts': [
'ekf = rastreator_simulation.ekf_simulation:main',
'constant_cmd = rastreator_simulation.constant_cmd:main',
],
},
)
|
py
|
1a5a39200a675e04f5393cc33cb1edc057287aed
|
#!/usr/bin/env python3
"""The Graph package contains the Graph class
that carries the results from scrapping/exploring nlp models etc...
The class inherit from :obj:`rdflib.Graph`.
"""
import logging
from requests.utils import quote
import rdflib
import xlsxwriter
from rdflib.plugins.sparql.parser import parseQuery
from touch import touch
class Graph(rdflib.Graph):
"""same as a :obj:`rdflib.Graph` object (see https://rdflib.readthedocs.io/en/stable/intro_to_creating_rdf.html), but with a few additional methods
.. code:: python
>>> from lexicons_builder.graphs.graphs import Graph
RDFLib Version: 5.0.0
>>> g = Graph()
>>> # the graph has a __str__ method that serialize itself to ttl
>>> print(g)
@prefix ns1: <http://www.w3.org/2004/02/skos/core#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
<urn:default:baseUri:#holonym> a rdfs:Class ;
ns1:definition "A term that denotes a whole, a part of which is denoted by a second term. The word \"face\" is a holonym of the word \"eye\"." .
<urn:default:baseUri:#hypernym> a rdfs:Class ;
ns1:definition "a word with a broad meaning constituting a category into which words with more specific meanings fall; a superordinate. For example, colour is a hypernym of red." .
...
"""
local_namespace = "urn:default:baseUri:#"
root_words = []
root_word_uriref = rdflib.URIRef(f"{local_namespace}root_word")
base_local = rdflib.Namespace(local_namespace)
root_word_uri = f"{local_namespace}root_word_uri"
def __init__(
self, store="default", identifier=None, namespace_manager=None, base=None
):
super().__init__(
store=store,
identifier=identifier,
namespace_manager=namespace_manager,
base=base,
)
# add the root word,
self.add(
(
self.root_word_uriref,
rdflib.namespace.RDF.type,
rdflib.namespace.RDFS.Class,
)
)
self.add(
(
self.root_word_uriref,
rdflib.namespace.SKOS.definition,
rdflib.Literal(
"A root word is the term from which all of the words are fetched"
),
)
)
# hyponym
self.add(
(
self.base_local.hyponym,
rdflib.namespace.RDF.type,
rdflib.namespace.RDFS.Class,
)
)
self.add(
(
self.base_local.hyponym,
rdflib.namespace.SKOS.definition,
rdflib.Literal(
"Hyponymy is the converse of hypernymy. For example, red is a hyponym of color."
),
)
)
# hypernym
self.add(
(
self.base_local.hypernym,
rdflib.namespace.RDF.type,
rdflib.namespace.RDFS.Class,
)
)
self.add(
(
self.base_local.hypernym,
rdflib.namespace.SKOS.definition,
rdflib.Literal(
"a word with a broad meaning constituting a category into which words with more specific meanings fall; a superordinate. For example, colour is a hypernym of red."
),
)
)
# holonym
self.add(
(
self.base_local.holonym,
rdflib.namespace.RDF.type,
rdflib.namespace.RDFS.Class,
)
)
self.add(
(
self.base_local.holonym,
rdflib.namespace.SKOS.definition,
rdflib.Literal(
"""A term that denotes a whole, a part of which is denoted by a second term. The word "face" is a holonym of the word "eye"."""
),
)
)
def __contains__(self, word):
"""quick check to see if there's a word with a prefLabel predicate
that is the same as the word
>>> "book" in g
True"""
return self.word_in_graph(word)
def __str__(self):
"""quick way of serializing the graph to ttl"""
return self.to_str()
def __len__(self):
"return the number of words in the graph"
return len(self.to_list())
# did not implement __iter__ as some methods needs
# the default rdflib.Graph.__iter__()
# such as for s, p, o in self:
# def __iter__(self):
# "return the words in the graph"
# q_words = "SELECT ?word WHERE { ?_ <http://www.w3.org/2004/02/skos/core#prefLabel> ?word} ORDER BY ASC (?word)"
# for (w,) in self.query(q_words):
# yield str(w)
def word_in_graph(self, word: str) -> bool:
"""return :obj:`True` if the word is in the graph
.. code:: python
>>> g = Graph()
>>> g.add_root_word('dog')
>>> g.add_word('hound', 1, 'synonym', 'dog', comesFrom='http://example/com')
>>> g.word_in_graph('cat')
False
>>> g.word_in_graph('dog')
True
>>> # could be invoked with the in keyword
>>> 'dog' in g
True
"""
# checks if the word is already in the graph
assert isinstance(word, str), f"word is not str it is {type(word)}"
query_check = (
'ASK {?_ <http://www.w3.org/2004/02/skos/core#prefLabel> "' + word + '"}'
)
try:
parseQuery(query_check)
except Exception as e:
logging.error(f"Error while checking if the word '{word}' is in the graph")
logging.error(
f"the query '''{query_check}''' is could be badly formated OR you're using threads"
)
# the parseQuery function from rdflib could raise errors
# if used with threads
# see https://github.com/RDFLib/rdflib/issues/765
raise e
# print(f"checking if word '{word}' in graph")
if [_ for _ in self.query(query_check)][0]:
# print("it is already")
return True
else:
# print("it is not")
return False
def _check_word_type(self, word):
"raise a TypeError if type(word)!=str"
if not isinstance(word, str):
raise TypeError(
f"the word you're adding to the graph is not a string instance. It has a '{type(word)}' type"
)
def add_word(
self, word, depth, relation, target_word, synset_uri=None, comesFrom=None
):
"""Add some tripples to the graph that contains the relation between the word and its target.
Args:
word (str): The word to add to the graph
depth (int): The depth of the recursion
relation (str): The relation of the word to the target word.
Could be "hyponym", "hypernym", "holonym" or "synonym"
target_word (str): The word the new word is related to
.. code:: python
>>> g = Graph()
>>> g.add_root_word('car')
>>> print(g)
@prefix ns1: <http://www.w3.org/2004/02/skos/core#> .
<urn:default:baseUri:#root_word_uri> a <urn:default:baseUri:#root_word> ;
ns1:prefLabel "car" .
>>> g.add_word('bus', 1, 'synonym', 'car', comesFrom='http://example.com')
>>> print(g)
@prefix ns1: <http://www.w3.org/2004/02/skos/core#> .
@prefix ns2: <urn:default:baseUri:#> .
@prefix ns3: <http://taxref.mnhn.fr/lod/property/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
ns2:bus ns3:isSynonymOf ns2:root_word_uri ;
ns1:prefLabel "bus" ;
ns2:comesFrom <http://example.com> ;
ns2:depth 1 .
ns2:root_word_uri a ns2:root_word ;
ns1:prefLabel "car" .
"""
self._check_word_type(word)
# to avoid unvalid URI
# as some wordnet words do have unwanted characters
ss_word = quote(word)
ss_target_word = quote(target_word)
assert ss_word != ss_target_word
base_wn = rdflib.Namespace("http://www.w3.org/2006/03/wn/wn20/schema/")
if relation == "hyponym":
rela = base_wn.hyponymOf
elif relation == "hypernym":
rela = base_wn.hypernymOf
elif relation == "holonym":
rela = base_wn.holonymOf
elif relation == "synonym":
# word is synonym
rela = rdflib.URIRef("http://taxref.mnhn.fr/lod/property/isSynonymOf")
else:
raise ValueError(
f"The relation '{relation}' is not implemented in the graph"
)
if depth == 1:
# the relation is linked to the root word
target = rdflib.URIRef(self.root_word_uri)
else:
target = rdflib.URIRef(self.local_namespace + ss_target_word)
# adding the relation word is synonym/hyponym/... of target word
self.add(
(
rdflib.URIRef(self.local_namespace + ss_word),
rela,
target,
)
)
# adding the depth information
self.add(
(
rdflib.URIRef(self.local_namespace + ss_word),
self.base_local.depth,
rdflib.Literal(depth),
)
)
# adding the preflabel info
self.add(
(
rdflib.URIRef(self.local_namespace + ss_word),
rdflib.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
rdflib.Literal(word),
)
)
# adding the synset info
if synset_uri:
self.add(
(
rdflib.URIRef(self.local_namespace + ss_word),
self.base_local.synsetLink,
rdflib.URIRef(synset_uri),
)
)
# adding the website the data is comming from
if comesFrom:
self.add(
(
rdflib.URIRef(self.local_namespace + ss_word),
self.base_local.comesFrom,
rdflib.URIRef(comesFrom),
)
)
assert (
"<file:///home/k/Documents/lexicons_builder/"
not in self.serialize(format="ttl").decode()
)
def add_root_word(self, word: str):
"""Before searching for related terms, the root word
from which all synonyms come from should be added to the graph. This method creates rdf tripples for the root word
Args:
word (str): The root word to add to the graph
.. code:: python
>>> g = Graph()
>>> g.add_root_word("computer")
>>> print(g)
@prefix ns1: <http://www.w3.org/2004/02/skos/core#> .
<urn:default:baseUri:#root_word_uri> a <urn:default:baseUri:#root_word> ;
ns1:prefLabel "computer" .
"""
self._check_word_type(word)
self.add(
(
rdflib.URIRef(self.root_word_uri),
rdflib.RDF.type,
rdflib.URIRef(self.local_namespace + "root_word"),
)
)
self.add(
(
rdflib.URIRef(self.root_word_uri),
rdflib.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
rdflib.Literal(word),
)
)
self._set_root_word_attribute()
def is_empty(self) -> bool:
"""return :obj:`True` if the graph does not contain synonyms, hyponyms, etc
If the graph contains only root word(s) or no words at all, it still returns :obj:`True`
Note the graph contains some definitions by default
.. code:: python
>>> g = Graph()
>>> g.is_empty()
True
>>> g.add_root_word("new")
>>> g.is_empty()
True
>>> g.add_word("young", 1, "synonym", "new")
>>> g.is_empty()
False
"""
for _, p, _ in self:
if str(p) in (
"http://taxref.mnhn.fr/lod/property/isSynonymOf",
"http://www.w3.org/2006/03/wn/wn20/schema/hyponymOf",
"http://www.w3.org/2006/03/wn/wn20/schema/hypernymOf",
"http://www.w3.org/2006/03/wn/wn20/schema/holonymOf",
):
return False
else:
return True
# for s, o, p in self:
# break
# else:
# return True
def contains_synonyms(self) -> bool:
"""return :obj:`True` if the graph contains at least one synonym
.. code:: python
>>> g = Graph()
>>> g.add_root_word("new")
>>> g.contains_synonyms()
False
>>> g.add_word("young", 1, "synonym", "new")
>>> g.contains_synonyms()
True
"""
q_check = "ASK {?_ <http://taxref.mnhn.fr/lod/property/isSynonymOf> ?_2}"
return [r for r in self.query(q_check)][0]
def _set_root_word_attribute(self):
"""set the root_word and root_word_uri attributes
by looking at the self.graph"""
self.root_words = []
q_root = (
"SELECT ?uri ?pref WHERE {?uri a <"
+ self.local_namespace
+ """root_word> ;
<http://www.w3.org/2004/02/skos/core#prefLabel> ?pref }"""
)
res = [r for r in self.query(q_root)]
assert res, "The query to get the root word returned no results."
contains_root_word = False
for i, (uri, pref) in enumerate(res):
# self.root_word_uri = str(uri)
# self.root_word = str(pref)
self.root_words.append(str(pref))
contains_root_word = True
if not contains_root_word:
raise ValueError(f"The graph does not contain any root word")
# if i:
# logging.warning(
# f"The query to retrive the root word returned several results"
# )
# logging.warning(f"The root words are: {self.root_words}")
def delete_several_depth(self, method="MIN"):
"""Deletes words with several depths
Args:
method (str): aggregate used to pick which depth to keep; only the minimum depth ("MIN") is currently applied
.. code:: python
>>> g = Graph()
>>> g.add_root_word('car')
>>> g.add_word('bus', 1, 'synonym', 'car', comesFrom='http://example/com')
>>> g.add_word('bus', 2, 'synonym', 'car', comesFrom='http://example/com')
>>> print(g)
@prefix ns1: <urn:default:baseUri:#> .
@prefix ns2: <http://taxref.mnhn.fr/lod/property/> .
@prefix ns3: <http://www.w3.org/2004/02/skos/core#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
ns1:bus ns2:isSynonymOf ns1:car,
ns1:root_word_uri ;
ns3:prefLabel "bus" ;
ns1:comesFrom <http://example/com> ;
ns1:depth 1,
2 .
ns1:root_word_uri a ns1:root_word ;
ns3:prefLabel "car" .
>>> g.delete_several_depth()
>>> print(g)
@prefix ns1: <urn:default:baseUri:#> .
@prefix ns2: <http://taxref.mnhn.fr/lod/property/> .
@prefix ns3: <http://www.w3.org/2004/02/skos/core#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
ns1:bus ns2:isSynonymOf ns1:car,
ns1:root_word_uri ;
ns3:prefLabel "bus" ;
ns1:comesFrom <http://example/com> ;
ns1:depth 1 .
ns1:root_word_uri a ns1:root_word ;
ns3:prefLabel "car" .
"""
# TODO should be implemented using one sparql query
q_words = """SELECT ?uri ( COUNT(?depth) AS ?c )
WHERE {?uri <urn:default:baseUri:#depth> ?depth}
GROUP BY ?uri
ORDER BY ASC (?uri)"""
for uri, depth in self.query(q_words):
if int(depth) < 2:
# skipping the uri that do not have several
# depth properties
continue
q_d = (
"SELECT (MIN(?o) AS ?int) WHERE { <"
+ str(uri)
+ """> <urn:default:baseUri:#depth> ?o } """
)
cur_dep = [int(dep) for dep, in self.query(q_d)][0]
q_all_depth = (
"SELECT ?unwanted_depth WHERE { <"
+ str(uri)
+ "> <urn:default:baseUri:#depth> ?unwanted_depth }"
)
for (unwanted_tripple,) in self.query(q_all_depth):
if int(unwanted_tripple) == cur_dep:
continue
self.remove(
(uri, self.base_local.depth, rdflib.Literal(int(unwanted_tripple)))
)
def _get_maximum_origin(self) -> int:
"""return the number maximum of <comesFrom>
predicate for a graph
:return: number
:rtype: int
>>> print(g)
ns1:article ns3:isSynonymOf ns1:paper,
ns1:piece ;
ns2:prefLabel "article" ;
ns4:hypernymOf ns1:paper ;
ns4:hyponymOf ns1:section ;
ns1:comesFrom <file:///lexicons_builder/synonyms.com>,
<file:///lexicons_builder/synonyms.reverso.net>,
<http://wordnet-rdf.princeton.edu/> ;
ns1:depth 2 ;
ns1:synsetLink <http://wordnet-rdf.princeton.edu/pwn30/06269956-n>,
<http://wordnet-rdf.princeton.edu/pwn30/06392001-n> .
>>> g._get_maximum_origin()
3
"""
query_max = """
SELECT (COUNT(?origin) as ?oCount)
WHERE
{
?uri ?origin ?_ .
FILTER (strEnds(str(?origin), 'comesFrom'))
}
GROUP BY ?uri
"""
max_ = 0
for (count,) in self.query(query_max):
if int(count) > max_:
max_ = int(count)
return max_
def pop_non_relevant_words(self):
"""Delete from the graph the words might not be relevant.
To do this, the method will search for the highest number
of `<urn:default:baseUri:#comesFrom>` predicates per word
and remove from the graph all words whose number of `<urn:default:baseUri:#comesFrom>`
predicates are lower than the maximum found before.
.. code:: python
>>> # the graph was constructed using the words "book" and "newspaper"
>>> # searching on different resources (wordnet and synonyms dictionaries)
>>> len(g)
3904
>>> g.to_list()
['(catholic) douay bible', '(mohammedan) koran', '78', 'AFISR', 'AI', 'ARDA', 'Apocrypha', 'Aramaic', 'Aramaic_script' ...
>>> # most of the word are not relevant
>>> g.pop_non_relevant_words()
>>> len(g)
106
>>> g.to_list()
['account', 'allow', 'arrange', 'article', 'assign', 'authorisation', 'batch', 'book', 'booklet', 'brochure', 'cahier', 'capture', 'card', 'classify', 'collection', ...
>>> # much more relevant words
"""
max_ = self._get_maximum_origin()
query_number_of_origins = """
SELECT ?uri ?word (COUNT(?origin) as ?oCount)
WHERE {
?uri <http://www.w3.org/2004/02/skos/core#prefLabel> ?word ;
<urn:default:baseUri:#comesFrom> ?origin
}
GROUP BY ?uri
"""
for uri, word, count in self.query(query_number_of_origins):
if int(count) < max_ - 1:
self.remove((uri, None, None))
def to_list(self) -> list:
"""return a list of all the prefLabels in the graph
>>> g = Graph()
>>> g.add_root_word('car')
>>> g.add_word('bus', 1, 'synonym', 'car', comesFrom='http://example/com')
>>> g.add_word('truck', 1, 'synonym', 'car', comesFrom='http://example/com')
>>> g.add_word('vehicle', 1, 'synonym', 'car', comesFrom='http://example/com')
>>> g.to_list()
['bus', 'car', 'truck', 'vehicle']
"""
q_words = "SELECT ?word WHERE { ?_ <http://www.w3.org/2004/02/skos/core#prefLabel> ?word} ORDER BY ASC (?word)"
return [str(w) for w, in self.query(q_words)]
# note that even that's less elegant, python's sorted function
# works faster than sparql engine's ORDER BY
# q_words = "SELECT ?word WHERE { ?_ <http://www.w3.org/2004/02/skos/core#prefLabel> ?word}"
# return sorted([str(w) for w, in self.query(q_words)])
def to_str(self) -> str:
"""return a string containing the serialized graph in the turtle format
Note that during the serialization, some items might get a file:///
string in their properties; this means the main graph has been merged
from different graph files
>>> g = Graph()
>>> g.add_root_word('dog')
>>> str(g)
'@prefix ns1: <http://www.w3.org/2004/02/skos/core#> .\\n\\n<urn:default:baseUri:#root_word_uri> a <urn:default:baseUri:#root_word> ;\\n ns1:prefLabel "dog" .\\n\\n'
"""
str_ = self.serialize(format="ttl").decode()
return str_
def to_text_file(self, out_file=None):
"""write the graph to the path provided.
Args:
out_file (str, optional): The outfile path. If None, returns the string
Example of file:
.. code:: python
book # the root word
Bible # a 1st rank synonym, linked to 'book'
Holy_Writ # a 2nd rank synonym, linked to 'Bible'
Scripture # a 2nd rank synonym, linked to 'Bible'
Word # a 2nd rank synonym, linked to 'Bible'
Epistle # a 1st rank synonym, linked to 'book'
letter # a 2nd rank synonym, linked to 'Epistle'
missive # a 2nd rank synonym, linked to 'Epistle'
"""
touch(out_file)  # touch() also accepts None, so a missing out_file is fine here
def rec_search(uri, str_=None, dep=None, uri_used=None):
# avoid a shared mutable default list across calls
if uri_used is None:
uri_used = []
q_words = (
"""SELECT ?uri ?pref ?dep WHERE {
?uri <http://www.w3.org/2004/02/skos/core#prefLabel> ?pref ;
<urn:default:baseUri:#depth> ?dep .
?uri ?relation <"""
+ uri
+ "> } ORDER BY ASC (?pref) "
)
if not str_:
str_ = ""
res = [r for r in self.query(q_words)]
for new_uri, word, dep in res:
new_uri = str(new_uri)
word = str(word)
dep = int(dep)
assert type(dep) == int
assert type(word) == type(new_uri) == str
if new_uri in uri_used:
continue
uri_used.append(new_uri)
str_ += "\t" * dep + word + "\n"
str_ = rec_search(new_uri, str_, dep, uri_used=uri_used)
return str_
if not hasattr(self, "root_words") or not getattr(self, "root_words"):
self._set_root_word_attribute()
text = rec_search(self.root_word_uri, "\n".join(self.root_words) + "\n")
if out_file:
with open(out_file, "w") as f:
print(text, file=f)
else:
return text
logging.info(f"out file is: '{out_file}'")
def to_xlsx_file(self, out_file: str):
"""Save the graph to an excel file
Args:
out_file (str): The outfile path
"""
self._set_root_word_attribute()
workbook = xlsxwriter.Workbook(out_file)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, "root word(s)")
worksheet.write(0, 1, ", ".join(self.root_words))
q_words_depth = """SELECT ?word ?depth
WHERE { ?_ <http://www.w3.org/2004/02/skos/core#prefLabel> ?word ;
<urn:default:baseUri:#depth> ?depth ;
}
ORDER BY ASC (?word)"""
for i, (word, depth,) in enumerate(
self.query(q_words_depth), start=2
): # origin
worksheet.write(i, 0, word)
worksheet.write(i, 1, depth)
# worksheet.write(i, 2, origin)
workbook.close()
logging.info(f"out file is: '{out_file}'")
if __name__ == "__main__":
pass
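# Minimal usage sketch (illustrative only, not part of the original module),
# based on the doctest examples above:
#
#   g = Graph()
#   g.add_root_word("book")
#   g.add_word("volume", 1, "synonym", "book", comesFrom="http://example.com")
#   print(g.to_list())        # -> ['book', 'volume']
#   g.to_text_file("book_synonyms.txt")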
|
py
|
1a5a39f0f129eb6a28a7f7ba45888da1763ed82b
|
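# Dense-ranking leaderboard placement: duplicate scores are collapsed into a
# descending list of unique scores, then each of Alice's scores is placed with
# a manual binary search over that list.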
m = int(input())
scores = list(map(int,input().split()))
scores = sorted(set(scores),reverse = True)
m=len(scores)
n = int(input())
alice = list(map(int,input().split()))
for score in alice:
if score >= scores[0] :
print (1)
elif score == scores[-1] :
print (m)
elif score < scores[-1] :
print (m+1)
else :
b=0
e=m-1
while(b<=e) :
mid=(b+e)//2
if(scores[mid] == score):
print (mid+1)
break
elif(scores[mid] > score):
if(scores[mid+1] < score):
print (mid+2)
break
b = mid + 1
else:
if(scores[mid-1] > score):
print (mid+1)
break
e = mid - 1
|
py
|
1a5a3af4eeda98a4de2258dcc34c52610d642de3
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-04-18 12:50
from __future__ import unicode_literals
from functools import partial
from django.db import migrations, models
LANG_FORWARD_MAP = {
'ENG': 'ENEN',
'CHI': 'ZHZH',
}
LANG_REVERSE_MAP = {
'ENEN': 'ENG',
'ZHZH': 'CHI',
'ZHEN': 'CHI',
'TAI': 'CHI',
}
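# Data-migration helper: rewrites the `language` field of existing Talk and
# Tutorial proposals, mapping the old codes forward on upgrade and back on
# downgrade (the reverse map is lossy: ENEN maps back to ENG, while ZHZH, ZHEN
# and TAI all collapse to CHI).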
def lang_map_func(apps, schema_editor, lang_mapping=None):
TalkProposal = apps.get_model('proposals', 'TalkProposal')
TutorialProposal = apps.get_model('proposals', 'TutorialProposal')
db_alias = schema_editor.connection.alias
for Proposal in [TalkProposal, TutorialProposal]:
for old_lang, new_lang in lang_mapping.items():
Proposal.objects.using(db_alias).filter(
language=old_lang,
).update(
language=new_lang,
)
class Migration(migrations.Migration):
dependencies = [
('proposals', '0024_auto_20160307_0412'),
]
operations = [
migrations.AlterField(
model_name='talkproposal',
name='language',
field=models.CharField(choices=[('ENEN', 'English talk'), ('ZHEN', 'Chinese talk w. English slides'), ('ZHZH', 'Chinese talk w. Chinese slides'), ('TAI', 'Taiwanese Hokkien')], max_length=4, verbose_name='language'),
),
migrations.AlterField(
model_name='tutorialproposal',
name='language',
field=models.CharField(choices=[('ENEN', 'English talk'), ('ZHEN', 'Chinese talk w. English slides'), ('ZHZH', 'Chinese talk w. Chinese slides'), ('TAI', 'Taiwanese Hokkien')], max_length=4, verbose_name='language'),
),
migrations.RunPython(
partial(lang_map_func, lang_mapping=LANG_FORWARD_MAP),
partial(lang_map_func, lang_mapping=LANG_REVERSE_MAP),
),
]
|
py
|
1a5a3b104e6f23613113ccea5abad041491f76df
|
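# Happy-number check: repeatedly replace n by the sum of the squares of its
# digits; n is happy if this reaches 1, and unhappy if a previously seen sum
# repeats (a cycle). square_list records the sums seen so far. The snippet is
# written as a method, so it expects to live inside a LeetCode-style Solution
# class.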
def isHappy(self, n: int) -> bool:
square_list = []
while True :
sq = 0
while n :
rem = n % 10
n = n // 10
sq += rem ** 2
if sq == 1:
return True
if sq in square_list:
return False
square_list.append(sq)
n = sq
|
py
|
1a5a3cca647710e91b1e44a3acc419b39470d27f
|
import multiprocessing
import os
import subprocess
import traceback
from itertools import product
import numpy as np
import seaborn
import torch
from matplotlib import pyplot as plt
seaborn.set()
SMALL_SIZE = 18
MEDIUM_SIZE = 22
BIGGER_SIZE = 26
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
def get_gpu_memory():
result = subprocess.check_output(
[
'nvidia-smi', '--query-gpu=memory.free',
'--format=csv,nounits,noheader'
])
gpu_memory = [int(x) for x in result.decode().strip().split()]
gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
return gpu_memory_map
LOADER_WORKERS = 4
# PIN_MEMORY = True
PIN_MEMORY = False
device = None
def get_device():
global device
if device is None:
print(f'{multiprocessing.cpu_count()} CPUs')
print(f'{torch.cuda.device_count()} GPUs')
if torch.cuda.is_available():
device = 'cuda:0'
# torch.set_default_tensor_type(torch.cuda.FloatTensor)
for k, v in get_gpu_memory().items():
print(f'Device {k} memory: {v} MiB')
torch.backends.cudnn.benchmark = True
else:
# torch.set_default_tensor_type(torch.FloatTensor)
device = 'cpu'
print(f'Using: {device}')
return device
def loader(data, batch_size):
return torch.utils.data.DataLoader(dataset=data, batch_size=batch_size,
shuffle=True,
pin_memory=PIN_MEMORY,
num_workers=LOADER_WORKERS)
def load_or_run(dir_name, run_name, method, *args, **kwargs):
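# Checkpoint helper: if a state file named '<run_name>@state' exists in
# dir_name, load it with torch.load and hand it to `method` as the starting
# context; otherwise start from a fresh context. `method` must return a
# (context, exception_or_None) pair.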
os.makedirs(dir_name, exist_ok=True)
filepath = os.path.join(dir_name, f'{run_name}@state')
print(f'State file: {filepath}')
loaded = False
if os.path.isfile(filepath):
try:
with open(filepath, 'rb') as f:
context = torch.load(f, map_location=get_device())
loaded = True
except Exception:
print(f'Exception when loading {filepath}')
traceback.print_exc()
if not loaded:
context = {}
context['model_state'] = None
context['run_name'] = run_name
context['dir_name'] = dir_name
# TODO maybe move arguments into context?
context, ex = method(context, *args, **kwargs)
if ex is not None:
raise ex
if 'exception' in context:
print(context['traceback'])
return context
def load_or_run_n(n, dir_name, run_name, method, *args, **kwargs):
results = []
for i in range(n):
name = f'{run_name}_{i}'
results.append(load_or_run(dir_name, name, method, *args, **kwargs))
return results
def matrix_to_figure(matrix, xlabel="", ylabel=""):
matrix = matrix.cpu().numpy()
fig, ax = plt.subplots(figsize=(16, 16), facecolor='w', edgecolor='k')
ax.imshow(matrix, cmap='Spectral_r', vmin=-1, vmax=1)
# set x axis
ax.set_xticks(np.arange(matrix.shape[1]))
ax.set_xticklabels([str(i) for i in np.arange(matrix.shape[1])], fontsize=18)
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel(xlabel)
# set y axis
ax.set_yticks(np.arange(matrix.shape[0]))
ax.set_yticklabels([str(i) for i in np.arange(matrix.shape[0])], fontsize=18)
ax.yaxis.set_label_position('left')
ax.yaxis.tick_left()
ax.set_ylabel(ylabel)
# plot text
for i, j in product(range(matrix.shape[0]), range(matrix.shape[1])):
ax.text(j, i, f'{matrix[i, j]:4.2f}' if matrix[i, j] != 0 else '.', horizontalalignment='center', fontsize=14,
verticalalignment='center', color='black')
ax.autoscale()
fig.set_tight_layout(True)
return fig
def cs_vec_to_figure(cs_vec, xlabel=""):
cs_vec = cs_vec.cpu().numpy()
fig, ax = plt.subplots(figsize=(22, 2), facecolor='w', edgecolor='k')
ax.imshow(cs_vec.reshape(1, -1), cmap='Spectral_r', vmin=-1, vmax=1)
ax.set_xticks(np.arange(cs_vec.shape[0]))
ax.set_xticklabels([str(i) for i in np.arange(cs_vec.shape[0])], fontsize=18)
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel(xlabel)
ax.set_yticks([])
for idx in range(len(cs_vec)):
ax.text(idx, 0, f'{cs_vec[idx]:4.2f}' if cs_vec[idx] != 0 else '.', horizontalalignment='center', fontsize=14,
verticalalignment='center', color='black')
ax.autoscale()
fig.set_tight_layout(True)
return fig
|
py
|
1a5a3d7112cc7deb1742b1774162b4f13d764126
|
"""
WSGI config for victory project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "victory.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
py
|
1a5a3d98a227f9b1e5d45a60a3115da10b9a5e83
|
"""HTTP utilities."""
from ipaddress import ip_address
from .const import (
KEY_REAL_IP, KEY_USE_X_FORWARDED_FOR, HTTP_HEADER_X_FORWARDED_FOR)
def get_real_ip(request):
"""Get IP address of client."""
if KEY_REAL_IP in request:
return request[KEY_REAL_IP]
if (request.app[KEY_USE_X_FORWARDED_FOR] and
HTTP_HEADER_X_FORWARDED_FOR in request.headers):
request[KEY_REAL_IP] = ip_address(
request.headers.get(HTTP_HEADER_X_FORWARDED_FOR).split(',')[0])
else:
peername = request.transport.get_extra_info('peername')
if peername:
request[KEY_REAL_IP] = ip_address(peername[0])
else:
request[KEY_REAL_IP] = None
return request[KEY_REAL_IP]
|
py
|
1a5a3f1427e15f4726a29323c44c6af887e3045d
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 7 14:19:36 2021
@author: bressler
"""
from PICOcode.REFPROP.SeitzModel import SeitzModel
import numpy as np
import matplotlib.pyplot as plt
from baxterelectronrecoilmodel import BTM
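# Reads spike/background run summaries from argonspikeoutput.txt, pairs each
# source run with its matching background run (same concentration and
# pressure), subtracts the background rate, propagates the rate uncertainties
# in quadrature, and writes the Seitz threshold plus background-subtracted
# rates to subtractedargonoutput.txt.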
with open('/coupp/data/home/coupp/users/bressler/output/argonspikeoutput.txt','r') as argonfile:
d = argonfile.readlines()
data = {}
for line in d:
elements = line.split()
#print([float(e) for e in elements])
if int(elements[1]) == 0:
data["Background 20C %.1f ppm %.1f psia"%(float(elements[0]), float(elements[3]))] = [float(elements[i]) for i in range(len(elements))]
elif int(elements[1]) == 137:
data["Cs-137 20 position %d %.1f ppm %.1f psia"%(int(elements[2]), float(elements[0]), float(elements[3]))] = [float(elements[i]) for i in range(len(elements))]
elif int(elements[1]) == 244:
data["Cm-244 20 %.1f ppm %.1f psia"%(float(elements[0]), float(elements[3]))] = [float(elements[i]) for i in range(len(elements))]
subtractedData = {}
subtractedargonfile = open('/coupp/data/home/coupp/users/bressler/output/subtractedargonoutput.txt','w')
for k in data.keys():
datum = data[k]
#print(datum)
if datum[1] != 0.0:
associatedbg = data["Background 20C %.1f ppm %.1f psia"%(float(datum[0]), float(datum[3]))]
bgsubrate = datum[7] - associatedbg[7]
bgsubrateerror = np.sqrt(datum[8]**2 + associatedbg[8]**2)
#print(bgsubrate)
[Qseitz, Eion, f_ion, P_ion, f_seitz, P_contaminated] = BTM(datum[4]-1.3, datum[5], 'r218')
print("P=%.2f"%datum[4])
print("T=%.2f"%datum[5])
print("Qseitz=%.3f"%Qseitz)
subtractedData[k] = [datum[0], datum[1], datum[2], datum[3], datum[4], datum[5], Qseitz, f_seitz, bgsubrate, bgsubrateerror]
subtractedargonfile.write(
'%.1f %d %d %.1f %.1f %.1f %.2f %.2f %.2f %.2f\n'%(datum[0],
datum[1], datum[2], datum[3], datum[4]-1.3, datum[5], Qseitz, f_seitz, bgsubrate, bgsubrateerror))
subtractedargonfile.close()
|
py
|
1a5a403a3414e8568ad282908e4a39010e5310e7
|
'''
Created on Apr 4, 2022
@author: mballance
'''
import dataclasses
from rctgen.impl.ctor import Ctor
from rctgen.impl.type_info import TypeInfo
from rctgen.impl.type_kind_e import TypeKindE
from rctgen.impl.exec_group import ExecGroup
from rctgen.impl.rand_t import RandT
from rctgen.impl.scalar_t import ScalarT
from libvsc import core as vsc
from rctgen.impl.pool_t import PoolT
from rctgen.impl.struct_kind_e import StructKindE
from rctgen.impl.lock_share_t import LockShareT
class DecoratorImplBase(object):
def __init__(self, kind):
self._kind = kind
self._supports_constraints = True
def populate_execs(self, ti : TypeInfo, supported_s):
return None
def __call__(self, T):
ctor = Ctor.inst()
Tp = dataclasses.dataclass(T, init=False)
ds_t = self._mkLibDataType(T, T.__qualname__, ctor.ctxt())
ti = self._mkTypeInfo(self._kind)
setattr(T, "_typeinfo", ti)
ti.lib_obj = ds_t
self._populateFields(ti, Tp)
#************************************************************
#* Populate constraints from this type and base types
#************************************************************
constraints = Ctor.inst().pop_constraint_decl()
constraint_s = set()
for c in constraints:
constraint_s.add(c._name)
ti._constraint_l.append(c)
for b in T.__bases__:
if hasattr(b, "_typeinfo"):
self._populateConstraints(
ti,
b,
constraint_s)
#************************************************************
#* Populate exec blocks from this type and base types
#************************************************************
execs = Ctor.inst().pop_exec_types()
for e in execs:
print("Exec: %s" % str(e.kind))
if not self._validateExec(e.kind):
raise Exception("Unsupported exec kind %s" % str(e.kind))
if e.kind not in ti._exec_m.keys():
ti._exec_m[e.kind] = ExecGroup(e.kind)
ti._exec_m[e.kind].add_exec(e)
for b in T.__bases__:
if hasattr(b, "_typeinfo"):
self._populateExecs(
ti,
b)
return Tp
def _validateExec(self, kind):
return True
def _validateField(self, name, type, is_rand):
return True
def _mkTypeInfo(self, kind : TypeKindE):
return TypeInfo(kind)
def _mkLibDataType(self, T, name, ctxt):
raise NotImplementedError("_mkLibDataType not implemented for %s" % str(type(self)))
def _populateFields(self, ti : TypeInfo, T):
for f in dataclasses.fields(T):
attr = vsc.ModelFieldFlag.NoFlags
is_rand = False
iv=0
t = f.type
if issubclass(t, RandT):
t = t.T
attr |= vsc.ModelFieldFlag.DeclRand
is_rand = True
ctor = Ctor.inst()
print("f: %s" % str(f))
# The signature of a creation function is:
# - name
# - is_rand
# - idx
if issubclass(t, ScalarT):
self._processFieldScalar(ti, f, attr, t)
elif issubclass(t, PoolT):
self._processFieldPool(ti, f, attr, t)
elif issubclass(t, LockShareT):
print("LockShare!")
self._processFieldLockShare(ti, f, attr, t)
elif hasattr(t, "_typeinfo") and isinstance(t._typeinfo, TypeInfo):
# This is a field of user-defined type
print("Has TypeInfo")
field_t = ctor.ctxt().mkTypeField(
f.name,
t._typeinfo.lib_obj,
attr,
None)
ti.lib_obj.addField(field_t)
ti._field_ctor_l.append((f.name, lambda name, t=t: t._createInst(t, name)))
print("Field: %s" % str(f))
pass
def _processFieldLockShare(self, ti, f, attr, t):
ctor = Ctor.inst()
if hasattr(t.T, "_typeinfo"):
print("Kind: %s" % str(t.T._typeinfo._kind))
claim_t = t.T._typeinfo.lib_obj
else:
raise Exception("Type %s is not a PyRctGen type" % t.T.__qualname__)
if f.default is not dataclasses.MISSING:
print("default: %s" % str(f.default))
raise Exception("Lock/Share fields cannot be assigned a value")
field_t = ctor.ctxt().mkTypeFieldClaim(
f.name,
claim_t,
t.IsLock)
ti.lib_obj.addField(field_t)
ti._field_ctor_l.append((f.name, t.createField))
pass
def _processFieldPool(self, ti, f, attr, t):
ctor = Ctor.inst()
decl_size = -1
pool_t = None
if hasattr(t.T, "_typeinfo"):
print("Kind: %s" % str(t.T._typeinfo._kind))
pool_t = t.T._typeinfo.lib_obj
else:
raise Exception("Type %s is not a PyRctGen type" % t.T.__qualname__)
if f.default is not dataclasses.MISSING:
if t.T._typeinfo._kind != StructKindE.Resource:
raise Exception("Only resource pools may be given a size. Pool %s is of kind %s" % (
f.name, t.T._typeinfo._kind))
decl_size = int(f.default)
field_t = ctor.ctxt().mkTypeFieldPool(
f.name,
pool_t,
attr,
decl_size)
ti.lib_obj.addField(field_t)
ti._field_ctor_l.append((f.name, t.createField))
def _processFieldScalar(self, ti, f, attr, t):
ctor = Ctor.inst()
lt = ctor.ctxt().findDataTypeInt(t.S, t.W)
if lt is None:
lt = ctor.ctxt().mkDataTypeInt(t.S, t.W)
ctor.ctxt().addDataTypeInt(lt)
iv_m = None
if f.default is not dataclasses.MISSING:
iv_m = ctor.ctxt().mkModelVal()
iv_m.setBits(t.W)
if t.S:
iv_m.set_val_i(int(f.default))
else:
iv_m.set_val_u(int(f.default))
field_t = ctor.ctxt().mkTypeField(
f.name,
lt,
attr,
iv_m)
ti.lib_obj.addField(field_t)
ti._field_ctor_l.append((f.name, t.createField))
def _populateExecs(self, ti, T):
T_ti = T._typeinfo
for kind in T_ti._exec_m.keys():
# If the target type hasn't registered an exec of this kind,
# but a base type has, then link that up
if kind not in ti._exec_m.keys():
ti._exec_m[kind] = T_ti._exec_m[kind]
elif ti._exec_m[kind].super is None:
# Link the first available super-type exec to the
# 'super' link
ti._exec_m[kind].super = T_ti._exec_m[kind]
# Now, continue working back through the inheritance hierarchy
for b in T.__bases__:
if hasattr(b, "_typeinfo"):
self._populateExecs(
ti,
b)
def _populateConstraints(self, ti, T, name_s):
T_ti = T._typeinfo
for c in T_ti._constraint_l:
if c._name not in name_s:
name_s.add(c._name)
ti._constraint_l.append(c)
for b in T.__bases__:
if hasattr(b, "_typeinfo"):
self._populateConstraints(
ti,
b,
name_s)
|
py
|
1a5a407bb73a9619ace9560821c162f13f77d413
|
from .base import BaseZRP
from .acs_mapper import ACSModelPrep
from .geo_geocoder import ZGeo
from .preprocessing import ProcessStrings, ProcessGeo, ProcessACS
from .prepare import ZRP_Prepare
__all__ = ['BaseZRP','ZRP_Prepare', 'ProcessStrings', 'ProcessGeo', 'ProcessACS', 'ACSModelPrep' ]
|
py
|
1a5a40aedfe2b697ad129fd263f09acc73b538ad
|
"""Auto-generated file, do not edit by hand. NR metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_NR = PhoneMetadata(id='NR', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2}', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='11[0-2]', example_number='110', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:1[0-2]|23|92)', example_number='110', possible_length=(3,)),
short_data=True)
|
py
|
1a5a40e5248f98d06d5716161cb6611d650da895
|
# Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class NotificationControllerBase(object):
"""Top-level class for controllers.
:param driver: Instance of the driver
instantiating this controller.
"""
def __init__(self, driver):
self._driver = driver
@property
def driver(self):
return self._driver
|
py
|
1a5a412b6911af23b7aa41743f19698c00c24f29
|
import os
import sys
import matplotlib.pyplot as plt
# save the current figure alongside the running script, using the script's name, as both PDF and PNG
def save(plt_local):
figure_name, ext = os.path.splitext(sys.argv[0])
plt_local.savefig(figure_name+'.pdf')
plt_local.savefig(figure_name+'.png')
def div_thousand(values):
return [val / 1000.0 for val in values]
|
py
|
1a5a41740da00ebd28c9c8697bc3b59a2860db41
|
import unittest
from cybercaptain.processing.filter import processing_filter
class ProcessingFilterEQTest(unittest.TestCase):
"""
Test the filters for EQ
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
arguments = {'src': '.',
'filterby': 'EQ',
'rule': 'EQ 500', # the value must be exactly 500
'target': '.'}
self.processing = processing_filter(**arguments)
def test_eq_positive(self):
"""
Test if the filter passes EQ correctly.
"""
# border line test
self.assertTrue(self.processing.filter({"EQ":500}), 'should not be filtered')
def test_eq_negative(self):
"""
Test if the filter fails EQ correctly.
"""
# border line test
self.assertFalse(self.processing.filter({"EQ":501}), 'should be filtered')
self.assertFalse(self.processing.filter({"EQ":499}), 'should be filtered')
# deep test
self.assertFalse(self.processing.filter({"EQ":600}), 'should be filtered')
self.assertFalse(self.processing.filter({"EQ":400}), 'should be filtered')
|
py
|
1a5a4294ee4ed21724cde2666ac4d6fe68e48c53
|
"""empty message
Revision ID: fdf96fced099
Revises: 4bf2b3ccc928
Create Date: 2021-01-22 12:06:22.413050
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'fdf96fced099'
down_revision = '4bf2b3ccc928'
branch_labels = None
depends_on = None
def upgrade():
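# New columns are introduced in three steps so existing rows stay valid:
# add the column as nullable, backfill a value for existing rows, then
# tighten it to NOT NULL.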
op.add_column('dataset_generation_job', sa.Column('generator_type', sa.String(), nullable=True))
op.execute('UPDATE dataset_generation_job SET generator_type = \'MPCI\'')
op.alter_column('dataset_generation_job', 'generator_type', nullable=False)
op.add_column('dataset_generation_job', sa.Column('parameters', sa.JSON(), nullable=True))
op.execute('UPDATE dataset_generation_job SET parameters = \'{}\'')
op.alter_column('dataset_generation_job', 'parameters', nullable=False)
op.drop_column('dataset_generation_job', 'edgeValueLowerBound')
op.drop_column('dataset_generation_job', 'edgeValueUpperBound')
op.drop_column('dataset_generation_job', 'nodes')
op.drop_column('dataset_generation_job', 'samples')
op.drop_column('dataset_generation_job', 'edgeProbability')
def downgrade():
op.add_column('dataset_generation_job', sa.Column('edgeProbability', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('dataset_generation_job', sa.Column('samples', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('dataset_generation_job', sa.Column('nodes', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('dataset_generation_job', sa.Column('edgeValueUpperBound', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('dataset_generation_job', sa.Column('edgeValueLowerBound', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.execute('UPDATE dataset_generation_job SET "edgeProbability" = -1, samples = -1, nodes = -1, "edgeValueUpperBound" = -1, "edgeValueLowerBound" = -1')
op.alter_column('dataset_generation_job', 'edgeProbability', nullable=False)
op.alter_column('dataset_generation_job', 'samples', nullable=False)
op.alter_column('dataset_generation_job', 'nodes', nullable=False)
op.alter_column('dataset_generation_job', 'edgeValueUpperBound', nullable=False)
op.alter_column('dataset_generation_job', 'edgeValueLowerBound', nullable=False)
op.drop_column('dataset_generation_job', 'parameters')
op.drop_column('dataset_generation_job', 'generator_type')
|
py
|
1a5a42a9434aa45c3989313b027906079be2ea35
|
"""
@author: Maneesh D
@date: 5/26/2017
@intepreter: Python 3.6
Interactive Quiz Application
"""
from random import randint, choices
from sys import exit
def addition(difficulty, questions):
if difficulty == "e":
while questions:
try:
a = randint(0, 30)
b = randint(0, 10)
answer = int(input("What's %d + %d?\nAnswer: " % (a, b)))
if answer == (a + b):
print("Correct :-)\n")
else:
print("Wrong :-(\n")
questions -= 1
except ValueError:
print("Trying to be too smart???\n")
questions -= 1
continue
except KeyboardInterrupt:
print("\n!!! USER CANCELLATION !!!")
exit(1)
elif difficulty == "i":
while questions:
try:
a = randint(30, 100)
b = randint(10, 50)
answer = int(input("What's %d + %d?\nAnswer: " % (a, b)))
if answer == (a + b):
print("Correct :-)\n")
else:
print("Wrong :-(\n")
questions -= 1
except ValueError:
print("Trying to be too smart???\n")
questions -= 1
continue
except KeyboardInterrupt:
print("\n!!! USER CANCELLATION !!!")
exit(1)
else:
while questions:
try:
primes = [x for x in range(100, 1000) if all(x % y != 0 for y in range(2, int(x**0.5)+1))]
nums = choices(primes, k=2)
a = nums[0]
b = nums[1]
answer = int(input("What's %d + %d?\nAnswer: " % (a, b)))
if answer == (a + b):
print("Correct :-)\n")
else:
print("Wrong :-(\n")
questions -= 1
except ValueError:
print("Trying to be too smart???\n")
questions -= 1
continue
except KeyboardInterrupt:
print("\n!!! USER CANCELLATION !!!")
exit(1)
return 0
def subtraction(difficulty, questions):
if difficulty == "e":
while questions:
try:
a = randint(0, 30)
b = randint(0, 10)
answer = int(input("What's %d - %d?\nAnswer: " % (a, b)))
if answer == (a - b):
print("Correct :-)\n")
else:
print("Wrong :-(\n")
questions -= 1
except ValueError:
print("Trying to be too smart???\n")
questions -= 1
continue
except KeyboardInterrupt:
print("\n!!! USER CANCELLATION !!!")
exit(1)
elif difficulty == "i":
while questions:
try:
a = randint(30, 100)
b = randint(10, 50)
answer = int(input("What's %d - %d?\nAnswer: " % (a, b)))
if answer == (a - b):
print("Correct :-)\n")
else:
print("Wrong :-(\n")
questions -= 1
except ValueError:
print("Trying to be too smart???\n")
questions -= 1
continue
except KeyboardInterrupt:
print("\n!!! USER CANCELLATION !!!")
exit(1)
else:
while questions:
try:
primes = [x for x in range(100, 1000) if all(x % y != 0 for y in range(2, int(x**0.5)+1))]
nums = choices(primes, k=2)
a = nums[0]
b = nums[1]
answer = int(input("What's %d - %d?\nAnswer: " % (a, b)))
if answer == (a - b):
print("Correct :-)\n")
else:
print("Wrong :-(\n")
questions -= 1
except ValueError:
print("Trying to be too smart???\n")
questions -= 1
continue
except KeyboardInterrupt:
print("\n!!! USER CANCELLATION !!!")
exit(1)
return 0
def multiplication(difficulty, questions):
if difficulty == "e":
while questions:
try:
a = randint(0, 30)
b = randint(0, 20)
answer = int(input("What's %d * %d?\nAnswer: " % (a, b)))
if answer == (a * b):
print("Correct :-)\n")
else:
print("Wrong :-(\n")
questions -= 1
except ValueError:
print("Trying to be too smart???\n")
questions -= 1
continue
except KeyboardInterrupt:
print("\n!!! USER CANCELLATION !!!")
exit(1)
elif difficulty == "i":
while questions:
try:
a = randint(30, 100)
b = randint(10, 50)
answer = int(input("What's %d * %d?\nAnswer: " % (a, b)))
if answer == (a * b):
print("Correct :-)\n")
else:
print("Wrong :-(\n")
questions -= 1
except ValueError:
print("Trying to be too smart???\n")
questions -= 1
continue
except KeyboardInterrupt:
print("\n!!! USER CANCELLATION !!!")
exit(1)
else:
while questions:
try:
primes = [x for x in range(100, 1000) if all(x % y != 0 for y in range(2, int(x**0.5)+1))]
nums = choices(primes, k=2)
a = nums[0]
b = nums[1]
answer = int(input("What's %d * %d?\nAnswer: " % (a, b)))
if answer == (a * b):
print("Correct :-)\n")
else:
print("Wrong :-(\n")
questions -= 1
except ValueError:
print("Trying to be too smart???\n")
questions -= 1
continue
except KeyboardInterrupt:
print("\n!!! USER CANCELLATION !!!")
exit(1)
return 0
def division(difficulty, questions):
if difficulty == "e":
while questions:
try:
a = randint(0, 30)
b = randint(1, 10)  # lower bound of 1 avoids division by zero
answer = int(input("What's %d/%d?\nAnswer: " % (a, b)))
if answer == (a / b):
print("Correct :-)\n")
else:
print("Wrong :-(\n")
questions -= 1
except ValueError:
print("Trying to be too smart???\n")
questions -= 1
continue
except KeyboardInterrupt:
print("\n!!! USER CANCELLATION !!!")
exit(1)
elif difficulty == "i":
while questions:
try:
a = randint(30, 100)
b = randint(10, 50)
answer = int(input("What's %d/%d?\nAnswer: " % (a, b)))
if answer == (a / b):
print("Correct :-)\n")
else:
print("Wrong :-(\n")
questions -= 1
except ValueError:
print("Trying to be too smart???\n")
questions -= 1
continue
except KeyboardInterrupt:
print("\n!!! USER CANCELLATION !!!")
exit(1)
else:
while questions:
try:
primes = [x for x in range(100, 1000) if all(x % y != 0 for y in range(2, int(x**0.5)+1))]
nums = choices(primes, k=2)
a = nums[0]
b = nums[1]
answer = int(input("What's %d/%d?\nAnswer: " % (a, b)))
if answer == (a / b):
print("Correct :-)\n")
else:
print("Wrong :-(\n")
questions -= 1
except ValueError:
print("Trying to be too smart???\n")
questions -= 1
continue
except KeyboardInterrupt:
print("\n!!! USER CANCELLATION !!!")
exit(1)
return 0
def main():
try:
difficulty = input("Choose level (easy:E, intermediate:I, and hard:H): ").lower()
if difficulty not in ("easy", "intermediate", "hard", "e", "i", "h"):
print("!!! Please enter a valid choice !!!")
exit(1)
difficulty = difficulty[0]  # normalize "easy"/"intermediate"/"hard" to "e"/"i"/"h"
questions = int(input("Please give us the number of questions you want to attempt (max=10): "))
if questions < 0 or questions > 10:
print("!!! Enter a number between 1 and 10 !!!")
exit(1)
elif questions == 0:
print("Wow genius!!! You entered zero!!!")
exit(1)
qtype = input("Specify the question type "
"(multiplication:M, addition:A, subtraction:S, division:D): ").lower()
if qtype not in ("m", "a", "s", "d"):
print("!!! Please enter a valid choice !!!")
exit(1)
while True:
if questions:
if qtype == "a":
questions = addition(difficulty, questions)
elif qtype == "s":
questions = subtraction(difficulty, questions)
elif qtype == "m":
questions = multiplication(difficulty, questions)
else:
questions = division(difficulty, questions)
else:
cont = input("Continue or exit (Continue:C, Exit: E): ").lower()
if cont not in ("c", "e"):
print("!!! Invalid Choice !!!")
continue
if cont == "c":
questions = 1
else:
print("\n<------------ THANK YOU ------------>")
exit(0)
except KeyboardInterrupt:
print("\n!!! USER CANCELLATION !!!")
exit(1)
except Exception as e:
print("Encountered Exception: %s" % e)
if __name__ == '__main__':
main()
|
py
|
1a5a43faf903da1fd0bef8d32eadc2df78576016
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/score_matching_swiss_roll.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="eTpkSxV-Nwq3"
# Fit score-based generative model to 2d swiss roll data.
#
# Code is taken from
# https://jax.readthedocs.io/en/latest/notebooks/score_matching.html
#
# Notebook author: Denis Mazur, edited by Just Heuristic
#
# + id="mm7ZX-zYNwNe"
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_swiss_roll
import jax
import jax.numpy as jnp
from jax.experimental import optimizers
from jax.experimental import stax
from functools import partial
from IPython.display import clear_output
# + [markdown] id="jRbkmuINOa68"
# # Dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="hKCWtx6bN6WH" outputId="0b76611f-0bb0-4ab8-8199-bc73bbe90dda"
def sample_batch(size, noise=1.0):
x, _= make_swiss_roll(size, noise=noise)
x = x[:, [0, 2]] / 10.0
return np.array(x)
plt.figure(figsize=[16, 16])
plt.scatter(*sample_batch(10**4).T, alpha=0.1)
plt.axis('off')
plt.tight_layout()
plt.savefig('swiss_roll.png')
# + [markdown] id="wKYFKsbmOmu_"
# # Fit score function
# + id="mjWY78zpOcke"
# Set up network to predict scores
net_init, net_apply = stax.serial(
stax.Dense(128), stax.Softplus,
stax.Dense(128), stax.Softplus,
stax.Dense(2),
)
# Create optimizer. Note that both network and optimizer returns pure (stateless) functions
opt_init, opt_update, get_params = optimizers.adam(1e-3)
# + id="KRvf5xVDOsBB"
@jax.jit
def compute_loss(net_params, inputs):
# a function that computes jacobian by forward mode differentiation
jacobian = jax.jacfwd(net_apply, argnums=-1)
# we use jax.vmap to vectorize jacobian function along batch dimension
batch_jacobian = jax.vmap(partial(jacobian, net_params))(inputs) # [batch, dim, dim]
trace_jacobian = jnp.trace(batch_jacobian, axis1=1, axis2=2)
output_norm_sq = jnp.square(net_apply(net_params, inputs)).sum(axis=1)
return jnp.mean(trace_jacobian + 1/2 * output_norm_sq)
@jax.jit
def train_step(step_i, opt_state, batch, key):
net_params = get_params(opt_state)
loss = compute_loss(net_params, batch)
grads = jax.grad(compute_loss, argnums=0)(net_params, batch)
return loss, opt_update(step_i, grads, opt_state)
# + id="C61QTY6iTJLb"
def train_loop(key, train_step, nsteps):
key, subkey = jax.random.split(key)
out_shape, net_params = net_init(subkey, input_shape=(-1, 2))
opt_state = opt_init(net_params)
loss_history = []
for i in range(nsteps):
x = sample_batch(size=128)
key, subkey = jax.random.split(key)
loss, opt_state = train_step(i, opt_state, x, subkey)
loss_history.append(loss.item())
if i % 200 == 0:
clear_output(True)
plt.figure(figsize=[16, 8])
plt.subplot(1, 2, 1)
plt.title("mean loss = %.3f" % jnp.mean(jnp.array(loss_history[-32:])))
plt.scatter(jnp.arange(len(loss_history)), loss_history)
plt.grid()
plt.subplot(1, 2, 2)
net_params = get_params(opt_state)
xx = jnp.stack(jnp.meshgrid(jnp.linspace(-1.5, 2.0, 50), jnp.linspace(-1.5, 2.0, 50)), axis=-1).reshape(-1, 2)
scores = net_apply(net_params, xx)
scores_norm = jnp.linalg.norm(scores, axis=-1, ord=2, keepdims=True)
scores_log1p = scores / (scores_norm + 1e-9) * jnp.log1p(scores_norm)
plt.quiver(*xx.T, *scores_log1p.T, width=0.002, color='green')
plt.xlim(-1.5, 2.0)
plt.ylim(-1.5, 2.0)
plt.show()
return opt_state
# + colab={"base_uri": "https://localhost:8080/", "height": 498} id="R_X-lTClTOuh" outputId="04de5845-32fa-4595-f66f-57f87cb6ef19"
opt_state = train_loop(jax.random.PRNGKey(seed=42), train_step, 10000)
# + id="7kiWmJdUVgP6"
opt_state_basic = opt_state
# + [markdown] id="MDUOCMhiO3RA"
# # Plot gradient field
# + id="KcIsShngW2GM"
opt_state = opt_state_basic
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="DPHCKA-IO2tA" outputId="c674e722-ba78-445c-f5d9-02da1a8b39c2"
plt.figure(figsize=[16, 16])
net_params = get_params(opt_state)
xx = jnp.stack(jnp.meshgrid(jnp.linspace(-1.5, 1.5, 50), jnp.linspace(-1.5, 1.5, 50)), axis=-1).reshape(-1, 2)
scores = net_apply(net_params, xx)
scores_norm = jnp.linalg.norm(scores, axis=-1, ord=2, keepdims=True)
scores_log1p = scores / (scores_norm + 1e-9) * jnp.log1p(scores_norm)
plt.quiver(*xx.T, *scores_log1p.T, width=0.002, color='green')
plt.scatter(*sample_batch(10_000).T, alpha=0.1)
plt.axis('off')
plt.tight_layout()
plt.savefig('score_matching_swiss_roll.png')
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="lApxtHmTWzoN" outputId="3fb02e72-bbd8-4470-cd7f-1d83b3d3f2b1"
plt.figure(figsize=[16, 16])
net_params = get_params(opt_state)
xx = jnp.stack(jnp.meshgrid(jnp.linspace(-1.5, 1.5, 50), jnp.linspace(-1.5, 1.5, 50)), axis=-1).reshape(-1, 2)
scores = net_apply(net_params, xx)
scores_norm = jnp.linalg.norm(scores, axis=-1, ord=2, keepdims=True)
scores_log1p = scores / (scores_norm + 1e-9) * jnp.log1p(scores_norm)
plt.quiver(*xx.T, *scores_log1p.T, width=0.002, color='green')
#plt.scatter(*sample_batch(10_000).T, alpha=0.1)
plt.axis('off')
plt.tight_layout()
plt.savefig('score_matching_swiss_roll_no_data.png')
# + [markdown] id="DSgVc8mxPNiS"
# # Fit using sliced score matching
# + id="KO1FOR6_PPNn"
@jax.jit
def compute_ssm_loss(net_params, inputs, key):
apply = jax.jit(partial(net_apply, net_params))
batch_dot = partial(jnp.einsum, 'bu,bu->b')
# generate random vectors from N(0, I)
v = jax.random.normal(key, shape=inputs.shape)
# predict score and compute jacobian of score times v
score, jac_v = jax.jvp(apply, [inputs], [v])
return jnp.mean(batch_dot(v, jac_v) + 1/2 * batch_dot(v, score) ** 2)
@jax.jit
def train_step(step_i, opt_state, batch, key):
# the new compute_loss is random key dependent, thus we need a new train_step function
net_params = get_params(opt_state)
loss = compute_ssm_loss(net_params, batch, key)
grads = jax.grad(compute_ssm_loss, argnums=0)(net_params, batch, key)
return loss, opt_update(step_i, grads, opt_state)
# + colab={"base_uri": "https://localhost:8080/", "height": 498} id="SnN8RKubS3cx" outputId="b03c0751-aa73-4349-ad61-9c26daa37919"
opt_state = train_loop(jax.random.PRNGKey(seed=42), train_step, 10000)
# + id="kENOLLQRVmXQ"
opt_state_sliced = opt_state
# + [markdown] id="PPwZiwI3PfVB"
# # Plot gradient field
# + colab={"base_uri": "https://localhost:8080/", "height": 917} id="cS6WhEMlPWt5" outputId="24e3de0b-1afd-4b30-a14e-385d47582032"
plt.figure(figsize=[16, 16])
net_params = get_params(opt_state)
xx = jnp.stack(jnp.meshgrid(jnp.linspace(-1.5, 1.5, 50), jnp.linspace(-1.5, 1.5, 50)), axis=-1).reshape(-1, 2)
scores = net_apply(net_params, xx)
scores_norm = jnp.linalg.norm(scores, axis=-1, ord=2, keepdims=True)
scores_log1p = scores / (scores_norm + 1e-9) * jnp.log1p(scores_norm)
plt.quiver(*xx.T, *scores_log1p.T, width=0.002, color='green')
plt.scatter(*sample_batch(10_000).T, alpha=0.1)
plt.savefig('score_matching_sliced_swiss_roll.pdf', dpi=300)
# + colab={"base_uri": "https://localhost:8080/", "height": 933} id="DRZ3D3CTWg2W" outputId="66669c5e-7ace-4a8d-8dfd-798f2825b61e"
plt.figure(figsize=[16, 16])
net_params = get_params(opt_state)
xx = jnp.stack(jnp.meshgrid(jnp.linspace(-1.5, 1.5, 50), jnp.linspace(-1.5, 1.5, 50)), axis=-1).reshape(-1, 2)
scores = net_apply(net_params, xx)
scores_norm = jnp.linalg.norm(scores, axis=-1, ord=2, keepdims=True)
scores_log1p = scores / (scores_norm + 1e-9) * jnp.log1p(scores_norm)
plt.quiver(*xx.T, *scores_log1p.T, width=0.002, color='green')
#plt.scatter(*sample_batch(10_000).T, alpha=0.1)
# + [markdown] id="ePW3Z5SNP91R"
# # Langevin sampling
# + id="WEvDt6HGPhLS"
def sample_langevin(x_initial, *, net_params, key, eps=1e-2, eps_decay=0.9, num_steps=15, temperature=1.0):
""" sample x ~ p(x) by applying approximate Langvenin Dynamics, return a sequence of x_t """
x_t, x_sequence = x_initial, [x_initial]
for t in range(num_steps):
key, subkey = jax.random.split(key)
z_t = jax.random.normal(subkey, shape=x_t.shape)
x_t = x_t + eps / 2 * net_apply(net_params, x_t) + jnp.sqrt(eps) * temperature * z_t
x_sequence.append(x_t)
eps *= eps_decay
return jnp.stack(x_sequence)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="eHj9dTScQDog" outputId="92a5fe41-1a1f-46f5-8077-b86ef03ba5f4"
plt.figure(figsize=[16, 16])
key = jax.random.PRNGKey(42)
net_params = get_params(opt_state)
for x_initial in jnp.array([[-1.5, -1.5], [0, 0], [1.5, 0]]):
key, subkey = jax.random.split(key)
# sample x sequence
xx = sample_langevin(x_initial, key=subkey, net_params=net_params, num_steps=25)
plt.scatter(xx.T[0], xx.T[1], color="blue")
# draw arrows for each mcmc step
deltas = (xx[1:] - xx[:-1])
deltas = deltas - deltas / jnp.linalg.norm(deltas, keepdims=True, axis=-1) * 0.04
for i, arrow in enumerate(deltas):
plt.arrow(xx[i][0], xx[i][1], arrow[0], arrow[1], width=1e-4, head_width=2e-2, color="orange")
# plot data points and gradients
plt.plot()
xx = jnp.stack(jnp.meshgrid(jnp.linspace(-1.5, 1.5, 50), jnp.linspace(-1.5, 1.5, 50)), axis=-1).reshape(-1, 2)
scores = net_apply(net_params, xx)
scores_norm = jnp.linalg.norm(scores, axis=-1, ord=2, keepdims=True)
scores_log1p = scores / (scores_norm + 1e-9) * jnp.log1p(scores_norm)
plt.quiver(*xx.T, *scores_log1p.T, width=0.002, color='green')
plt.axis('off')
plt.scatter(*sample_batch(10_000).T, alpha=0.025)
plt.tight_layout()
plt.savefig('langevin_swiss_roll.png')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 33} id="6jhP-yF5QD0B" outputId="c821a148-722a-4e97-9206-627b8391ac67"
|
py
|
1a5a441faf8fe2bcba80d573e2fcc811cf4d4ce4
|
import traitlets
import os
class CameraBase(traitlets.HasTraits):
value = traitlets.Any()
@staticmethod
def instance(*args, **kwargs):
raise NotImplementedError
def widget(self):
if hasattr(self, '_widget'):
return self._widget # cache widget, so we don't duplicate links
from ipywidgets import Image
from jetbot.image import bgr8_to_jpeg
image = Image()
traitlets.dlink((self, 'value'), (image, 'value'), transform=bgr8_to_jpeg)
self._widget = image
return image
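# Hedged illustration (not part of the original module): a minimal concrete
# camera satisfying the CameraBase interface. The 224x224 BGR8 frame size is
# an assumption made only to keep the example self-contained.
class DummyCamera(CameraBase):
    @staticmethod
    def instance(*args, **kwargs):
        import numpy as np
        # Return a fresh instance holding a blank frame; a real camera
        # implementation would typically create and cache a single instance.
        camera = DummyCamera(*args, **kwargs)
        camera.value = np.zeros((224, 224, 3), dtype=np.uint8)
        return camera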
|
py
|
1a5a44df82201172e362d9cdb48233905a20b998
|
"""
Impute itemized expense amounts to nonitemizers in the puf.csv file.
This is done by using the distribution of itemized expense amounts
among filing units who are itemizers to generate the distribution of
imputed itemized expense amounts among filing units who are
nonitemizers. We do this using a recursive (or sequential) model in
which the imputed values of previously imputed itemized expense
variables are use as explanatory variables. This is done to better
represent the correlations between the several itemized expense
variables. This method is sometimes called sequential regression
multiple imputation. See Raghunathan, et al., "A Multivariate
Technique for Multiply Imputing Missing Values Using a Sequence of
Regression Models" (2001).
Imputing amounts for nonitemizers using statistical models estimated
on itemizers requires that we handle the resulting Heckman sample
selection problem: any statistical model of the itemizers that is used
to impute itemized expense amounts for nonitemizers will over-estimate
the imputed amounts. This problem is handled by using three different
ad hoc procedures to handle the Heckman sample selection problem.
(Numbered comments below contain more detail on these three procedures.)
And one additional procedure is used in this work: we scale the
distribution of each itemized expense variable so that the weighted
count of nonitemizers with a positive imputed amount and the weighted
dollar sum of the imputed amounts approximate those estimated by JCT
in JCX-75-15, "Estimating Changes in the Federal Individual Income
Tax: Description of the Individual Tax Model," April 23, 2015, pages
18-22, as summarized in Table 2 on page 22, which is entitled "Number
of Tax Filing Units and Amounts of Imputed Itemized Deductions for
Non-Itemizers, 2011." (Comments below contain more detail on this
procedure.)
"""
from __future__ import print_function
import numpy as np
import pandas as pd
import statsmodels.api as sm
DUMP0 = False
DUMP1 = False
DUMP2 = False
CALIBRATING = False
def impute(
ievar, logit_prob_af, log_amount_af, exogenous_vars, itemizer_data, nonitemizer_data
):
"""
Function that estimates imputation equations for ievar with itemizer_data
using the list of exogenous variables. The estimated equations are then
used (along with the two additive factors) to impute amounts for ievar
for nonitemizers with the imputed nonitemizer amounts being returned.
"""
if DUMP1:
print("****** IMPUTE {} ******".format(ievar))
# estimate Logit parameters for probability of having a positive amount
logit_y = (itemizer_data[ievar] > 0).astype(int)
logit_x = itemizer_data[exogenous_vars]
logit_res = sm.Logit(logit_y, logit_x).fit(disp=0)
x_b = logit_res.predict(nonitemizer_data[exogenous_vars], linear=True)
exp_x_b = np.exp(x_b + logit_prob_af[ievar])
adj_prob = exp_x_b / (1.0 + exp_x_b)
np.random.seed(int(ievar[1:]))
urn = np.random.uniform(size=len(x_b))
positive_imputed = np.where(urn < adj_prob, True, False)
if DUMP1:
print(logit_res.summary())
print(adj_prob.head())
print(round(positive_imputed.mean(), 4))
print(len(nonitemizer_data))
# estimate OLS parameters for the positive amount using a sample of
# itemizers who have positive ievar amounts that are less than the
# itemizer's standard deduction amount
# (1) This sample limitation is one part of an ad hoc procedure to deal
# with the Heckman sample selection problems present in this imputation
# process.
tpi_data = itemizer_data[
(itemizer_data[ievar] > 0) & (itemizer_data[ievar] < itemizer_data["stdded"])
]
ols_y = np.log(tpi_data[ievar])
ols_x = tpi_data[exogenous_vars]
ols_res = sm.OLS(ols_y, ols_x).fit()
ols_se = np.sqrt(ols_res.scale)
error = np.random.normal(loc=0.0, scale=ols_se, size=len(nonitemizer_data))
raw_imputed_amt = ols_res.predict(nonitemizer_data[exogenous_vars]) + error
# (2) Limiting the imputed amount to be no more than the standard
# deduction is a second part of the ad hoc procedure to deal with the
# Heckman sample selection problems present in this imputation process.
log_stdded = np.log(nonitemizer_data["stdded"])
cap_imputed_amt = np.where(
raw_imputed_amt > log_stdded, log_stdded, raw_imputed_amt
)
adj_imputed_amt = cap_imputed_amt + log_amount_af[ievar]
imputed_amount = np.where(
positive_imputed, np.exp(adj_imputed_amt).round().astype(int), 0
)
if DUMP1:
print("size of {} OLS sample = {}".format(ievar, len(ols_y)))
print("max {} value = {}".format(ievar, ols_y.max()))
print("avg {} value = {:.2f}".format(ievar, ols_y.mean()))
print(ols_res.summary())
print("OLS std error of regression = {:.2f}".format(ols_se))
print("mean cap_imputed_amt = {:.3f}".format(cap_imputed_amt.mean()))
print("mean adj_imputed_amt = {:.3f}".format(adj_imputed_amt.mean()))
print("mean imputed_amount = {:.2f}".format(imputed_amount.mean()))
# return imputed_amount array
return imputed_amount
# end of impute() function
def check(iev, nonitemizer_data, target_cnt, target_amt):
"""
Function that returns error message if weighted nonitemizer_data for iev
does not imply filing unit counts and itemized expenses amounts that are
close to the targets.
"""
max_diff = 0.2
var = nonitemizer_data[iev]
pos = var > 0
wgt = nonitemizer_data["s006"] * 0.01
assert len(var) == len(wgt)
wcnt = wgt[pos].sum() * 1e-6 # millions of filing units
wamt = (var[pos] * wgt[pos]).sum() * 1e-9 # billions of dollars
msg = ""
if not np.allclose([wcnt], [target_cnt[iev]], rtol=0.0, atol=max_diff):
msg += "\nNONITEMIZER {}>0 CNT TARGET ACTUAL= {:.1f} {:.1f}".format(
iev, target_cnt[iev], wcnt
)
if not np.allclose([wamt], [target_amt[iev]], rtol=0.0, atol=max_diff):
msg += "\nNONITEMIZER {}>0 AMT TARGET ACTUAL= {:.1f} {:.1f}".format(
iev, target_amt[iev], wamt
)
return msg
# end of check() function
def impute_itemized_expenses(alldata):
"""
Main function in impute_itmexp.py file.
Argument: puf.csv DataFrame just before imputation is done.
Returns: puf.csv DataFrame with imputed itemized expense amounts for
nonitemizers.
"""
# specify variable names of itemized-expense variables
iev_names = [
"e18400", # state and local taxes
"e18500", # real-estate taxes
"e19200", # interest paid
"e19800", # charity cash contributions
"e20100", # charity non-cash contributions
"e20400", # misc itemizable expenses
"e17500", # medical expenses
"g20500",
] # gross casualty/theft loss
def standard_deduction(row):
"""
Specifies 2011 standard deduction amount by MARS
"""
# TODO: parameterize this function
if row["MARS"] == 1:
return 5800 # single
elif row["MARS"] == 2:
return 11600 # married filing jointly
elif row["MARS"] == 3:
return 5800 # married filing separately
elif row["MARS"] == 4:
return 8500 # head of household
else:
raise ValueError("illegal value of MARS")
# extract selected variables and construct new variables
varnames = iev_names + [
"MARS",
"filer",
"s006",
"XTOT",
"e00200",
"e00600",
"e00900",
"e02000",
]
data = alldata[varnames].copy()
data["stdded"] = data.apply(standard_deduction, axis=1)
data["sum_itmexp"] = data[iev_names].sum(axis=1)
data["itemizer"] = np.where(data["sum_itmexp"] > data["stdded"], 1, 0)
data["constant"] = 1
data["MARS2"] = np.where(data["MARS"] == 2, 1, 0)
data["MARS3"] = np.where(data["MARS"] == 3, 1, 0)
data["MARS4"] = np.where(data["MARS"] == 4, 1, 0)
# separate all the data into data for itemizers and data for nonitemizers
itemizer_data = data[data["itemizer"] == 1].copy()
nonitemizer_data = data[data["itemizer"] == 0].copy()
# descriptive statistics for the data variables
if DUMP0:
print("ALL raw count = {:6d}".format(len(data)))
print("PUF raw count = {:6d}".format(len(data[data["filer"] == 1])))
print("CPS raw count = {:6d}".format(len(data[data["filer"] == 0])))
print("PUF fraction of ALL = {:.4f}".format(data["filer"].mean()))
ier = data["itemizer"]
print("ALL itemizer mean = {:.4f}".format(ier.mean()))
print("PUF itemizer mean = {:.4f}".format(ier[data["filer"] == 1].mean()))
print("CPS itemizer mean = {:.4f}".format(ier[data["filer"] == 0].mean()))
for iev in iev_names:
var = itemizer_data[iev]
varpos = var > 0
print(
"{} with {}>0 = {:.4f} {:.2f}".format(
"frac and mean for itemizers",
iev,
varpos.mean(),
var[varpos].mean(),
)
)
print("itmexp correlation coefficients for itemizers:")
print(itemizer_data[iev_names].corr()[iev_names[:4]])
print(itemizer_data[iev_names].corr()[iev_names[-4:]])
for iev in iev_names:
var = nonitemizer_data[iev]
varpos = var > 0
print("frac of non-itemizers with {}>0 = {:.4f}".format(iev, varpos.mean()))
# specify 2011 JCT count/amount targets for nonitemizers
# (When JCX-75-15 Table 2 contains more than one line item for a
# PUF variable, we assume the largest count represents the count
# for the PUF variable, and we assume that the sum of the amounts
# for the line items represents the amount for the PUF variable.)
target_cnt = dict(zip(iev_names, [0.0] * len(iev_names)))
target_amt = dict(zip(iev_names, [0.0] * len(iev_names)))
target_cnt["e18400"] = 113.2
target_amt["e18400"] = 128.1
target_cnt["e18500"] = 34.7
target_amt["e18500"] = 46.2
target_cnt["e19200"] = 16.7
target_amt["e19200"] = 58.5
target_cnt["e19800"] = 63.0
target_amt["e19800"] = 27.7
target_cnt["e20100"] = 31.5
target_amt["e20100"] = 15.6
target_cnt["e20400"] = 16.2
target_amt["e20400"] = 18.6
target_cnt["e17500"] = 5.5
target_amt["e17500"] = 20.4
# specify calibrated logit-probability and log-amount additive factors
# (Note that the logit_prob_af value will affect both the count and
# the amount for that itmexp variable, so calibrate logit_prob_af
# first and then calibrate the log_amount_af value. Also, note that
# because of the recursive nature of the imputation equations, the
# two additive factors for each itemexp variable must be calibrated
# in the order the equations are estimated.)
logit_prob_af = dict(zip(iev_names, [0.0] * len(iev_names)))
log_amount_af = dict(zip(iev_names, [0.0] * len(iev_names)))
logit_prob_af["e18400"] = 1.40
log_amount_af["e18400"] = -0.753
logit_prob_af["e18500"] = -2.73
log_amount_af["e18500"] = -0.93
logit_prob_af["e19200"] = -2.90
log_amount_af["e19200"] = -0.282
logit_prob_af["e19800"] = -0.70
log_amount_af["e19800"] = -1.47
logit_prob_af["e20100"] = -0.73
log_amount_af["e20100"] = -0.63
logit_prob_af["e20400"] = -2.25
log_amount_af["e20400"] = -0.28
logit_prob_af["e17500"] = -2.70
log_amount_af["e17500"] = -0.31
# estimate itemizer equations and use to impute nonitemizer itmexp amounts
exogenous_vars = [
"constant",
"MARS2",
"MARS3",
"MARS4",
"XTOT",
"e00200",
"e00600",
"e00900",
"e02000",
]
errmsg = ""
for iev in iev_names:
if iev == "g20500":
nonitemizer_data["g20500"] = 0
else:
nonitemizer_data[iev] = impute(
iev,
logit_prob_af,
log_amount_af,
exogenous_vars,
itemizer_data,
nonitemizer_data,
)
errmsg += check(iev, nonitemizer_data, target_cnt, target_amt)
# add imputed variable to exogenous variable list in order
# to better estimate correlation between the imputed variables
exogenous_vars.append(iev)
if errmsg:
if CALIBRATING:
print(errmsg)
else:
raise ValueError(errmsg)
# proportionally reduce imputed amounts in cases where nonitemizer's
# sum of imputed amounts exceeds the nonitemizer's standard deduction
# (3) Reducing the imputed amounts so that their sum is no more than
# the nonitemizer filing unit's standard deduction is a third part of
# the ad hoc procedure to deal with the Heckman sample selection problems
# present in this imputation process.
stdded = nonitemizer_data["stdded"]
ratio_ = nonitemizer_data[iev_names].sum(axis=1) / stdded
ratio = np.maximum(ratio_, 1.0)
if DUMP2:
print(
"BEFORE: num of nonitemizers with sum>stdded = {}".format(
len(ratio[ratio > 1])
)
)
print(
"BEFORE: frac of nonitemizers with sum>stdded = {:.4f}".format(
len(ratio[ratio > 1]) / float(len(ratio))
)
)
for iev in iev_names:
reduced_amt = np.trunc(nonitemizer_data[iev] / ratio)
nonitemizer_data[iev] = reduced_amt.astype(int)
if DUMP2:
r_a = nonitemizer_data[iev_names].sum(axis=1) / stdded
print(
"AFTER: num of nonitemizers with sum>stdded = {}".format(len(r_a[r_a > 1]))
)
print(
"AFTER: frac of nonitemizers with sum>stdded = {:.4f}".format(
len(r_a[r_a > 1]) / float(len(r_a))
)
)
# set imputed itmexp variable values in alldata and return alldata
combined_data = pd.concat([nonitemizer_data, itemizer_data]).sort_index()
for iev in iev_names:
alldata[iev] = combined_data[iev]
return alldata
# end of impute_itemized_expenses() function
if __name__ == "__main__":
RAWDATA = pd.read_csv("puf.csv")
AUGDATA = impute_itemized_expenses(RAWDATA)
AUGDATA.to_csv("puf-aug.csv", index=False)
|
py
|
1a5a45ac725e334d8cad714e10e6406526dd825b
|
#!/usr/bin/env python
#AUTHOR: Younghun Ju <[email protected]>, <[email protected]>
import roslib; roslib.load_manifest('kobuki_node')
import rospy
from tf.transformations import euler_from_quaternion
from math import degrees
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import Pose2D
class Converter(object):
def __init__(self):
rospy.init_node("getOdom2D", anonymous=True)
self.sub = rospy.Subscriber("odom", Odometry, self.OdomCallback)
self.pub = rospy.Publisher("pose2d", Pose2D, queue_size=10)
def OdomCallback(self,data):
px = data.pose.pose.position.x
py = data.pose.pose.position.y
quat = data.pose.pose.orientation
q = [quat.x, quat.y, quat.z, quat.w]
roll, pitch, yaw = euler_from_quaternion(q)
vx = data.twist.twist.linear.x
vy = data.twist.twist.linear.y
yaw_rate = data.twist.twist.angular.z
print "pose: x: {0:+2.5f}".format(px) + ", y: {0:+2.5f}".format(py)\
+ ", th: {0:+.4f}".format(yaw) + " rad; "\
+ "{0:+.2f}".format(degrees(yaw)) + " deg"
print "rate: x: {0:+2.5f}".format(vx) + ", y: {0:+2.5f}".format(vy)\
+ ", th: {0:+.2f}".format(yaw_rate) + " rad/s; "\
+ "{0:+.2f}".format(degrees(yaw_rate)) + " deg/s"
print '---'
pose2d = Pose2D()
pose2d.x = px
pose2d.y = py
pose2d.theta = yaw
self.pub.publish(pose2d)
if __name__ == '__main__':
try:
instance = Converter()
print
print "It prints x, y, theta values from Odom message of mobile base."
print
rospy.spin()
except rospy.ROSInterruptException: pass
|
py
|
1a5a46e005f70c06c08a69d685fb81f5aab81f83
|
import torch
from .misc import _convert_to_tensor, _dot_product
def _interp_fit(y0, y1, y_mid, f0, f1, dt):
"""Fit coefficients for 4th order polynomial interpolation.
Args:
y0: function value at the start of the interval.
y1: function value at the end of the interval.
y_mid: function value at the mid-point of the interval.
f0: derivative value at the start of the interval.
f1: derivative value at the end of the interval.
dt: width of the interval.
Returns:
List of coefficients `[a, b, c, d, e]` for interpolating with the polynomial
`p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e` for values of `x`
between 0 (start of interval) and 1 (end of interval).
"""
a = tuple(
_dot_product([-2 * dt, 2 * dt, -8, -8, 16], [f0_, f1_, y0_, y1_, y_mid_])
for f0_, f1_, y0_, y1_, y_mid_ in zip(f0, f1, y0, y1, y_mid)
)
b = tuple(
_dot_product([5 * dt, -3 * dt, 18, 14, -32], [f0_, f1_, y0_, y1_, y_mid_])
for f0_, f1_, y0_, y1_, y_mid_ in zip(f0, f1, y0, y1, y_mid)
)
c = tuple(
_dot_product([-4 * dt, dt, -11, -5, 16], [f0_, f1_, y0_, y1_, y_mid_])
for f0_, f1_, y0_, y1_, y_mid_ in zip(f0, f1, y0, y1, y_mid)
)
d = tuple(dt * f0_ for f0_ in f0)
e = y0
return [a, b, c, d, e]
def _interp_evaluate(coefficients, t0, t1, t):
"""Evaluate polynomial interpolation at the given time point.
Args:
coefficients: list of Tensor coefficients as created by `interp_fit`.
t0: scalar float64 Tensor giving the start of the interval.
t1: scalar float64 Tensor giving the end of the interval.
t: scalar float64 Tensor giving the desired interpolation point.
Returns:
Polynomial interpolation of the coefficients at time `t`.
"""
dtype = coefficients[0][0].dtype
device = coefficients[0][0].device
t0 = _convert_to_tensor(t0, dtype=dtype, device=device)
t1 = _convert_to_tensor(t1, dtype=dtype, device=device)
t = _convert_to_tensor(t, dtype=dtype, device=device)
assert (t0 <= t) & (t <= t1), 'invalid interpolation, fails `t0 <= t <= t1`: {}, {}, {}'.format(t0, t, t1)
x = ((t - t0) / (t1 - t0)).type(dtype).to(device)
xs = [torch.tensor(1).type(dtype).to(device), x]
for _ in range(2, len(coefficients)):
xs.append(xs[-1] * x)
return tuple(_dot_product(coefficients_, reversed(xs)) for coefficients_ in zip(*coefficients))
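# Hedged usage sketch appended for illustration (not part of the original
# library): fit the quartic interpolant to y = x**2 on [0, 1] and evaluate it
# at the midpoint. Values are wrapped in one-element tuples because the
# helpers above operate on tuples of tensors. Note that the relative import at
# the top means this block only runs when the module is executed inside its
# package (e.g. via `python -m ...`).
if __name__ == '__main__':
    y0 = (torch.tensor(0.0),)      # y(0)
    y1 = (torch.tensor(1.0),)      # y(1)
    y_mid = (torch.tensor(0.25),)  # y(0.5)
    f0 = (torch.tensor(0.0),)      # y'(0)
    f1 = (torch.tensor(2.0),)      # y'(1)
    coefficients = _interp_fit(y0, y1, y_mid, f0, f1, dt=1.0)
    # Expected to print a value very close to 0.25.
    print(_interp_evaluate(coefficients, 0.0, 1.0, 0.5))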
|
py
|
1a5a4761e07c2ce30e50421832329bdbb1baf521
|
import codecs
from typing import Any
from flask import Blueprint, jsonify
from werkzeug.exceptions import abort
from shrunk.client import ShrunkClient
from shrunk.client.exceptions import NoSuchObjectException
from shrunk.util.decorators import require_login
__all__ = ['bp']
bp = Blueprint('request', __name__, url_prefix='/api/v1/request')
@bp.route('/pending', methods=['GET'])
@require_login
def get_pending_requests(netid: str, client: ShrunkClient) -> Any:
requests = client.links.get_pending_access_requests(netid)
def jsonify_request(req: Any) -> Any:
return {
'link_id': str(req['_id']),
'title': req['title'],
'request_token': str(codecs.encode(req['request']['token'], encoding='hex'), 'utf8'),
'requesting_netid': req['request']['requesting_netid'],
'request_time': req['request']['created_at'].isoformat(),
}
return jsonify({'requests': [jsonify_request(req) for req in requests]})
@bp.route('/resolve/<hex_token:token>/accept')
@require_login
def accept_request(netid: str, client: ShrunkClient, token: bytes) -> Any:
try:
if not client.roles.has('admin', netid) and not client.links.check_access_request_permission(token, netid):
abort(403)
except NoSuchObjectException:
abort(404)
client.links.accept_access_request(token)
return '', 204
@bp.route('/resolve/<hex_token:token>/deny')
@require_login
def deny_request(netid: str, client: ShrunkClient, token: bytes) -> Any:
try:
if not client.roles.has('admin', netid) and not client.links.check_access_request_permission(token, netid):
abort(403)
except NoSuchObjectException:
abort(404)
client.links.deny_access_request(token)
return '', 204
|
py
|
1a5a47a49c7852f6cda776456eae002804b40f5b
|
import numpy as np
from scipy.interpolate.interpolate import interp1d
import matplotlib.pyplot as plt
import os
path_104 = os.path.abspath('../../../Downloads/realTime-master-AlAl-data_trial5/AlAl/data_trial5/104S/')
files_104_unsorted = os.listdir(path_104)
order = [int(str.split(ff, "_")[1]) for ff in files_104_unsorted]
files_104 = [x for _, x in sorted(zip(order, files_104_unsorted))]
nfiles_104 = len(files_104)
files2_104 = [path_104 + '/' + files_104[i] for i in range(len(files_104))]
dat_104 = [None] * nfiles_104
for i in range(nfiles_104):
with open(files2_104[i]) as file:
temp = file.readlines()
dat_104[i] = np.vstack([np.float_(str.split(temp[j])) for j in range(2, len(temp))])
path_105 = os.path.abspath('../../../Downloads/realTime-master-AlAl-data_trial5/AlAl/data_trial5/105S/')
files_105_unsorted = os.listdir(path_105)
order = [int(str.split(ff, "_")[1]) for ff in files_105_unsorted]
files_105 = [x for _, x in sorted(zip(order, files_105_unsorted))]
nfiles_105 = len(files_105)
files2_105 = [path_105 + '/' + files_105[i] for i in range(len(files_105))]
dat_105 = [None] * nfiles_105
for i in range(nfiles_105):
with open(files2_105[i]) as file:
temp = file.readlines()
dat_105[i] = np.vstack([np.float_(str.split(temp[j])) for j in range(2, len(temp))])
path_106 = os.path.abspath('../../../Downloads/realTime-master-AlAl-data_trial5/AlAl/data_trial5/106S/')
files_106_unsorted = os.listdir(path_106)
order = [int(str.split(ff, "_")[1]) for ff in files_106_unsorted]
files_106 = [x for _, x in sorted(zip(order, files_106_unsorted))]
nfiles_106 = len(files_106)
files2_106 = [path_106 + '/' + files_106[i] for i in range(len(files_106))]
dat_106 = [None] * nfiles_106
for i in range(nfiles_106):
with open(files2_106[i]) as file:
temp = file.readlines()
dat_106[i] = np.vstack([np.float_(str.split(temp[j])) for j in range(2, len(temp))])
xrange_104 = [0.75, 1.3]
xrange_105 = [1.2, 2.2]
xrange_106 = [0.65, 1.3]
M = 200
n = 1000
xx104 = np.linspace(xrange_104[0], xrange_104[1], M)
xx105 = np.linspace(xrange_105[0], xrange_105[1], M)
xx106 = np.linspace(xrange_106[0], xrange_106[1], M)
xx_all = [xx104, xx105, xx106]
sims_all = np.empty([3, n, M]) # 3 datasets, 1000 samples, 200 points on curve
for i in range(n):
ifunc = interp1d(dat_104[i][:,1], dat_104[i][:,3], kind = 'cubic')
sims_all[0, i, :] = ifunc(xx104)
ifunc = interp1d(dat_105[i][:,1], dat_105[i][:,3], kind = 'cubic')
sims_all[1, i, :] = ifunc(xx105)
ifunc = interp1d(dat_106[i][:,1], dat_106[i][:,3], kind = 'cubic')
sims_all[2, i, :] = ifunc(xx106)
with open('./../data/Al-5083/flyer_data/Data_S104S.txt') as file:
temp = file.readlines()
obs1 = np.vstack([np.float_(str.split(temp[i])) for i in range(2,len(temp))])
with open('./../data/Al-5083/flyer_data/Data_S105S.txt') as file:
temp = file.readlines()
obs2 = np.vstack([np.float_(str.split(temp[i])) for i in range(2,len(temp))])
with open('./../data/Al-5083/flyer_data/Data_S106S.txt') as file:
temp = file.readlines()
obs3 = np.vstack([np.float_(str.split(temp[i])) for i in range(2,len(temp))])
obs_all = np.empty([3, M])
ifunc = interp1d(obs1[:,1], obs1[:,0]*1e-4, kind = 'cubic')
obs_all[0] = ifunc(xx104)
ifunc = interp1d(obs2[:,1]-.2, obs2[:,0]*1e-4, kind = 'cubic')
obs_all[1] = ifunc(xx105)
ifunc = interp1d(obs3[:,0]-2.55, obs3[:,1]*1e-4, kind = 'cubic')
obs_all[2] = ifunc(xx106)
plt.plot(sims_all[0].T, color='lightgrey')
plt.plot(obs_all[0])
plt.show()
plt.plot(sims_all[1].T, color='lightgrey')
plt.plot(obs_all[1])
plt.show()
plt.plot(sims_all[2].T, color='lightgrey')
plt.plot(obs_all[2])
plt.show()
# let the obs have large time shift discrepancy
np.savetxt("./../data/Al-5083/flyer_data/sims104.csv", sims_all[0], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/sims105.csv", sims_all[1], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/sims106.csv", sims_all[2], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/obs104.csv", obs_all[0], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/obs105.csv", obs_all[1], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/obs106.csv", obs_all[2], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/xsims104.csv", xx_all[0], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/xsims105.csv", xx_all[1], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/xsims106.csv", xx_all[2], delimiter=",")
#sim_inputs = np.genfromtxt('./../data/Al-5083/flyer_data/sim_input.csv', delimiter=',', skip_header=1)
|
py
|
1a5a48806d2ca20b906ea83bea7b2039c290ec58
|
#!/usr/bin/env python
# Filename: compare shapefiles
"""
introduction: compare two shapefiles
authors: Huang Lingcao
email:[email protected]
add time: 04 July, 2020
"""
import os, sys
cd_dir = os.path.expanduser('~/codes/PycharmProjects/ChangeDet_DL')
sys.path.insert(0, os.path.join(cd_dir,'thawSlumpChangeDet'))
from polygons_cd import polygons_change_detection
cur_dir = os.getcwd()
res_dir=os.path.expanduser('~/Data/Qinghai-Tibet/beiluhe/result/result_multi_temporal_changes_17-19July/BLH_change_deeplabV3+_4_exp7_iter30000_2017_2019_tiles')
testid='BLH_change_deeplabV3+_4_exp7_iter30000'
test_name='2017_2019'
os.chdir(res_dir)
## exp7 mapped polygons
num=3
for n in range(num):
# echo $n
shp_pre='_'.join(['I%d'%n,testid])
###### the one without post ######
# ${shp_pre}_${test_name}.shp
###### the one after post-processing ######
# ${shp_pre}_post_${test_name}.shp
###### the ones after timeIOU ######
# ${shp_pre}_post_${test_name}_RmOccur.shp
# ${shp_pre}_post_${test_name}_RmOccur_RmTimeidx.shp
# ${shp_pre}_post_${test_name}_rmTimeiou.shp
###### the ones only keep the true positives (IOU >= 0.5) ######
# ${shp_pre}_post_${test_name}_TP.shp
shp1 = '_'.join([shp_pre,'post',test_name,'TP0']) + '.shp' # ${shp_pre}_post_${test_name}_TP.shp
shp2 = '_'.join([shp_pre,'post',test_name,'rmTimeiou' ]) + '.shp' # ${shp_pre}_post_${test_name}_rmTimeiou.shp
# get expanding and shrinking parts
output_path_expand = '_'.join(['expand','I%d'%n,'diff_postTP_and_rmTimeiou']) +'.shp'
output_path_shrink = '_'.join(['shrink', 'I%d' % n, 'diff_postTP_and_rmTimeiou']) + '.shp'
polygons_change_detection(shp1, shp2, output_path_expand,output_path_shrink)
pass
os.chdir(cur_dir)
|
py
|
1a5a4a104d5460f266b573790ba5666b35600795
|
#########################
# Imports
#########################
from bs4 import BeautifulSoup
from fuzzywuzzy import fuzz
import bs4, requests, json
from secrets import *
#########################
# Headers
#########################
headers = {
'Authorization': 'Bearer ' + ACCESS_TOKEN,
}
#########################
# Helper Functions
#########################
def get_tracks(url):
# Parse Webpage
html = requests.get(url).text
soup = bs4.BeautifulSoup(html, 'html.parser')
data = json.loads(soup.find(id="shoebox-ember-data-store").get_text())
name = data['data']['attributes']['name']
playlist = data['included']
# Get Track Names and Artists from Playlist
tracks = []
for track in playlist:
try:
tracks.append({
'name': track['attributes']['name'],
'artist': track['attributes']['artistName']
})
except:
continue
return name, tracks
def get_spotify_playlist(target_name):
# Get All Playlists
response = requests.get('https://api.spotify.com/v1/me/playlists', headers=headers)
playlists = json.loads(response.text)['items']
target_id = None
# Search for Playlist in Existing Playlists
for playlist in playlists:
if str(playlist['name']) == target_name:
target_id = str(playlist['id'])
# Create Playlist if it DNE
if target_id == None:
response = requests.post('https://api.spotify.com/v1/users/%s/playlists' % USER_ID, headers=headers, data='{"name":"%s","public":false}' % target_name)
target_id = str(json.loads(response.text)['id'])
return target_id
def get_spotify_playlist_tracks(target_id):
# Get All Tracks in Playlist
response = requests.get("https://api.spotify.com/v1/users/%s/playlists/%s/tracks" % (USER_ID, target_id), headers=headers)
playlist = json.loads(response.text)['items']
# Get Track Names, Artists, and URIs from Playlist
tracks = []
for track in playlist:
tracks.append({
'name': track['track']['name'],
'artist': track['track']['artists'][0]['name'],
'uri': track['track']['uri']
})
return tracks
def get_spotify_track_uri(target_name, target_artist):
# Parse Apple Music Song Name
if "(feat." in target_name:
index = target_name.find("(feat.")
target_artist += target_name[index + len("(feat."):-1]
target_name = target_name[:index]
# Get Search Results
params = (
('q', target_name),
('type', 'track'),
)
response = requests.get('https://api.spotify.com/v1/search', headers=headers, params=params)
results = json.loads(response.text)['tracks']['items']
# Return Best Fuzzy Match
scores = []
factor = 1
for track in results:
result = ""
for artist in track['artists']:
result += artist['name'] + " "
scores.append(fuzz.ratio(result.strip(), target_artist) * factor)
factor -= 0.02
return results[scores.index(max(scores))]['uri']
def delete_spotify_playlist_tracks(tracks, target_id):
# Generate Data String
uris = ""
for track in tracks:
uris += '{"uri":"' + str(track['uri']) + '"},'
data = '{"tracks":[' + uris[:-1] + "]}"
response = requests.delete('https://api.spotify.com/v1/users/%s/playlists/%s/tracks' % (USER_ID, target_id), headers=headers, data=data)
def add_spotify_playlist_tracks(tracks, target_id):
# Support 100 Track Limit
if len(tracks) > 100:
add_spotify_playlist_tracks(tracks[:100], target_id)
add_spotify_playlist_tracks(tracks[100:], target_id)
# Search for Tracks on Spotify
uris = ""
for track in tracks:
try:
uris += get_spotify_track_uri(track['name'], track['artist']) + ","
except:
print("Couldn't add " + track['name'] + " by " + track['artist'])
params = (
('uris', uris[:-1]),
)
response = requests.post('https://api.spotify.com/v1/users/%s/playlists/%s/tracks' % (USER_ID, target_id), headers=headers, params=params)
#########################
# Main Function
#########################
def ams(url):
name, cur_tracks = get_tracks(url)
target_id = get_spotify_playlist(name)
old_tracks = get_spotify_playlist_tracks(target_id)
add_tracks = [ track for track in cur_tracks if track not in old_tracks ]
del_tracks = [ track for track in old_tracks if track not in cur_tracks ]
print("Syncing " + name + "...")
delete_spotify_playlist_tracks(del_tracks, target_id)
add_spotify_playlist_tracks(add_tracks, target_id)
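#########################
# Usage Sketch
#########################
# Hedged example appended for illustration: the playlist URL below is a
# placeholder, not a real Apple Music share link; replace it with the public
# URL of the playlist you want to mirror into the Spotify account configured
# in secrets.py.
if __name__ == '__main__':
    ams('https://music.apple.com/us/playlist/<your-playlist-id>')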
|
py
|
1a5a4b3ef0bb7c3ad7c7d762c5919168c4548cf0
|
from .iterator2 import Iterator2
|
py
|
1a5a4b55d8cd4ad0256115c7333f8606792316d1
|
import numpy as np
from REESMath.quaternion import to_matrix
from math import atan2, asin, pi
class EulerXYZ:
def __init__(self, alpha, beta, gamma):
self.alpha = alpha # Rotation angle around x-axis in radians
self.beta = beta # Rotation angle around y-axis in radians
self.gamma = gamma # Rotation angle around z-axis in radians
def make_euler_xyz_from_matrix(R):
r00 = R[0, 0]
r01 = R[0, 1]
r02 = R[0, 2]
r10 = R[1, 0]
r20 = R[2, 0]
r21 = R[2, 1]
r22 = R[2, 2]
if r20 >= 1.0:
rz = atan2(-r01, -r02)
ry = - pi / 2.0
rx = 0.0
elif r20 <= -1.0:
rz = atan2(-r01, r02)
ry = pi / 2.0
rx = 0.0
else:
rz = atan2(r10, r00)
ry = asin(-r20)
rx = atan2(r21, r22)
return EulerXYZ(rx, ry, rz)
def make_euler_xyz_from_quaternion(Q):
return make_euler_xyz_from_matrix(to_matrix(Q))
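# Hedged usage sketch appended for illustration: the identity rotation should
# decompose into (approximately) zero rotation angles about all three axes.
if __name__ == '__main__':
    euler = make_euler_xyz_from_matrix(np.eye(3))
    print(euler.alpha, euler.beta, euler.gamma)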
|
py
|
1a5a4b8724181d5b1ce7d52a1e3eb3620954f117
|
#!/usr/bin/env python
"""Generate documentation.
"""
from __future__ import print_function
import os
import argparse
import json
import subprocess
import glob
BOARD_RST_FMT = """{desc}
{desc_underline}
Pinout
------
.. image:: ../images/boards/{pinout}
:width: 50%
:target: ../_images/{pinout}
{include_extra}
Default system features
-----------------------
The default configuration includes those major features. They are all
initialized by ``sys_start()`` at the startup of the application.
{major_features}
Drivers
-------
Supported drivers for this board.
{drivers}
Library Reference
-----------------
Read more about board specific functionality in the :doc:`{desc}
<../library-reference/boards/{name}>` module documentation in the
Library Reference.
Memory usage
------------
Below is the memory usage of two applications:
- The
:github-tree:`minimal-configuration<examples/minimal-configuration>`
application is configured to only include the bare minimum of
functionality for the low level kernel to run. That is, the
thread scheduler and system tick.
- The
:github-tree:`default-configuration<examples/default-configuration>`
application is built with the default configuration, including a lot
more functionality. See the list of `Default system features`_ above
for a summary.
+--------------------------+-----------+-----------+
| Application | Flash | RAM |
+==========================+===========+===========+
{memory_usage}
+--------------------------+-----------+-----------+
Default configuration
---------------------
Default Standard Library configuration.
+--------------------------------------------------------+-----------------------------------------------------+
| Name | Value |
+========================================================+=====================================================+
{default_configuration}
Homepage
--------
{homepage}
Mcu
---
:doc:`{mcu}<../library-reference/mcus/{mcu}>`
{targets}
"""
CONFIG_FMT = """| {:53} | {:50} |
+--------------------------------------------------------+-----------------------------------------------------+
"""
SOURCE_CODE_FMT = """.. code-block:: c
{source}
"""
def get_arguments():
"""Get the command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument("database",
help="JSON database.")
return parser.parse_args()
def boards_generate(database):
"""Generate boards.
"""
for board, data in database["boards"].items():
# Board drivers.
drivers = []
for driver in sorted(data["drivers"]):
subsystem = glob.glob('src/drivers/*/' + driver + '.h')[0].split('/')[2]
drivers.append("- :doc:`../library-reference/drivers/{}/{}`".format(
subsystem,
driver))
targets = []
# Default configuration.
default_configuration = ""
for config in data["default-configuration"]:
default_configuration += CONFIG_FMT.format(config[0] + "_", config[1])
target = ".. _{name}: ../user-guide/configuration.html#c.{name}".format(
name=config[0])
targets.append(target)
if os.path.exists(os.path.join("doc", "boards", "extra", board + ".rst")):
include_extra = ".. include:: extra/{name}.rst".format(name=board)
else:
include_extra = ""
# Enabled features.
major_features = []
for [name, value] in data["default-configuration"]:
if name == "CONFIG_START_NETWORK" and value == "1":
major_features.append("- Networking.")
if name == "CONFIG_START_FILESYSTEM" and value == "1":
major_features.append("- File system.")
if name == "CONFIG_START_CONSOLE" and value != "CONFIG_START_CONSOLE_NONE":
major_features.append("- :doc:`Console.<../library-reference/oam/console>`")
if name == "CONFIG_START_SHELL" and value == "1":
major_features.append("- :doc:`Debug shell.<../library-reference/oam/shell>`")
# Memory usage.
applications = [
"minimal-configuration",
"default-configuration"
]
memory_usage = []
try:
for application in applications:
subprocess.check_call([
'make',
'-s',
'-C', os.path.join('examples', application),
'BOARD=' + board,
'all'
])
sizes_json = subprocess.check_output([
'make',
'-s',
'-C', os.path.join('examples', application),
'BOARD=' + board,
'size-json'
])
sizes = json.loads(sizes_json)
memory_usage.append(
'| {application:24} | {program:9} | {data:9} |'.format(
application=application,
program=sizes['program'],
data=sizes['data']))
except subprocess.CalledProcessError:
print('Failed to generate memory footprint data for board {}. '
'Skipping board.'.format(board))
continue
rst = BOARD_RST_FMT.format(
name=board,
desc=data["board_desc"],
desc_underline="=" * len(data["board_desc"]),
homepage=data["board_homepage"],
pinout=data["board_pinout"],
major_features='\n'.join(major_features),
mcu=data["mcu"].replace("/", ""),
drivers='\n'.join(drivers),
default_configuration=default_configuration,
include_extra=include_extra,
targets='\n\n'.join(targets),
memory_usage='\n+-{}-+-----------+-----------+\n'.format(
24 * '-').join(memory_usage))
rst_path = os.path.join("doc", "boards", board + ".rst")
print("Writing to ", rst_path)
with open(rst_path, "w") as fout:
fout.write(rst)
def examples_generate(_):
"""Generate examples.
"""
examples = [
"analog_read",
"analog_write",
"blink",
"ds18b20",
"filesystem",
"hello_world",
"http_client",
"ping",
"queue",
"shell",
"timer"
]
for example in examples:
c_path = os.path.join("examples", example, "main.c")
source = []
with open(c_path) as fin:
for line in fin.readlines():
source.append(" " + line)
rst = SOURCE_CODE_FMT.format(source=''.join(source))
rst_path = os.path.join("doc", "examples", example, "source-code.rst")
print("Writing to ", rst_path)
with open(rst_path, "w") as fout:
fout.write(rst)
def testing_generate(database):
"""Generate the list of test suites.
"""
testing_suites_path = os.path.join("doc", "developer-guide", "testing-suites.rst")
with open(testing_suites_path, "w") as fout:
boards = database["boards"].keys()
boards.sort()
for board in boards:
suites = subprocess.check_output([
'make',
'-s',
'BOARD=' + board,
'print-TESTS'
])
print(database["boards"][board]["board_desc"], file=fout)
print('-' * len(board), file=fout)
print(file=fout)
for suite in suites.split(" "):
suite = suite[4:].strip()
if suite:
print("- :github-blob:`{suite}<tst/{suite}/main.c>`".format(suite=suite),
file=fout)
print(file=fout)
def main():
"""Main.
"""
args = get_arguments()
with open(args.database) as fin:
database = json.load(fin)
boards_generate(database)
examples_generate(database)
testing_generate(database)
if __name__ == "__main__":
main()
|
py
|
1a5a4e3bd53126890bf9e7e57054ca84a2cd65ce
|
from pathlib import Path
import pytest
from hookman.hookman_generator import HookManGenerator
def test_hook_man_generator(datadir, file_regression):
# Pass a folder
with pytest.raises(FileNotFoundError, match="File not found: *"):
HookManGenerator(hook_spec_file_path=datadir)
# Pass a invalid hook_spec_file (without specs)
Path(datadir / 'invalid_spec.py').touch()
with pytest.raises(RuntimeError, match="Invalid file, specs not defined."):
HookManGenerator(hook_spec_file_path=Path(datadir / 'invalid_spec.py'))
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs.py'))
hg.generate_project_files(dst_path=datadir)
file_regression.check((datadir / 'cpp' / 'HookCaller.hpp').read_text(), basename='HookCaller', extension='.hpp')
file_regression.check((datadir / 'binding' / 'HookCallerPython.cpp').read_text(), basename='HookCallerPython', extension='.cpp')
def test_hook_man_generator_no_pyd(datadir, file_regression):
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs_no_pyd.py'))
hg.generate_project_files(dst_path=datadir)
obtained_hook_caller_file = datadir / 'cpp' / 'HookCaller.hpp'
file_regression.check(obtained_hook_caller_file.read_text(), basename='HookCallerNoPyd', extension='.hpp')
assert not (datadir / 'binding').is_dir()
def test_generate_plugin_template(datadir, file_regression):
plugin_dir = datadir / 'test_generate_plugin_template'
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs.py'))
hg.generate_plugin_template(
caption='Acme',
plugin_id='acme',
author_name='FOO',
author_email='[email protected]',
dst_path=plugin_dir
)
obtained_hook_specs_file = datadir / 'test_generate_plugin_template/acme/src/hook_specs.h'
file_regression.check(obtained_hook_specs_file.read_text(), basename='generate_hook_specs', extension='.h')
obtained_plugin_yaml = datadir / 'test_generate_plugin_template/acme/assets/plugin.yaml'
file_regression.check(obtained_plugin_yaml.read_text(), basename='generate_plugin', extension='.yaml')
obtained_plugin_file = datadir / 'test_generate_plugin_template/acme/src/acme.cpp'
file_regression.check(obtained_plugin_file.read_text(), basename='generate_plugin', extension='.cpp')
obtained_readme = datadir / 'test_generate_plugin_template/acme/assets/README.md'
file_regression.check(obtained_readme.read_text(), basename='generate_README', extension='.md')
obtained_cmake_list = datadir / 'test_generate_plugin_template/acme/CMakeLists.txt'
file_regression.check(obtained_cmake_list.read_text(), basename='generate_CMakeLists', extension='.txt')
obtained_cmake_list_src = datadir / 'test_generate_plugin_template/acme/src/CMakeLists.txt'
file_regression.check(obtained_cmake_list_src.read_text(), basename='generate_src_CMakeLists', extension='.txt')
obtained_compile_script = datadir / 'test_generate_plugin_template/acme/compile.py'
file_regression.check(obtained_compile_script.read_text(), basename='generate_compile', extension='.py')
def test_generate_plugin_template_source_content_with_extra_includes(datadir, file_regression):
plugin_dir = datadir / 'test_generate_plugin_template_with_extra_include'
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs.py'))
hg.generate_plugin_template(
caption='Acme',
plugin_id='acme',
author_name='FOO',
author_email='[email protected]',
dst_path=plugin_dir,
extra_includes=['<my_sdk/sdk.h>'],
)
obtained_plugin_file = datadir / 'test_generate_plugin_template_with_extra_include/acme/src/acme.cpp'
file_regression.check(obtained_plugin_file.read_text(), basename='plugin_file_with_extra_includes', extension='.cpp')
def test_generate_plugin_template_source_content_with_default_impls(datadir, file_regression):
plugin_dir = datadir / 'test_generate_plugin_template_source_content_with_default_impls'
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs.py'))
extra_body_lines = [
'HOOK_FRICTION_FACTOR(v1, v2)',
'{',
' return 0;',
'}',
]
hg.generate_plugin_template(
caption='Acme',
plugin_id='acme',
author_name='FOO',
author_email='[email protected]',
dst_path=plugin_dir,
extra_body_lines=extra_body_lines,
exclude_hooks=['HOOK_FRICTION_FACTOR']
)
obtained_plugin_file = datadir / 'test_generate_plugin_template_source_content_with_default_impls/acme/src/acme.cpp'
file_regression.check(obtained_plugin_file.read_text(), basename='plugin_file_with_default_impl', extension='.cpp')
def test_generate_plugin_template_source_wrong_arguments(datadir):
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs.py'))
with pytest.raises(ValueError, match='extra_includes parameter must be a list, got int'):
hg._validate_parameter('extra_includes', 1)
with pytest.raises(ValueError, match='All elements of extra_includes must be a string'):
hg._validate_parameter('extra_includes', ['xx', 1])
def test_generate_hook_specs_header(datadir, file_regression):
plugin_dir = datadir / 'my-plugin'
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs.py'))
hg.generate_hook_specs_header(plugin_id='acme', dst_path=plugin_dir)
obtained_hook_specs_file = plugin_dir / 'acme/src/hook_specs.h'
file_regression.check(obtained_hook_specs_file.read_text(), basename='generate_hook_specs_header1', extension='.h')
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs_2.py'))
hg.generate_hook_specs_header(plugin_id='acme', dst_path=plugin_dir)
file_regression.check(obtained_hook_specs_file.read_text(), basename='generate_hook_specs_header2', extension='.h')
def test_generate_plugin_package_invalid_shared_lib_name(acme_hook_specs_file, tmpdir):
hg = HookManGenerator(hook_spec_file_path=acme_hook_specs_file)
from hookman.exceptions import HookmanError
with pytest.raises(HookmanError):
hg.generate_plugin_template(
caption='acme',
plugin_id='acm#e',
author_email='acme1',
author_name='acme2',
dst_path=Path(tmpdir)
)
with pytest.raises(HookmanError):
hg.generate_plugin_template(
caption='acme',
plugin_id='acm e',
author_email='acme1',
author_name='acme2',
dst_path=Path(tmpdir)
)
with pytest.raises(HookmanError):
hg.generate_plugin_template(
caption='1acme',
plugin_id='acm e',
author_email='acme1',
author_name='acme2',
dst_path=Path(tmpdir)
)
def test_generate_plugin_package(acme_hook_specs_file, tmpdir, mock_plugin_id_from_dll):
hg = HookManGenerator(hook_spec_file_path=acme_hook_specs_file)
plugin_id = 'acme'
hg.generate_plugin_template(
caption='acme',
plugin_id='acme',
author_email='acme1',
author_name='acme2',
dst_path=Path(tmpdir)
)
plugin_dir = Path(tmpdir) / 'acme'
artifacts_dir = plugin_dir / 'artifacts'
artifacts_dir.mkdir()
import sys
shared_lib_name = f"{plugin_id}.dll" if sys.platform == 'win32' else f"lib{plugin_id}.so"
shared_lib_path = artifacts_dir / shared_lib_name
shared_lib_path.write_text('')
hg.generate_plugin_package(
package_name='acme',
plugin_dir=plugin_dir,
)
from hookman.plugin_config import PluginInfo
version = PluginInfo(Path(tmpdir / 'acme/assets/plugin.yaml'), None).version
win_plugin_name = f"{plugin_id}-{version}-win64.hmplugin"
linux_plugin_name = f"{plugin_id}-{version}-linux64.hmplugin"
hm_plugin_name = win_plugin_name if sys.platform == 'win32' else linux_plugin_name
compressed_plugin = plugin_dir / hm_plugin_name
assert compressed_plugin.exists()
from zipfile import ZipFile
plugin_file_zip = ZipFile(compressed_plugin)
list_of_files = [file.filename for file in plugin_file_zip.filelist]
assert 'assets/plugin.yaml' in list_of_files
assert 'assets/README.md' in list_of_files
assert f'artifacts/{shared_lib_name}' in list_of_files
def test_generate_plugin_package_with_missing_folders(acme_hook_specs_file, tmpdir, mocker):
import sys
from textwrap import dedent
from hookman.exceptions import AssetsDirNotFoundError, ArtifactsDirNotFoundError, SharedLibraryNotFoundError
hg = HookManGenerator(hook_spec_file_path=acme_hook_specs_file)
plugin_dir = Path(tmpdir) / 'acme'
plugin_dir.mkdir()
# -- Without Assets Folder
with pytest.raises(AssetsDirNotFoundError):
hg.generate_plugin_package(package_name='acme', plugin_dir=plugin_dir)
asset_dir = plugin_dir / 'assets'
asset_dir.mkdir()
# -- Without Artifacts Folder
with pytest.raises(ArtifactsDirNotFoundError, match=r'Artifacts directory not found: .*[\\/]acme[\\/]artifacts'):
hg.generate_plugin_package(package_name='acme', plugin_dir=plugin_dir)
artifacts_dir = plugin_dir / 'artifacts'
artifacts_dir.mkdir()
# -- Without a shared library binary
shared_lib_extension = '*.dll' if sys.platform == 'win32' else '*.so'
string_to_match = fr'Unable to locate a shared library ({shared_lib_extension}) in'
import re
with pytest.raises(FileNotFoundError, match=re.escape(string_to_match)):
hg.generate_plugin_package(package_name='acme', plugin_dir=plugin_dir)
lib_name = 'test.dll' if sys.platform == 'win32' else 'libtest.so'
shared_library_file = artifacts_dir / lib_name
shared_library_file.write_text('')
# -- Without Config file
with pytest.raises(FileNotFoundError, match=f'Unable to locate the file plugin.yaml in'):
hg.generate_plugin_package(package_name='acme', plugin_dir=plugin_dir)
config_file = asset_dir / 'plugin.yaml'
config_file.write_text(dedent(f"""\
caption: 'ACME'
version: '1.0.0'
author: 'acme_author'
email: 'acme_email'
id: 'acme'
"""))
# -- Without Readme file
with pytest.raises(FileNotFoundError, match=f'Unable to locate the file README.md in'):
hg.generate_plugin_package(package_name='acme', plugin_dir=plugin_dir)
readme_file = asset_dir / 'README.md'
readme_file.write_text('')
# -- With an invalid shared_library name on config_file
acme_lib_name = 'acme.dll' if sys.platform == 'win32' else 'libacme.so'
hm_plugin_name = 'acme-1.0.0-win64.hmplugin' if sys.platform == 'win32' else 'acme-1.0.0-linux64.hmplugin'
with pytest.raises(SharedLibraryNotFoundError, match=f'{acme_lib_name} could not be found in *'):
hg.generate_plugin_package(package_name='acme', plugin_dir=plugin_dir)
acme_shared_library_file = artifacts_dir / acme_lib_name
acme_shared_library_file.write_text('')
# The mock below avoids having to obtain a valid dll for this test
from hookman.plugin_config import PluginInfo
mocker.patch.object(PluginInfo, '_get_plugin_id_from_dll', return_value='')
hg.generate_plugin_package(package_name='acme', plugin_dir=plugin_dir)
compressed_plugin_package = plugin_dir / hm_plugin_name
assert compressed_plugin_package.exists()
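# For reference, the plugin package layout this test assembles step by step
# (a sketch inferred from the assertions above, not an authoritative spec):
#
#   acme/
#       assets/
#           plugin.yaml
#           README.md
#       artifacts/
#           libacme.so   (acme.dll on Windows)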
def test_generate_plugin_package_invalid_version(acme_hook_specs_file, tmp_path, mocker, mock_plugin_id_from_dll):
hg = HookManGenerator(hook_spec_file_path=acme_hook_specs_file)
plugin_id = 'acme'
hg.generate_plugin_template(plugin_id, plugin_id, 'acme1', 'acme2', tmp_path)
plugin_yaml = tmp_path / 'acme/assets/plugin.yaml'
new_content = plugin_yaml.read_text().replace("version: '1.0.0'", "version: '1'")
plugin_yaml.write_text(new_content)
mocker.patch('hookman.hookman_generator.HookManGenerator._validate_package_folder', return_value=None)
with pytest.raises(ValueError, match="Version attribute does not follow semantic version, got '1'"):
hg.generate_plugin_package(plugin_id, plugin_dir=tmp_path / plugin_id)
|
py
|
1a5a4e54725760ddf15e56089a8fbbd2a37e1884
|
from docker.utils.socket import next_frame_size
from docker.utils.socket import read_exactly
from .base import BaseAPIIntegrationTest, BUSYBOX
from ..helpers import (
requires_api_version, ctrl_with, assert_cat_socket_detached_with_keys
)
class ExecTest(BaseAPIIntegrationTest):
def test_execute_command(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, ['echo', 'hello'])
assert 'Id' in res
exec_log = self.client.exec_start(res)
assert exec_log == b'hello\n'
def test_exec_command_string(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'echo hello world')
assert 'Id' in res
exec_log = self.client.exec_start(res)
assert exec_log == b'hello world\n'
def test_exec_command_as_user(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'whoami', user='default')
assert 'Id' in res
exec_log = self.client.exec_start(res)
assert exec_log == b'default\n'
def test_exec_command_as_root(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'whoami')
assert 'Id' in res
exec_log = self.client.exec_start(res)
assert exec_log == b'root\n'
def test_exec_command_streaming(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
exec_id = self.client.exec_create(id, ['echo', 'hello\nworld'])
assert 'Id' in exec_id
res = b''
for chunk in self.client.exec_start(exec_id, stream=True):
res += chunk
assert res == b'hello\nworld\n'
def test_exec_start_socket(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
container_id = container['Id']
self.client.start(container_id)
self.tmp_containers.append(container_id)
line = 'yay, interactive exec!'
# `echo` appends CRLF, `printf` doesn't
exec_id = self.client.exec_create(
container_id, ['printf', line], tty=True)
assert 'Id' in exec_id
socket = self.client.exec_start(exec_id, socket=True)
self.addCleanup(socket.close)
next_size = next_frame_size(socket)
assert next_size == len(line)
data = read_exactly(socket, next_size)
assert data.decode('utf-8') == line
def test_exec_start_detached(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
container_id = container['Id']
self.client.start(container_id)
self.tmp_containers.append(container_id)
exec_id = self.client.exec_create(
container_id, ['printf', "asdqwe"])
assert 'Id' in exec_id
response = self.client.exec_start(exec_id, detach=True)
assert response == ""
def test_exec_inspect(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exec_id = self.client.exec_create(id, ['mkdir', '/does/not/exist'])
assert 'Id' in exec_id
self.client.exec_start(exec_id)
exec_info = self.client.exec_inspect(exec_id)
assert 'ExitCode' in exec_info
assert exec_info['ExitCode'] != 0
@requires_api_version('1.25')
def test_exec_command_with_env(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'env', environment=["X=Y"])
assert 'Id' in res
exec_log = self.client.exec_start(res)
assert b'X=Y\n' in exec_log
@requires_api_version('1.35')
def test_exec_command_with_workdir(self):
container = self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True
)
self.tmp_containers.append(container)
self.client.start(container)
res = self.client.exec_create(container, 'pwd', workdir='/var/www')
exec_log = self.client.exec_start(res)
assert exec_log == b'/var/www\n'
def test_detach_with_default(self):
container = self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True
)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exec_id = self.client.exec_create(
id, 'cat', stdin=True, tty=True, stdout=True
)
sock = self.client.exec_start(exec_id, tty=True, socket=True)
self.addCleanup(sock.close)
assert_cat_socket_detached_with_keys(
sock, [ctrl_with('p'), ctrl_with('q')]
)
def test_detach_with_config_file(self):
self.client._general_configs['detachKeys'] = 'ctrl-p'
container = self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True
)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exec_id = self.client.exec_create(
id, 'cat', stdin=True, tty=True, stdout=True
)
sock = self.client.exec_start(exec_id, tty=True, socket=True)
self.addCleanup(sock.close)
assert_cat_socket_detached_with_keys(sock, [ctrl_with('p')])
def test_detach_with_arg(self):
self.client._general_configs['detachKeys'] = 'ctrl-p'
container = self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True
)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exec_id = self.client.exec_create(
id, 'cat',
stdin=True, tty=True, detach_keys='ctrl-x', stdout=True
)
sock = self.client.exec_start(exec_id, tty=True, socket=True)
self.addCleanup(sock.close)
assert_cat_socket_detached_with_keys(sock, [ctrl_with('x')])
|
py
|
1a5a4e8011a3b39286104f7cd235301225499811
|
#!/usr/bin/env python3
''' decrypts the first passage'''
from Vigenere import Vigenere
keyword_1 = 'kryptos'
keyword_2 = 'abscissa'
with open('text_b.txt', 'r') as f:
text = f.read().replace('\n', '').lower()
text = text[:text.index('?')]
# cut into 14x24 matrix
matrix = []
for i in range(14):
matrix.append(list(text[i*24:(i+1)*24]))
# rotate
matrix = zip(*matrix[::-1])
# restring it
text = ''
for line in matrix:
text += ''.join(line)
# cut into 42x8 matrix
matrix = []
for i in range(42):
matrix.append(list(text[i*8:(i+1)*8]))
# rotate
matrix = zip(*matrix[::-1])
# restring it
text = ''
for line in matrix:
text += ''.join(line)
print(text)
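# Note (added for clarity): each reshape-and-rotate pass above reads the text
# back out column-wise, i.e. it applies a route transposition; the two passes
# with different matrix dimensions appear to undo the double transposition
# applied to this passage.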
|
py
|
1a5a4edea52d55b4817a3b0002e64f4ed7d26401
|
#!/usr/bin/env python
#
# Electrum - lightweight STRAKS client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module uses functions from TLSLite (public domain)
#
# TLSLite Authors:
# Trevor Perrin
# Martin von Loewis - python 3 port
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
"""Pure-Python RSA implementation."""
import os
import math
import hashlib
from .pem import *
def SHA1(x):
return hashlib.sha1(x).digest()
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Check that os.urandom works
import zlib
length = len(zlib.compress(os.urandom(1000)))
assert(length > 900)
def getRandomBytes(howMany):
b = bytearray(os.urandom(howMany))
assert(len(b) == howMany)
return b
prngName = "os.urandom"
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(b):
total = 0
multiplier = 1
for count in range(len(b)-1, -1, -1):
byte = b[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToByteArray(n, howManyBytes=None):
"""Convert an integer into a bytearray, zero-pad to howManyBytes.
The returned bytearray may be smaller than howManyBytes, but will
not be larger. The returned bytearray will contain a big-endian
encoding of the input integer (n).
"""
if howManyBytes is None:
howManyBytes = numBytes(n)
b = bytearray(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
b[count] = int(n % 256)
n >>= 8
return b
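# Illustrative examples of the encoding described above (added for clarity):
#   numberToByteArray(258)    -> bytearray(b'\x01\x02')
#   numberToByteArray(258, 4) -> bytearray(b'\x00\x00\x01\x02')  (zero-padded)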
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
raise AssertionError()
b = bytearray(mpi[4:])
return bytesToNumber(b)
def numberToMPI(n):
b = numberToByteArray(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
b = bytearray(4+ext) + b
b[0] = (length >> 24) & 0xFF
b[1] = (length >> 16) & 0xFF
b[2] = (length >> 8) & 0xFF
b[3] = length & 0xFF
return bytes(b)
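# Sketch of the OpenSSL MPI layout produced above: a 4-byte big-endian length
# followed by the magnitude, with one extra leading zero byte whenever the
# high-order bit of the number is set. For example (illustrative):
#   numberToMPI(255) -> b'\x00\x00\x00\x02\x00\xff'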
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBits(n):
if n==0:
return 0
s = "%x" % n
return ((len(s)-1)*4) + \
{'0':0, '1':1, '2':2, '3':2,
'4':3, '5':3, '6':3, '7':3,
'8':4, '9':4, 'a':4, 'b':4,
'c':4, 'd':4, 'e':4, 'f':4,
}[s[0]]
return int(math.floor(math.log(n, 2))+1)
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
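# Quick sanity examples for the two helpers above (added for clarity):
#   numBits(255) == 8,  numBits(256) == 9
#   numBytes(255) == 1, numBytes(256) == 2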
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
return (a * b) // gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
q = d // c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
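# Example (illustrative): invMod(3, 11) == 4, since (3 * 4) % 11 == 1;
# invMod(2, 4) == 0 because 2 has no inverse modulo 4.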
def powMod(base, power, modulus):
if power < 0:
result = pow(base, power*-1, modulus)
result = invMod(result, modulus)
return result
else:
return pow(base, power, modulus)
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
sieve = list(range(n))
for count in range(2, int(math.sqrt(n))+1):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print("*", end=' ')
s, t = n-1, 0
while s % 2 == 0:
s, t = s//2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = ((2 ** (bits-1)) * 3) // 2
high = 2 ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print(".", end=' ')
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3//2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print(".", end=' ')
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
class RSAKey(object):
def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
if (n and not e) or (e and not n):
raise AssertionError()
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
self.dP = dP
self.dQ = dQ
self.qInv = qInv
self.blinder = 0
self.unblinder = 0
def __len__(self):
"""Return the length of this key in bits.
@rtype: int
"""
return numBits(self.n)
def hasPrivateKey(self):
return self.d != 0
def hashAndSign(self, bytes):
"""Hash and sign the passed-in bytes.
This requires the key to have a private component. It performs
a PKCS1-SHA1 signature on the passed-in data.
@type bytes: str or L{bytearray} of unsigned bytes
@param bytes: The value which will be hashed and signed.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1-SHA1 signature on the passed-in data.
"""
hashBytes = SHA1(bytearray(bytes))
prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes)
sigBytes = self.sign(prefixedHashBytes)
return sigBytes
def hashAndVerify(self, sigBytes, bytes):
"""Hash and verify the passed-in bytes with the signature.
This verifies a PKCS1-SHA1 signature on the passed-in data.
@type sigBytes: L{bytearray} of unsigned bytes
@param sigBytes: A PKCS1-SHA1 signature.
@type bytes: str or L{bytearray} of unsigned bytes
@param bytes: The value which will be hashed and verified.
@rtype: bool
@return: Whether the signature matches the passed-in data.
"""
hashBytes = SHA1(bytearray(bytes))
# Try it with/without the embedded NULL
prefixedHashBytes1 = self._addPKCS1SHA1Prefix(hashBytes, False)
prefixedHashBytes2 = self._addPKCS1SHA1Prefix(hashBytes, True)
result1 = self.verify(sigBytes, prefixedHashBytes1)
result2 = self.verify(sigBytes, prefixedHashBytes2)
return (result1 or result2)
def sign(self, bytes):
"""Sign the passed-in bytes.
This requires the key to have a private component. It performs
a PKCS1 signature on the passed-in data.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be signed.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1 signature on the passed-in data.
"""
if not self.hasPrivateKey():
raise AssertionError()
paddedBytes = self._addPKCS1Padding(bytes, 1)
m = bytesToNumber(paddedBytes)
if m >= self.n:
raise ValueError()
c = self._rawPrivateKeyOp(m)
sigBytes = numberToByteArray(c, numBytes(self.n))
return sigBytes
def verify(self, sigBytes, bytes):
"""Verify the passed-in bytes with the signature.
This verifies a PKCS1 signature on the passed-in data.
@type sigBytes: L{bytearray} of unsigned bytes
@param sigBytes: A PKCS1 signature.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be verified.
@rtype: bool
@return: Whether the signature matches the passed-in data.
"""
if len(sigBytes) != numBytes(self.n):
return False
paddedBytes = self._addPKCS1Padding(bytes, 1)
c = bytesToNumber(sigBytes)
if c >= self.n:
return False
m = self._rawPublicKeyOp(c)
checkBytes = numberToByteArray(m, numBytes(self.n))
return checkBytes == paddedBytes
def encrypt(self, bytes):
"""Encrypt the passed-in bytes.
This performs PKCS1 encryption of the passed-in data.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be encrypted.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1 encryption of the passed-in data.
"""
paddedBytes = self._addPKCS1Padding(bytes, 2)
m = bytesToNumber(paddedBytes)
if m >= self.n:
raise ValueError()
c = self._rawPublicKeyOp(m)
encBytes = numberToByteArray(c, numBytes(self.n))
return encBytes
def decrypt(self, encBytes):
"""Decrypt the passed-in bytes.
This requires the key to have a private component. It performs
PKCS1 decryption of the passed-in data.
@type encBytes: L{bytearray} of unsigned bytes
@param encBytes: The value which will be decrypted.
@rtype: L{bytearray} of unsigned bytes or None.
@return: A PKCS1 decryption of the passed-in data or None if
the data is not properly formatted.
"""
if not self.hasPrivateKey():
raise AssertionError()
if len(encBytes) != numBytes(self.n):
return None
c = bytesToNumber(encBytes)
if c >= self.n:
return None
m = self._rawPrivateKeyOp(c)
decBytes = numberToByteArray(m, numBytes(self.n))
#Check first two bytes
if decBytes[0] != 0 or decBytes[1] != 2:
return None
#Scan through for zero separator
for x in range(1, len(decBytes)-1):
if decBytes[x]== 0:
break
else:
return None
return decBytes[x+1:] #Return everything after the separator
# **************************************************************************
# Helper Functions for RSA Keys
# **************************************************************************
def _addPKCS1SHA1Prefix(self, bytes, withNULL=True):
# There is a long history of confusion over whether the SHA1
# algorithmIdentifier should be encoded with a NULL parameter or
# with the parameter omitted. While the original intention was
# apparently to omit it, many toolkits went the other way. TLS 1.2
# specifies the NULL should be included, and this behavior is also
# mandated in recent versions of PKCS #1, and is what tlslite has
# always implemented. Anyways, verification code should probably
# accept both. However, nothing uses this code yet, so this is
# all fairly moot.
if not withNULL:
prefixBytes = bytearray(\
[0x30,0x1f,0x30,0x07,0x06,0x05,0x2b,0x0e,0x03,0x02,0x1a,0x04,0x14])
else:
prefixBytes = bytearray(\
[0x30,0x21,0x30,0x09,0x06,0x05,0x2b,0x0e,0x03,0x02,0x1a,0x05,0x00,0x04,0x14])
prefixedBytes = prefixBytes + bytes
return prefixedBytes
def _addPKCS1Padding(self, bytes, blockType):
padLength = (numBytes(self.n) - (len(bytes)+3))
if blockType == 1: #Signature padding
pad = [0xFF] * padLength
elif blockType == 2: #Encryption padding
pad = bytearray(0)
while len(pad) < padLength:
padBytes = getRandomBytes(padLength * 2)
pad = [b for b in padBytes if b != 0]
pad = pad[:padLength]
else:
raise AssertionError()
padding = bytearray([0,blockType] + pad + [0])
paddedBytes = padding + bytes
return paddedBytes
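# The block assembled above follows the PKCS#1 v1.5 layout
#   0x00 || BT || PS || 0x00 || D
# where BT is the block type (1 for signatures, 2 for encryption), PS is the
# padding string (0xFF bytes for type 1, nonzero random bytes for type 2) and
# D is the payload. This note is added for clarity only.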
def _rawPrivateKeyOp(self, m):
#Create blinding values, on the first pass:
if not self.blinder:
self.unblinder = getRandomNumber(2, self.n)
self.blinder = powMod(invMod(self.unblinder, self.n), self.e,
self.n)
#Blind the input
m = (m * self.blinder) % self.n
#Perform the RSA operation
c = self._rawPrivateKeyOpHelper(m)
#Unblind the output
c = (c * self.unblinder) % self.n
#Update blinding values
self.blinder = (self.blinder * self.blinder) % self.n
self.unblinder = (self.unblinder * self.unblinder) % self.n
#Return the output
return c
def _rawPrivateKeyOpHelper(self, m):
#Non-CRT version
#c = powMod(m, self.d, self.n)
#CRT version (~3x faster)
s1 = powMod(m, self.dP, self.p)
s2 = powMod(m, self.dQ, self.q)
h = ((s1 - s2) * self.qInv) % self.p
c = s2 + self.q * h
return c
def _rawPublicKeyOp(self, c):
m = powMod(c, self.e, self.n)
return m
def acceptsPassword(self):
return False
def generate(bits):
key = RSAKey()
p = getRandomPrime(bits//2, False)
q = getRandomPrime(bits//2, False)
t = lcm(p-1, q-1)
key.n = p * q
key.e = 65537
key.d = invMod(key.e, t)
key.p = p
key.q = q
key.dP = key.d % (p-1)
key.dQ = key.d % (q-1)
key.qInv = invMod(q, p)
return key
generate = staticmethod(generate)
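# Minimal usage sketch (illustrative only; a 512-bit modulus is far too small
# for real use):
#
#   key = RSAKey.generate(512)
#   signature = key.hashAndSign(b"hello")
#   assert key.hashAndVerify(signature, b"hello")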
|
py
|
1a5a4f134b9cd4206f58f6505437ffa8020e95d7
|
from tsfel.feature_extraction.features import *
from numpy.testing import run_module_suite
# Implementing signals for testing features
const0 = np.zeros(20)
const1 = np.ones(20)
constNeg = np.ones(20) * (-1)
constF = np.ones(20) * 2.5
lin = np.arange(20)
lin0 = np.linspace(-10, 10, 20)
f = 5
sample = 1000
x = np.arange(0, sample, 1)
Fs = 1000
wave = np.sin(2 * np.pi * f * x / Fs)
np.random.seed(seed=10)
noiseWave = wave + np.random.normal(0, 0.1, 1000)
offsetWave = wave + 2
# ############################################### STATISTICAL FEATURES ############################################### #
def test_hist():
np.testing.assert_almost_equal(hist(const0, 10, 5), (0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 0.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(hist(const1, 10, 5), (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(hist(constNeg, 10, 5), (0.0, 0.0, 0.0, 0.0, 20.0, 0.0, 0.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(hist(constF, 10, 5), (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 0.0, 0.0))
np.testing.assert_almost_equal(hist(lin, 10, 5), (0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 2))
np.testing.assert_almost_equal(hist(wave, 10, 5), (0.0, 0.0, 0.0, 0.0, 499, 496, 5, 0.0, 0.0, 0.0), decimal=5)
np.testing.assert_almost_equal(hist(offsetWave, 10, 5), (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 499, 496, 5, 0.0), decimal=5)
np.testing.assert_almost_equal(hist(noiseWave, 10, 5), (0.0, 0.0, 0.0, 48, 446, 450, 56, 0.0, 0.0, 0.0), decimal=5)
def test_skewness():
np.testing.assert_almost_equal(skewness(const0), 0.0)
np.testing.assert_almost_equal(skewness(const1), 0.0)
np.testing.assert_almost_equal(skewness(constNeg), 0.0)
np.testing.assert_almost_equal(skewness(constF), 0.0)
np.testing.assert_almost_equal(skewness(lin), 0)
np.testing.assert_almost_equal(skewness(lin0), -1.0167718723297815e-16, decimal=5)
np.testing.assert_almost_equal(skewness(wave), -2.009718347115232e-17, decimal=5)
np.testing.assert_almost_equal(skewness(offsetWave), 9.043732562018544e-16, decimal=5)
np.testing.assert_almost_equal(skewness(noiseWave), -0.0004854111290521465, decimal=5)
def test_kurtosis():
np.testing.assert_almost_equal(kurtosis(const0), -3)
np.testing.assert_almost_equal(kurtosis(const1), -3)
np.testing.assert_almost_equal(kurtosis(constNeg), -3)
np.testing.assert_almost_equal(kurtosis(constF), -3.0)
np.testing.assert_almost_equal(kurtosis(lin), -1.206015037593985, decimal=2)
np.testing.assert_almost_equal(kurtosis(lin0), -1.2060150375939847, decimal=2)
np.testing.assert_almost_equal(kurtosis(wave), -1.501494077162359, decimal=2)
np.testing.assert_almost_equal(kurtosis(offsetWave), -1.5014940771623597, decimal=2)
np.testing.assert_almost_equal(kurtosis(noiseWave), -1.4606204906023366, decimal=2)
def test_mean():
np.testing.assert_almost_equal(calc_mean(const0), 0.0)
np.testing.assert_almost_equal(calc_mean(const1), 1.0)
np.testing.assert_almost_equal(calc_mean(constNeg), -1.0)
np.testing.assert_almost_equal(calc_mean(constF), 2.5)
np.testing.assert_almost_equal(calc_mean(lin), 9.5)
np.testing.assert_almost_equal(calc_mean(lin0), -3.552713678800501e-16, decimal=5)
np.testing.assert_almost_equal(calc_mean(wave), 7.105427357601002e-18, decimal=5)
np.testing.assert_almost_equal(calc_mean(offsetWave), 2.0, decimal=5)
np.testing.assert_almost_equal(calc_mean(noiseWave), -0.0014556635615470554, decimal=5)
def test_median():
np.testing.assert_almost_equal(calc_median(const0), 0.0)
np.testing.assert_almost_equal(calc_median(const1), 1.0)
np.testing.assert_almost_equal(calc_median(constNeg), -1.0)
np.testing.assert_almost_equal(calc_median(constF), 2.5)
np.testing.assert_almost_equal(calc_median(lin), 9.5)
np.testing.assert_almost_equal(calc_median(lin0), -3.552713678800501e-16, decimal=5)
np.testing.assert_almost_equal(calc_median(wave), 7.105427357601002e-18, decimal=5)
np.testing.assert_almost_equal(calc_median(offsetWave), 2.0, decimal=5)
np.testing.assert_almost_equal(calc_median(noiseWave), 0.013846093997438328, decimal=5)
def test_max():
np.testing.assert_almost_equal(calc_max(const0), 0.0)
np.testing.assert_almost_equal(calc_max(const1), 1.0)
np.testing.assert_almost_equal(calc_max(constNeg), -1.0)
np.testing.assert_almost_equal(calc_max(constF), 2.5)
np.testing.assert_almost_equal(calc_max(lin), 19)
np.testing.assert_almost_equal(calc_max(lin0), 10.0, decimal=5)
np.testing.assert_almost_equal(calc_max(wave), 1.0, decimal=5)
np.testing.assert_almost_equal(calc_max(noiseWave), 1.221757617217142, decimal=5)
np.testing.assert_almost_equal(calc_max(offsetWave), 3.0, decimal=5)
def test_min():
np.testing.assert_almost_equal(calc_min(const0), 0.0)
np.testing.assert_almost_equal(calc_min(const1), 1.0)
np.testing.assert_almost_equal(calc_min(constNeg), -1.0)
np.testing.assert_almost_equal(calc_min(constF), 2.5)
np.testing.assert_almost_equal(calc_min(lin), 0)
np.testing.assert_almost_equal(calc_min(lin0), -10.0, decimal=5)
np.testing.assert_almost_equal(calc_min(wave), -1.0, decimal=5)
np.testing.assert_almost_equal(calc_min(noiseWave), -1.2582533627830566, decimal=5)
np.testing.assert_almost_equal(calc_min(offsetWave), 1.0, decimal=5)
def test_variance():
np.testing.assert_almost_equal(calc_var(const0), 0.0)
np.testing.assert_almost_equal(calc_var(const1), 0.0)
np.testing.assert_almost_equal(calc_var(constNeg), 0.0)
np.testing.assert_almost_equal(calc_var(constF), 0.0)
np.testing.assert_almost_equal(calc_var(lin), 33.25)
np.testing.assert_almost_equal(calc_var(lin0), 36.84210526315789, decimal=5)
np.testing.assert_almost_equal(calc_var(wave), 0.5, decimal=5)
np.testing.assert_almost_equal(calc_var(offsetWave), 0.5, decimal=5)
np.testing.assert_almost_equal(calc_var(noiseWave), 0.5081167177369529, decimal=5)
def test_std():
np.testing.assert_almost_equal(calc_std(const0), 0.0)
np.testing.assert_almost_equal(calc_std(const1), 0.0)
np.testing.assert_almost_equal(calc_std(constNeg), 0.0)
np.testing.assert_almost_equal(calc_std(constF), 0.0)
np.testing.assert_almost_equal(calc_std(lin), 5.766281297335398)
np.testing.assert_almost_equal(calc_std(lin0), 6.069769786668839, decimal=5)
np.testing.assert_almost_equal(calc_std(wave), 0.7071067811865476, decimal=5)
np.testing.assert_almost_equal(calc_std(offsetWave), 0.7071067811865476, decimal=5)
np.testing.assert_almost_equal(calc_std(noiseWave), 0.7128230620125536, decimal=5)
def test_interq_range():
np.testing.assert_almost_equal(interq_range(const0), 0.0)
np.testing.assert_almost_equal(interq_range(const1), 0.0)
np.testing.assert_almost_equal(interq_range(constNeg), 0.0)
np.testing.assert_almost_equal(interq_range(constF), 0.0)
np.testing.assert_almost_equal(interq_range(lin), 9.5)
np.testing.assert_almost_equal(interq_range(lin0), 10.0, decimal=5)
np.testing.assert_almost_equal(interq_range(wave), 1.414213562373095, decimal=5)
np.testing.assert_almost_equal(interq_range(offsetWave), 1.414213562373095, decimal=5)
np.testing.assert_almost_equal(interq_range(noiseWave), 1.4277110228590328, decimal=5)
def test_mean_abs_diff():
np.testing.assert_almost_equal(mean_abs_diff(const0), 0.0)
np.testing.assert_almost_equal(mean_abs_diff(const1), 0.0)
np.testing.assert_almost_equal(mean_abs_diff(constNeg), 0.0)
np.testing.assert_almost_equal(mean_abs_diff(constF), 0.0)
np.testing.assert_almost_equal(mean_abs_diff(lin), 1.0)
np.testing.assert_almost_equal(mean_abs_diff(lin0), 1.0526315789473684, decimal=5)
np.testing.assert_almost_equal(mean_abs_diff(wave), 0.019988577818740614, decimal=5)
np.testing.assert_almost_equal(mean_abs_diff(noiseWave), 0.10700252903161511, decimal=5)
np.testing.assert_almost_equal(mean_abs_diff(offsetWave), 0.019988577818740614, decimal=5)
def test_mean_abs_deviation():
np.testing.assert_almost_equal(mean_abs_deviation(const0), 0.0)
np.testing.assert_almost_equal(mean_abs_deviation(const1), 0.0)
np.testing.assert_almost_equal(mean_abs_deviation(constNeg), 0.0)
np.testing.assert_almost_equal(mean_abs_deviation(constF), 0.0)
np.testing.assert_almost_equal(mean_abs_deviation(lin), 5.0)
np.testing.assert_almost_equal(mean_abs_deviation(lin0), 5.263157894736842, decimal=5)
np.testing.assert_almost_equal(mean_abs_deviation(wave), 0.6365674116287157, decimal=5)
np.testing.assert_almost_equal(mean_abs_deviation(noiseWave), 0.6392749078483896, decimal=5)
np.testing.assert_almost_equal(mean_abs_deviation(offsetWave), 0.6365674116287157, decimal=5)
def test_calc_median_abs_deviation():
np.testing.assert_almost_equal(median_abs_deviation(const0), 0.0)
np.testing.assert_almost_equal(median_abs_deviation(const1), 0.0)
np.testing.assert_almost_equal(median_abs_deviation(constNeg), 0.0)
np.testing.assert_almost_equal(median_abs_deviation(constF), 0.0)
np.testing.assert_almost_equal(median_abs_deviation(lin), 5.0)
np.testing.assert_almost_equal(median_abs_deviation(lin0), 5.2631578947368425, decimal=5)
np.testing.assert_almost_equal(median_abs_deviation(wave), 0.7071067811865475, decimal=5)
np.testing.assert_almost_equal(median_abs_deviation(offsetWave), 0.7071067811865475, decimal=5)
np.testing.assert_almost_equal(median_abs_deviation(noiseWave), 0.7068117164205888, decimal=5)
def test_rms():
np.testing.assert_almost_equal(rms(const0), 0.0)
np.testing.assert_almost_equal(rms(const1), 1.0)
np.testing.assert_almost_equal(rms(constNeg), 1.0)
np.testing.assert_almost_equal(rms(constF), 2.5)
np.testing.assert_almost_equal(rms(lin), 11.113055385446435)
np.testing.assert_almost_equal(rms(lin0), 6.06976978666884, decimal=5)
np.testing.assert_almost_equal(rms(wave), 0.7071067811865476, decimal=5)
np.testing.assert_almost_equal(rms(offsetWave), 2.1213203435596424, decimal=5)
np.testing.assert_almost_equal(rms(noiseWave), 0.7128245483240299, decimal=5)
def test_ecdf():
np.testing.assert_almost_equal(ecdf(const0), (0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5))
np.testing.assert_almost_equal(ecdf(const1), (0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5))
np.testing.assert_almost_equal(ecdf(constNeg), (0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5))
np.testing.assert_almost_equal(ecdf(constF), (0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5))
np.testing.assert_almost_equal(ecdf(lin), (0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5))
np.testing.assert_almost_equal(ecdf(lin0), (0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5))
np.testing.assert_almost_equal(ecdf(wave), (0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009,0.01))
np.testing.assert_almost_equal(ecdf(offsetWave), (0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009,0.01))
np.testing.assert_almost_equal(ecdf(noiseWave), (0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01))
def test_ecdf_percentile():
np.testing.assert_almost_equal(ecdf_percentile(const0), 0)
np.testing.assert_almost_equal(ecdf_percentile(const1), 1)
np.testing.assert_almost_equal(ecdf_percentile(constNeg), -1)
np.testing.assert_almost_equal(ecdf_percentile(constF), 2.5)
np.testing.assert_almost_equal(ecdf_percentile(lin), (3, 15))
np.testing.assert_almost_equal(ecdf_percentile(lin0), (-6.8421053, 5.7894737))
np.testing.assert_almost_equal(ecdf_percentile(wave), (-0.809017, 0.809017))
np.testing.assert_almost_equal(ecdf_percentile(offsetWave), (1.1909830056250523, 2.809016994374947))
np.testing.assert_almost_equal(ecdf_percentile(noiseWave), (-0.8095410722491809, 0.796916231269631))
def test_ecdf_slope():
np.testing.assert_almost_equal(ecdf_slope(const0), np.inf)
np.testing.assert_almost_equal(ecdf_slope(const1), np.inf)
np.testing.assert_almost_equal(ecdf_slope(constNeg), np.inf)
np.testing.assert_almost_equal(ecdf_slope(constF), np.inf)
np.testing.assert_almost_equal(ecdf_slope(lin), 0.05)
np.testing.assert_almost_equal(ecdf_slope(lin0), 0.0475)
np.testing.assert_almost_equal(ecdf_slope(wave), 0.3535534)
np.testing.assert_almost_equal(ecdf_slope(offsetWave), 0.3535534)
np.testing.assert_almost_equal(ecdf_slope(noiseWave), 0.3594095)
def test_ecdf_percentile_count():
np.testing.assert_almost_equal(ecdf_percentile_count(const0), (0, 0))
np.testing.assert_almost_equal(ecdf_percentile_count(const1), (1, 1))
np.testing.assert_almost_equal(ecdf_percentile_count(constNeg), (-1, -1))
np.testing.assert_almost_equal(ecdf_percentile_count(constF), (2.5, 2.5))
np.testing.assert_almost_equal(ecdf_percentile_count(lin), (4, 16))
np.testing.assert_almost_equal(ecdf_percentile_count(lin0), (4, 16))
np.testing.assert_almost_equal(ecdf_percentile_count(wave), (200, 800))
np.testing.assert_almost_equal(ecdf_percentile_count(offsetWave), (200, 800))
np.testing.assert_almost_equal(ecdf_percentile_count(noiseWave), (200, 800))
# ################################################ TEMPORAL FEATURES ################################################# #
def test_distance():
np.testing.assert_almost_equal(distance(const0), 19.0)
np.testing.assert_almost_equal(distance(const1), 19.0)
np.testing.assert_almost_equal(distance(constNeg), 19.0)
np.testing.assert_almost_equal(distance(constF), 19.0)
np.testing.assert_almost_equal(distance(lin), 26.87005768508881)
np.testing.assert_almost_equal(distance(lin0), 27.586228448267438, decimal=5)
np.testing.assert_almost_equal(distance(wave), 999.2461809866238, decimal=5)
np.testing.assert_almost_equal(distance(offsetWave), 999.2461809866238, decimal=5)
np.testing.assert_almost_equal(distance(noiseWave), 1007.8711901383033, decimal=5)
def test_minpeaks():
np.testing.assert_almost_equal(minpeaks(const0), 0.0)
np.testing.assert_almost_equal(minpeaks(const1), 0.0)
np.testing.assert_almost_equal(minpeaks(constNeg), 0.0)
np.testing.assert_almost_equal(minpeaks(constF), 0.0)
np.testing.assert_almost_equal(minpeaks(lin), 0.0)
np.testing.assert_almost_equal(minpeaks(lin0), 0.0, decimal=5)
np.testing.assert_almost_equal(minpeaks(wave), 5, decimal=5)
np.testing.assert_almost_equal(minpeaks(offsetWave), 5, decimal=5)
np.testing.assert_almost_equal(minpeaks(noiseWave), 323, decimal=5)
def test_maxpeaks():
np.testing.assert_almost_equal(maxpeaks(const0), 0.0)
np.testing.assert_almost_equal(maxpeaks(const1), 0.0)
np.testing.assert_almost_equal(maxpeaks(constNeg), 0.0)
np.testing.assert_almost_equal(maxpeaks(constF), 0.0)
np.testing.assert_almost_equal(maxpeaks(lin), 0.0)
np.testing.assert_almost_equal(maxpeaks(lin0), 0.0, decimal=5)
np.testing.assert_almost_equal(maxpeaks(wave), 5, decimal=5)
np.testing.assert_almost_equal(maxpeaks(offsetWave), 5, decimal=5)
np.testing.assert_almost_equal(maxpeaks(noiseWave), 322, decimal=5)
def test_centroid():
np.testing.assert_almost_equal(calc_centroid(const0, Fs), 0.0)
np.testing.assert_almost_equal(calc_centroid(const1, Fs), 0.009499999999999998)
np.testing.assert_almost_equal(calc_centroid(constNeg, Fs), 0.009499999999999998)
np.testing.assert_almost_equal(calc_centroid(constF, Fs), 0.0095)
np.testing.assert_almost_equal(calc_centroid(lin, Fs), 0.014615384615384615)
np.testing.assert_almost_equal(calc_centroid(lin0, Fs), 0.0095, decimal=5)
np.testing.assert_almost_equal(calc_centroid(wave, Fs), 0.5000000000000001, decimal=5)
np.testing.assert_almost_equal(calc_centroid(offsetWave, Fs), 0.47126367059427926, decimal=5)
np.testing.assert_almost_equal(calc_centroid(noiseWave, Fs), 0.4996034303128802, decimal=5)
def test_mean_diff():
np.testing.assert_almost_equal(mean_diff(const0), 0.0)
np.testing.assert_almost_equal(mean_diff(const1), 0.0)
np.testing.assert_almost_equal(mean_diff(constNeg), 0.0)
np.testing.assert_almost_equal(mean_diff(constF), 0.0)
np.testing.assert_almost_equal(mean_diff(lin), 1.0)
np.testing.assert_almost_equal(mean_diff(lin0), 1.0526315789473684, decimal=5)
np.testing.assert_almost_equal(mean_diff(wave), -3.1442201279407477e-05, decimal=5)
np.testing.assert_almost_equal(mean_diff(offsetWave), -3.1442201279407036e-05, decimal=5)
np.testing.assert_almost_equal(mean_diff(noiseWave), -0.00010042477181949707, decimal=5)
def test_median_diff():
np.testing.assert_almost_equal(median_diff(const0), 0.0)
np.testing.assert_almost_equal(median_diff(const1), 0.0)
np.testing.assert_almost_equal(median_diff(constNeg), 0.0)
np.testing.assert_almost_equal(median_diff(constF), 0.0)
np.testing.assert_almost_equal(median_diff(lin), 1.0)
np.testing.assert_almost_equal(median_diff(lin0), 1.0526315789473684, decimal=5)
np.testing.assert_almost_equal(median_diff(wave), -0.0004934396342684, decimal=5)
np.testing.assert_almost_equal(median_diff(offsetWave), -0.0004934396342681779, decimal=5)
np.testing.assert_almost_equal(median_diff(noiseWave), -0.004174819648320949, decimal=5)
def test_calc_mean_abs_diff():
np.testing.assert_almost_equal(mean_abs_diff(const0), 0.0)
np.testing.assert_almost_equal(mean_abs_diff(const1), 0.0)
np.testing.assert_almost_equal(mean_abs_diff(constNeg), 0.0)
np.testing.assert_almost_equal(mean_abs_diff(constF), 0.0)
np.testing.assert_almost_equal(mean_abs_diff(lin), 1.0)
np.testing.assert_almost_equal(mean_abs_diff(lin0), 1.0526315789473684, decimal=5)
np.testing.assert_almost_equal(mean_abs_diff(wave), 0.019988577818740614, decimal=5)
np.testing.assert_almost_equal(mean_abs_diff(offsetWave), 0.019988577818740614, decimal=5)
np.testing.assert_almost_equal(mean_abs_diff(noiseWave), 0.10700252903161508, decimal=5)
def test_median_abs_diff():
np.testing.assert_almost_equal(median_abs_diff(const0), 0.0)
np.testing.assert_almost_equal(median_abs_diff(const1), 0.0)
np.testing.assert_almost_equal(median_abs_diff(constNeg), 0.0)
np.testing.assert_almost_equal(median_abs_diff(constF), 0.0)
np.testing.assert_almost_equal(median_abs_diff(lin), 1.0)
np.testing.assert_almost_equal(median_abs_diff(lin0), 1.0526315789473681, decimal=5)
np.testing.assert_almost_equal(median_abs_diff(wave), 0.0218618462348652, decimal=5)
np.testing.assert_almost_equal(median_abs_diff(offsetWave), 0.021861846234865645, decimal=5)
np.testing.assert_almost_equal(median_abs_diff(noiseWave), 0.08958750592592835, decimal=5)
def test_sum_abs_diff():
np.testing.assert_almost_equal(sum_abs_diff(const0), 0.0)
np.testing.assert_almost_equal(sum_abs_diff(const1), 0.0)
np.testing.assert_almost_equal(sum_abs_diff(constNeg), 0.0)
np.testing.assert_almost_equal(sum_abs_diff(constF), 0.0)
np.testing.assert_almost_equal(sum_abs_diff(lin), 19)
np.testing.assert_almost_equal(sum_abs_diff(lin0), 20.0, decimal=5)
np.testing.assert_almost_equal(sum_abs_diff(wave), 19.968589240921872, decimal=5)
np.testing.assert_almost_equal(sum_abs_diff(offsetWave), 19.968589240921872, decimal=5)
np.testing.assert_almost_equal(sum_abs_diff(noiseWave), 106.89552650258346, decimal=5)
def test_zerocross():
np.testing.assert_almost_equal(zero_cross(const0), 0.0)
np.testing.assert_almost_equal(zero_cross(const1), 0.0)
np.testing.assert_almost_equal(zero_cross(constNeg), 0.0)
np.testing.assert_almost_equal(zero_cross(constF), 0.0)
np.testing.assert_almost_equal(zero_cross(lin), 1.0)
np.testing.assert_almost_equal(zero_cross(lin0), 1.0, decimal=5)
np.testing.assert_almost_equal(zero_cross(wave), 10, decimal=5)
np.testing.assert_almost_equal(zero_cross(offsetWave), 0.0, decimal=5)
np.testing.assert_almost_equal(zero_cross(noiseWave), 38, decimal=5)
def test_autocorr():
np.testing.assert_almost_equal(autocorr(const0), 0.0)
np.testing.assert_almost_equal(autocorr(const1), 20.0)
np.testing.assert_almost_equal(autocorr(constNeg), 20.0)
np.testing.assert_almost_equal(autocorr(constF), 125.0)
np.testing.assert_almost_equal(autocorr(lin), 2470.0)
np.testing.assert_almost_equal(autocorr(lin0), 736.8421052631579, decimal=0)
np.testing.assert_almost_equal(autocorr(wave), 500.5, decimal=0)
np.testing.assert_almost_equal(autocorr(offsetWave), 4500.0, decimal=0)
np.testing.assert_almost_equal(autocorr(noiseWave), 508.6149018530489, decimal=0)
def test_auc():
np.testing.assert_almost_equal(auc(const0, Fs), 0.0)
np.testing.assert_almost_equal(auc(const1, Fs), 9.518999999999998)
np.testing.assert_almost_equal(auc(constNeg, Fs), -9.518999999999998)
np.testing.assert_almost_equal(auc(constF, Fs), 23.797500000000003)
np.testing.assert_almost_equal(auc(lin, Fs), 95.171)
np.testing.assert_almost_equal(auc(lin0, Fs), 4.989999999999997)
np.testing.assert_almost_equal(auc(wave, Fs), 3.1410759074645966e-05)
np.testing.assert_almost_equal(auc(offsetWave, Fs), 1000.998031410759)
np.testing.assert_almost_equal(auc(noiseWave, Fs), -0.7958996038449087)
def test_abs_energy():
np.testing.assert_almost_equal(abs_energy(const0), 0.0)
np.testing.assert_almost_equal(abs_energy(const1), 20.0)
np.testing.assert_almost_equal(abs_energy(constNeg), 20.0)
np.testing.assert_almost_equal(abs_energy(constF), 125.0)
np.testing.assert_almost_equal(abs_energy(lin), 2470)
np.testing.assert_almost_equal(abs_energy(lin0), 736.8421052631579)
np.testing.assert_almost_equal(abs_energy(wave), 500.0)
np.testing.assert_almost_equal(abs_energy(offsetWave), 4500.0)
np.testing.assert_almost_equal(abs_energy(noiseWave), 508.11883669335725)
def test_pk_pk_distance():
np.testing.assert_almost_equal(pk_pk_distance(const0), 0.0)
np.testing.assert_almost_equal(pk_pk_distance(const1), 0.0)
np.testing.assert_almost_equal(pk_pk_distance(constNeg), 0.0)
np.testing.assert_almost_equal(pk_pk_distance(constF), 0.0)
np.testing.assert_almost_equal(pk_pk_distance(lin), 19)
np.testing.assert_almost_equal(pk_pk_distance(lin0), 20.0)
np.testing.assert_almost_equal(pk_pk_distance(wave), 2.0)
np.testing.assert_almost_equal(pk_pk_distance(offsetWave), 2.0)
np.testing.assert_almost_equal(pk_pk_distance(noiseWave), 2.4800109800001993)
def test_slope():
np.testing.assert_almost_equal(slope(const0), 0.0)
np.testing.assert_almost_equal(slope(const1), -8.935559365603017e-18)
np.testing.assert_almost_equal(slope(constNeg), 8.935559365603017e-18)
np.testing.assert_almost_equal(slope(constF), 1.7871118731206033e-17)
np.testing.assert_almost_equal(slope(lin), 1.0)
np.testing.assert_almost_equal(slope(lin0), 1.0526315789473686)
np.testing.assert_almost_equal(slope(wave), -0.0003819408289180587)
np.testing.assert_almost_equal(slope(offsetWave), -0.00038194082891805853)
np.testing.assert_almost_equal(slope(noiseWave), -0.00040205425841671337)
def test_entropy():
np.testing.assert_almost_equal(entropy(const0), 0.0)
np.testing.assert_almost_equal(entropy(const1), 0.0)
np.testing.assert_almost_equal(entropy(constNeg), 0.0)
np.testing.assert_almost_equal(entropy(constF), 0.0)
np.testing.assert_almost_equal(entropy(lin), 0.994983274605318)
np.testing.assert_almost_equal(entropy(lin0), 0.994983274605318)
np.testing.assert_almost_equal(entropy(wave), 0.9972021515128497)
np.testing.assert_almost_equal(entropy(offsetWave), 0.99720215151285)
np.testing.assert_almost_equal(entropy(noiseWave), 0.9957000733996481)
# ################################################ SPECTRAL FEATURES ################################################# #
def test_max_fre():
np.testing.assert_almost_equal(max_frequency(const0, Fs), 0.0)
np.testing.assert_almost_equal(max_frequency(const1, Fs), 0.0)
np.testing.assert_almost_equal(max_frequency(constNeg, Fs), 0.0)
np.testing.assert_almost_equal(max_frequency(constF, Fs), 0.0)
np.testing.assert_almost_equal(max_frequency(lin, Fs), 444.44444444444446)
np.testing.assert_almost_equal(max_frequency(lin0, Fs), 500.0, decimal=5)
np.testing.assert_almost_equal(max_frequency(wave, Fs), 5.0100200400801596, decimal=5)
np.testing.assert_almost_equal(max_frequency(offsetWave, Fs), 5.0100200400801596, decimal=5)
np.testing.assert_almost_equal(max_frequency(noiseWave, Fs), 464.9298597194388, decimal=5)
np.testing.assert_almost_equal(max_frequency(x, Fs), 344.689378757515, decimal=1)
def test_med_fre():
np.testing.assert_almost_equal(median_frequency(const0, Fs), 0.0)
np.testing.assert_almost_equal(median_frequency(const1, Fs), 0.0)
np.testing.assert_almost_equal(median_frequency(constNeg, Fs), 0.0)
np.testing.assert_almost_equal(median_frequency(constF, Fs), 0.0)
np.testing.assert_almost_equal(median_frequency(lin, Fs), 55.55555555555556)
np.testing.assert_almost_equal(median_frequency(lin0, Fs), 166.66666666666669, decimal=5)
np.testing.assert_almost_equal(median_frequency(wave, Fs), 5.0100200400801596, decimal=5)
np.testing.assert_almost_equal(median_frequency(offsetWave, Fs), 0.0, decimal=5)
np.testing.assert_almost_equal(median_frequency(noiseWave, Fs), 146.29258517034066, decimal=5)
np.testing.assert_almost_equal(median_frequency(x, Fs), 4.008016032064128, decimal=1)
def test_fund_fre():
np.testing.assert_almost_equal(fundamental_frequency(const0, 1), 0.0)
np.testing.assert_almost_equal(fundamental_frequency(const1, 1), 0.0)
np.testing.assert_almost_equal(fundamental_frequency(constNeg, Fs), 0.0)
np.testing.assert_almost_equal(fundamental_frequency(constF, Fs), 0.0)
np.testing.assert_almost_equal(fundamental_frequency(lin, Fs), 0.0)
np.testing.assert_almost_equal(fundamental_frequency(lin0, Fs), 0.0, decimal=5)
np.testing.assert_almost_equal(fundamental_frequency(wave, Fs), 5.0100200400801596, decimal=1)
np.testing.assert_almost_equal(fundamental_frequency(offsetWave, Fs), 5.0100200400801596, decimal=1)
np.testing.assert_almost_equal(fundamental_frequency(noiseWave, Fs), 5.0100200400801596, decimal=1)
def test_power_spec():
np.testing.assert_almost_equal(max_power_spectrum(const0, Fs), 0.0)
np.testing.assert_almost_equal(max_power_spectrum(const1, Fs), 0.0)
np.testing.assert_almost_equal(max_power_spectrum(constNeg, Fs), 0.0)
np.testing.assert_almost_equal(max_power_spectrum(constF, Fs), 0.0)
np.testing.assert_almost_equal(max_power_spectrum(lin, Fs), 0.004621506382612649)
np.testing.assert_almost_equal(max_power_spectrum(lin0, Fs), 0.0046215063826126525, decimal=5)
np.testing.assert_almost_equal(max_power_spectrum(wave, Fs), 0.6666666666666667, decimal=5)
np.testing.assert_almost_equal(max_power_spectrum(offsetWave, Fs), 0.6666666666666667, decimal=5)
np.testing.assert_almost_equal(max_power_spectrum(noiseWave, Fs), 0.6570878541643916, decimal=5)
def test_total_energy():
np.testing.assert_almost_equal(total_energy(const0, Fs), 0.0)
np.testing.assert_almost_equal(total_energy(const1, Fs), 1052.6315789473686)
np.testing.assert_almost_equal(total_energy(constNeg, Fs), 1052.6315789473686)
np.testing.assert_almost_equal(total_energy(constF, Fs), 6578.9473684210525)
np.testing.assert_almost_equal(total_energy(lin, Fs), 130000.0)
np.testing.assert_almost_equal(total_energy(lin0, Fs), 38781.16343490305, decimal=5)
np.testing.assert_almost_equal(total_energy(wave, Fs), 500.5005005005005, decimal=5)
np.testing.assert_almost_equal(total_energy(offsetWave, Fs), 4504.504504504504, decimal=5)
np.testing.assert_almost_equal(total_energy(noiseWave, Fs), 508.6274641575148, decimal=5)
def test_spectral_centroid():
np.testing.assert_almost_equal(spectral_centroid(const0, Fs), 0.0)
np.testing.assert_almost_equal(spectral_centroid(const1, Fs), 2.7476856540265033e-14)
np.testing.assert_almost_equal(spectral_centroid(constNeg, Fs), 2.7476856540265033e-14)
np.testing.assert_almost_equal(spectral_centroid(constF, Fs), 2.4504208511457478e-14)
np.testing.assert_almost_equal(spectral_centroid(lin, Fs), 95.77382394996009)
np.testing.assert_almost_equal(spectral_centroid(lin0, Fs), 189.7228259594313, decimal=5)
np.testing.assert_almost_equal(spectral_centroid(wave, Fs), 5.010020040084022, decimal=5)
np.testing.assert_almost_equal(spectral_centroid(offsetWave, Fs), 1.0020040080169172, decimal=5)
np.testing.assert_almost_equal(spectral_centroid(noiseWave, Fs), 181.12036927310848, decimal=5)
def test_spectral_spread():
np.testing.assert_almost_equal(spectral_spread(const0, Fs), 0.0, decimal=5)
np.testing.assert_almost_equal(spectral_spread(const1, Fs), 2.811883163207112e-06, decimal=5)
np.testing.assert_almost_equal(spectral_spread(constNeg, Fs), 2.811883163207112e-06, decimal=5)
np.testing.assert_almost_equal(spectral_spread(constF, Fs), 2.657703172211011e-06, decimal=5)
np.testing.assert_almost_equal(spectral_spread(lin, Fs), 137.9288076645223, decimal=5)
np.testing.assert_almost_equal(spectral_spread(lin0, Fs), 140.93247375966078, decimal=5)
np.testing.assert_almost_equal(spectral_spread(wave, Fs), 3.585399057660381e-05, decimal=5)
np.testing.assert_almost_equal(spectral_spread(offsetWave, Fs), 2.004008016105514, decimal=5)
np.testing.assert_almost_equal(spectral_spread(noiseWave, Fs), 165.6402040682083, decimal=5)
def test_spectral_skewness():
np.testing.assert_almost_equal(spectral_skewness(const0, Fs), 0.0, decimal=5)
np.testing.assert_almost_equal(spectral_skewness(const1, Fs), 0.0, decimal=5)
np.testing.assert_almost_equal(spectral_skewness(constNeg, Fs), 0.0, decimal=5)
np.testing.assert_almost_equal(spectral_skewness(constF, Fs), 0.0, decimal=5)
np.testing.assert_almost_equal(spectral_skewness(lin, Fs), 1.5090650071326563, decimal=5)
np.testing.assert_almost_equal(spectral_skewness(lin0, Fs), 0.8140329168647044, decimal=5)
np.testing.assert_almost_equal(spectral_skewness(wave, Fs), 10643315.707158063, decimal=1)
np.testing.assert_almost_equal(spectral_skewness(offsetWave, Fs), 1.5000000137542306, decimal=1)
np.testing.assert_almost_equal(spectral_skewness(noiseWave, Fs), 0.4126776686583098, decimal=1)
def test_spectral_kurtosis():
np.testing.assert_almost_equal(spectral_kurtosis(const0, Fs), 0.0, decimal=5)
np.testing.assert_almost_equal(spectral_kurtosis(const1, Fs), 0.0, decimal=5)
np.testing.assert_almost_equal(spectral_kurtosis(constNeg, Fs), 0.0, decimal=5)
np.testing.assert_almost_equal(spectral_kurtosis(constF, Fs), 0.0, decimal=5)
np.testing.assert_almost_equal(spectral_kurtosis(lin, Fs), 4.209140226148914, decimal=0)
np.testing.assert_almost_equal(spectral_kurtosis(lin0, Fs), 2.4060168768515413, decimal=5)
np.testing.assert_almost_equal(spectral_kurtosis(wave, Fs), 120959227206031.11, decimal=1)
np.testing.assert_almost_equal(spectral_kurtosis(offsetWave, Fs), 3.2500028252333513, decimal=5)
np.testing.assert_almost_equal(spectral_kurtosis(noiseWave, Fs), 1.7251592171239667, decimal=5)
def test_spectral_slope():
np.testing.assert_almost_equal(spectral_slope(const0, Fs), 0.0)
np.testing.assert_almost_equal(spectral_slope(const1, Fs), -0.0009818181818181818)
np.testing.assert_almost_equal(spectral_slope(constNeg, Fs), -0.0009818181818181818)
np.testing.assert_almost_equal(spectral_slope(constF, Fs), -0.0009818181818181816)
np.testing.assert_almost_equal(spectral_slope(lin, Fs), -0.0006056882550328839)
np.testing.assert_almost_equal(spectral_slope(lin0, Fs), -0.00023672490168659717, decimal=1)
np.testing.assert_almost_equal(spectral_slope(wave, Fs), -2.3425149700598465e-05, decimal=5)
np.testing.assert_almost_equal(spectral_slope(offsetWave, Fs), -2.380838323353288e-05, decimal=5)
np.testing.assert_almost_equal(spectral_slope(noiseWave, Fs), -6.586047565550932e-06, decimal=5)
def test_spectral_decrease():
np.testing.assert_almost_equal(spectral_decrease(const0, Fs), 0.0)
np.testing.assert_almost_equal(spectral_decrease(const1, Fs), 0.0)
np.testing.assert_almost_equal(spectral_decrease(constNeg, Fs), 0.0)
np.testing.assert_almost_equal(spectral_decrease(constF, Fs), 0.0)
np.testing.assert_almost_equal(spectral_decrease(lin, Fs), -2.255518236004341)
np.testing.assert_almost_equal(spectral_decrease(lin0, Fs), 0.5195484076294969, decimal=5)
np.testing.assert_almost_equal(spectral_decrease(wave, Fs), 0.19999999999999687, decimal=5)
np.testing.assert_almost_equal(spectral_decrease(offsetWave, Fs), -26.963293719961584, decimal=5)
np.testing.assert_almost_equal(spectral_decrease(noiseWave, Fs), 0.06053938231990085, decimal=5)
def test_spectral_roll_on():
np.testing.assert_almost_equal(spectral_roll_on(const0, Fs), 0.0)
np.testing.assert_almost_equal(spectral_roll_on(const1, Fs), 0.0)
np.testing.assert_almost_equal(spectral_roll_on(constNeg, Fs), 0.0)
np.testing.assert_almost_equal(spectral_roll_on(constF, Fs), 0.0)
np.testing.assert_almost_equal(spectral_roll_on(lin, Fs), 0.0)
np.testing.assert_almost_equal(spectral_roll_on(lin0, Fs), 55.55555555555556, decimal=5)
np.testing.assert_almost_equal(spectral_roll_on(wave, Fs), 5.0100200400801596, decimal=5)
np.testing.assert_almost_equal(spectral_roll_on(offsetWave, Fs), 0.0, decimal=5)
np.testing.assert_almost_equal(spectral_roll_on(noiseWave, Fs), 5.0100200400801596, decimal=5)
def test_spectral_roll_off():
np.testing.assert_almost_equal(spectral_roll_off(const0, Fs), 0.0)
np.testing.assert_almost_equal(spectral_roll_off(const1, Fs), 0.0)
np.testing.assert_almost_equal(spectral_roll_off(constNeg, Fs), 0.0)
np.testing.assert_almost_equal(spectral_roll_off(constF, Fs), 0.0)
np.testing.assert_almost_equal(spectral_roll_off(lin, Fs), 444.44444444444446)
np.testing.assert_almost_equal(spectral_roll_off(lin0, Fs), 500.0, decimal=5)
np.testing.assert_almost_equal(spectral_roll_off(wave, Fs), 5.0100200400801596, decimal=5)
np.testing.assert_almost_equal(spectral_roll_off(offsetWave, Fs), 5.0100200400801596, decimal=5)
np.testing.assert_almost_equal(spectral_roll_off(noiseWave, Fs), 464.9298597194388, decimal=5)
def test_spectral_distance():
np.testing.assert_almost_equal(spectral_distance(const0, Fs), 0.0)
np.testing.assert_almost_equal(spectral_distance(const1, Fs), -100)
np.testing.assert_almost_equal(spectral_distance(constNeg, Fs), -100)
np.testing.assert_almost_equal(spectral_distance(constF, Fs), -250)
np.testing.assert_almost_equal(spectral_distance(lin, Fs), -1256.997293357373)
np.testing.assert_almost_equal(spectral_distance(lin0, Fs), -323.15504563934024, decimal=5)
np.testing.assert_almost_equal(spectral_distance(wave, Fs), -122500.00000000022, decimal=5)
np.testing.assert_almost_equal(spectral_distance(offsetWave, Fs), -622500.0, decimal=5)
np.testing.assert_almost_equal(spectral_distance(noiseWave, Fs), -124832.72310672606, decimal=5)
def test_spect_variation():
np.testing.assert_almost_equal(spectral_variation(const0, Fs), 1.0)
np.testing.assert_almost_equal(spectral_variation(const1, Fs), 1.0)
np.testing.assert_almost_equal(spectral_variation(constNeg, Fs), 1.0)
np.testing.assert_almost_equal(spectral_variation(constF, Fs), 1.0)
np.testing.assert_almost_equal(spectral_variation(lin, Fs), 0.04096548417849766)
np.testing.assert_almost_equal(spectral_variation(lin0, Fs), 0.39913530062615254, decimal=5)
np.testing.assert_almost_equal(spectral_variation(wave, Fs), 0.9999999999999997, decimal=5)
np.testing.assert_almost_equal(spectral_variation(offsetWave, Fs), 0.9999999999999999, decimal=5)
np.testing.assert_almost_equal(spectral_variation(noiseWave, Fs), 0.9775968083533805, decimal=5)
def test_spectral_maxpeaks():
np.testing.assert_almost_equal(spectral_maxpeaks(const0, Fs), 0.0)
np.testing.assert_almost_equal(spectral_maxpeaks(const1, Fs), 0.0)
np.testing.assert_almost_equal(spectral_maxpeaks(constNeg, Fs), 0.0)
np.testing.assert_almost_equal(spectral_maxpeaks(constF, Fs), 0.0)
np.testing.assert_almost_equal(spectral_maxpeaks(lin, Fs), 0.0)
np.testing.assert_almost_equal(spectral_maxpeaks(lin0, Fs), 1.0, decimal=5)
np.testing.assert_almost_equal(spectral_maxpeaks(wave, Fs), 155, decimal=0)
np.testing.assert_almost_equal(spectral_maxpeaks(offsetWave, Fs), 158, decimal=1)
np.testing.assert_almost_equal(spectral_maxpeaks(noiseWave, Fs), 172.0, decimal=1)
def test_human_range_energy():
np.testing.assert_almost_equal(human_range_energy(const0, Fs), 0.0)
np.testing.assert_almost_equal(human_range_energy(const1, Fs), 0.0)
np.testing.assert_almost_equal(human_range_energy(constNeg, Fs), 0.0)
np.testing.assert_almost_equal(human_range_energy(constF, Fs), 0.0)
np.testing.assert_almost_equal(human_range_energy(lin, Fs), 0.0)
np.testing.assert_almost_equal(human_range_energy(lin0, Fs), 0.0)
np.testing.assert_almost_equal(human_range_energy(wave, Fs), 2.838300923247935e-33)
np.testing.assert_almost_equal(human_range_energy(offsetWave, Fs), 1.6194431630448383e-33)
np.testing.assert_almost_equal(human_range_energy(noiseWave, Fs), 4.5026865350839304e-05)
def test_mfcc():
np.testing.assert_almost_equal(mfcc(const0, Fs), (-1e-08, -2.5654632210061364e-08, -4.099058125255727e-08,
-5.56956514302075e-08, -6.947048992011573e-08,
-8.203468073398136e-08,
-9.313245317896842e-08, -1.0253788861142992e-07,
-1.1005951948899701e-07,
-1.1554422709759472e-07, -1.1888035860690259e-07,
-1.2000000000000002e-07))
np.testing.assert_almost_equal(mfcc(const1, Fs), (0.14096637144714785, 0.4029720554090289, 0.2377457745400458,
0.9307791929462678, -0.8138023913445843, -0.36127671623673,
0.17779314470940918, 1.5842014538963525, -5.868875380858009,
-1.3484207382203723, -1.5899059472962034, 2.9774371742123975))
np.testing.assert_almost_equal(mfcc(constNeg, Fs), (0.14096637144714785, 0.4029720554090289, 0.2377457745400458,
0.9307791929462678, -0.8138023913445843, -0.36127671623673,
0.17779314470940918, 1.5842014538963525, -5.868875380858009,
-1.3484207382203723, -1.5899059472962034, 2.9774371742123975))
np.testing.assert_almost_equal(mfcc(constF, Fs), (0.1409663714471363, 0.40297205540906766, 0.23774577454002216,
0.9307791929463864, -0.8138023913445535, -0.3612767162368284,
0.17779314470931407, 1.584201453896316, -5.868875380858139,
-1.3484207382203004, -1.589905947296293, 2.977437174212552))
np.testing.assert_almost_equal(mfcc(lin, Fs), (63.41077963677539, 42.33256774689686, 22.945623346731722,
-9.267967765468333, -30.918618746635172, -69.45624761250505,
-81.74881720705784, -112.32234611356338, -127.73335353282954,
-145.3505024599537, -152.08439229251312, -170.61228411241296))
np.testing.assert_almost_equal(mfcc(lin0, Fs), (4.472854975902669, 9.303621966161266, 12.815317252229947,
12.65260020301481, 9.763110307405048, 3.627814979708572,
1.0051648150842092, -8.07514557618858, -24.79987026383853,
-36.55749714126207, -49.060094200797785, -61.45654150658956))
np.testing.assert_almost_equal(mfcc(wave, Fs), (115.31298449242963, -23.978080415791883, 64.49711308839377,
-70.83883973188331, -17.4881594184545, -122.5191336465161,
-89.73379214517978, -164.5583844690884, -153.29482394321641,
-204.0607944643521, -189.9059214788022, -219.38937674972897))
np.testing.assert_almost_equal(mfcc(offsetWave, Fs), (0.02803261518615674, 0.21714705316418328, 0.4010268706527706,
1.0741653432632032, -0.26756380975236493,
-0.06446520044381611, 1.2229170142535633, 2.2173729990650166,
-5.161787305125577, -1.777027230578585, -2.2267834681371506,
1.266610194040295))
np.testing.assert_almost_equal(mfcc(noiseWave, Fs), (-59.93874366630627, -20.646010360067542, -5.9381521505819,
13.868391975194648, 65.73380784148053, 67.65563377433688,
35.223042940942214, 73.01746718829553, 137.50395589362876,
111.61718917042731, 82.69709467796633, 110.67135918512074))
def test_power_bandwidth():
np.testing.assert_almost_equal(power_bandwidth(const0, Fs), 0.0)
np.testing.assert_almost_equal(power_bandwidth(const1, Fs), 0.0)
np.testing.assert_almost_equal(power_bandwidth(constNeg, Fs), 0.0)
np.testing.assert_almost_equal(power_bandwidth(constF, Fs), 0.0)
np.testing.assert_almost_equal(power_bandwidth(lin, Fs), 0.0)
np.testing.assert_almost_equal(power_bandwidth(lin0, Fs), 0.0)
np.testing.assert_almost_equal(power_bandwidth(wave, Fs), 2.0)
np.testing.assert_almost_equal(power_bandwidth(offsetWave, Fs), 2.0)
np.testing.assert_almost_equal(power_bandwidth(noiseWave, Fs), 2.0)
def test_fft_mean_coeff():
np.testing.assert_almost_equal(fft_mean_coeff(const0, Fs, nfreq=10),
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(fft_mean_coeff(const1, Fs, nfreq=10),
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(fft_mean_coeff(constNeg, Fs, nfreq=10),
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(fft_mean_coeff(constF, Fs, nfreq=10),
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(fft_mean_coeff(lin, Fs, nfreq=10), (0.00408221375370652, 0.29732082717207287,
0.04400486791011177, 0.006686945426272411,
0.00027732608206304087, 0.0003337183893114616,
0.0008722727267959805, 0.0007221373313148659,
0.00024061479410220662, 2.1097101108186473e-07))
np.testing.assert_almost_equal(fft_mean_coeff(lin0, Fs, nfreq=10), (0.004523228535962903, 0.3294413597474491,
0.04875885641009613, 0.007409357813044217,
0.00030728651752137475, 0.0003697710684891545,
0.0009665071765052403, 0.0008001521676618994,
0.00026660919014094884, 2.337628931654879e-07))
np.testing.assert_almost_equal(fft_mean_coeff(wave, Fs, nfreq=10), (2.0234880089914443e-06, 0.0001448004568848076,
2.1047578415647817e-05, 3.2022732210152474e-06,
1.52158292419209e-07, 1.7741879185514087e-07,
4.2795757073284126e-07, 3.5003942541628605e-07,
1.1626895252132188e-07, 1.6727906953620535e-10))
np.testing.assert_almost_equal(fft_mean_coeff(offsetWave, Fs, nfreq=10), (2.0234880089914642e-06,
0.00014480045688480763,
2.104757841564781e-05,
3.2022732210152483e-06,
1.5215829241920897e-07,
1.7741879185514156e-07,
4.27957570732841e-07,
3.500394254162859e-07,
1.1626895252132173e-07,
1.6727906953620255e-10))
np.testing.assert_almost_equal(fft_mean_coeff(noiseWave, Fs, nfreq=10), (3.2947755935395495e-06,
0.00014466702099241778,
3.838265852158549e-05,
1.6729032217627548e-05,
1.6879950037320804e-05,
1.571169205601392e-05,
1.679718723715948e-05,
1.810371503556574e-05,
2.0106126483830693e-05,
8.91285109135437e-06))
def test_lpcc():
np.testing.assert_almost_equal(lpcc(const0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
np.testing.assert_almost_equal(lpcc(const1), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
np.testing.assert_almost_equal(lpcc(constNeg), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
np.testing.assert_almost_equal(lpcc(constF), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
np.testing.assert_almost_equal(lpcc(lin), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
np.testing.assert_almost_equal(lpcc(lin0), (0.017793342850434657, 0.12419699587050197, 0.17985773867565555,
0.13749027713829948, 0.14521059821841656, 0.14362411136332903,
0.14403924127165643, 0.14362411136332903, 0.14521059821841656,
0.13749027713829948, 0.17985773867565555, 0.12419699587050197))
np.testing.assert_almost_equal(lpcc(wave), (8.08705689884851e-07, 0.10193422882411193, 0.0051922525746904875,
0.0003496693593067946, 2.355214618130234e-05, 1.2419899263690914e-06,
3.091008802744081e-06, 1.2419899263690914e-06, 2.355214618130234e-05,
0.0003496693593067946, 0.0051922525746904875, 0.10193422882411193))
np.testing.assert_almost_equal(lpcc(offsetWave), (8.087054868870942e-07, 0.10193422882503231, 0.005192252575236202,
0.0003496693583308415, 2.3552147454092374e-05,
1.241991615337501e-06, 3.0910069449505212e-06,
1.241991615337501e-06, 2.3552147454092374e-05,
0.0003496693583308415, 0.005192252575236202, 0.10193422882503231))
np.testing.assert_almost_equal(lpcc(noiseWave), (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
def test_spectral_entropy():
np.testing.assert_almost_equal(spectral_entropy(const0, Fs), 0.0)
np.testing.assert_almost_equal(spectral_entropy(const1, Fs), 0.0)
np.testing.assert_almost_equal(spectral_entropy(constNeg, Fs), 0.0)
np.testing.assert_almost_equal(spectral_entropy(constF, Fs), 0.0)
np.testing.assert_almost_equal(spectral_entropy(lin, Fs), 0.6006757398806453)
np.testing.assert_almost_equal(spectral_entropy(lin0, Fs), 0.57319032538303)
np.testing.assert_almost_equal(spectral_entropy(wave, Fs), 1.5228376718814352e-29)
np.testing.assert_almost_equal(spectral_entropy(offsetWave, Fs), 1.783049297437309e-29)
np.testing.assert_almost_equal(spectral_entropy(noiseWave, Fs), 0.030107186831275425)
def test_wavelet_entropy():
np.testing.assert_almost_equal(wavelet_entropy(const0), 0.0)
np.testing.assert_almost_equal(wavelet_entropy(const1), 1.9188378548746368)
np.testing.assert_almost_equal(wavelet_entropy(constNeg), 1.9188378548746368)
np.testing.assert_almost_equal(wavelet_entropy(constF), 1.9188378548746368)
np.testing.assert_almost_equal(wavelet_entropy(lin), 1.9648440772467513)
np.testing.assert_almost_equal(wavelet_entropy(lin0), 2.0713919678725117)
np.testing.assert_almost_equal(wavelet_entropy(wave), 1.7277528462213683)
np.testing.assert_almost_equal(wavelet_entropy(offsetWave), 1.7965939302139549)
np.testing.assert_almost_equal(wavelet_entropy(noiseWave), 2.0467527462416153)
def test_wavelet_abs_mean():
np.testing.assert_almost_equal(wavelet_abs_mean(const0), (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(wavelet_abs_mean(const1),
(0.081894185676901, 0.24260084511769256, 0.4653470776794248,
0.8500400580778283, 1.3602249381214044, 1.8378460432593602,
2.2080039502231164, 2.4676456085810874, 2.638131856418627))
np.testing.assert_almost_equal(wavelet_abs_mean(constNeg),
(0.081894185676901, 0.24260084511769256, 0.4653470776794248,
0.8500400580778283, 1.3602249381214044, 1.8378460432593602,
2.2080039502231164, 2.4676456085810874, 2.638131856418627))
np.testing.assert_almost_equal(wavelet_abs_mean(constF),
(0.20473546419225214, 0.6065021127942314, 1.1633676941985622,
2.1251001451945712, 3.4005623453035114, 4.5946151081484015,
5.5200098755577915, 6.169114021452717, 6.5953296410465665))
np.testing.assert_almost_equal(wavelet_abs_mean(lin), (0.7370509925842613, 2.183416725919023, 4.1974435700809565,
7.744819422931153, 12.504051331233388, 16.982183932901865,
20.46332353598833, 22.91143100556329, 24.52363151471446))
np.testing.assert_almost_equal(wavelet_abs_mean(lin0),
(0.0430987066803135, 0.12767505547269026, 0.23510912407745171,
0.3479590829560181, 0.4400900851788993, 0.5024773453284851,
0.5396989380329178, 0.5591602904810937, 0.5669696013289379))
np.testing.assert_almost_equal(wavelet_abs_mean(wave), (5.138703105035948e-05, 0.00015178141653400073,
0.00027925117450851024, 0.0004278724786267016,
0.0005932191214607947, 0.0007717034331954587,
0.0009601854175466062, 0.0011557903088208192,
0.0013558175034366186))
np.testing.assert_almost_equal(wavelet_abs_mean(offsetWave), (0.0032504208945027323, 0.009623752088931016,
0.017761411181034453, 0.027372614777691914,
0.03826512918833778, 0.050306487368868114,
0.06339897203822373, 0.07746693331944604,
0.09244971907566273))
np.testing.assert_almost_equal(wavelet_abs_mean(noiseWave), (4.631139377647647e-05, 7.893225282164063e-05,
0.00033257747958655794, 0.0005792253883615155,
0.0007699898255271558, 0.0009106252575513913,
0.0010387197644970154, 0.0011789334866018457,
0.0013341945911985783))
def test_wavelet_std():
np.testing.assert_almost_equal(wavelet_std(const0), (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(wavelet_std(const1), (0.1767186264889806, 0.28069306259219023, 0.3235061868750311,
0.3115893726751135, 0.31446140614407014, 0.3582016825631658,
0.4133090941627322, 0.4598585090675407, 0.4935514064162697))
np.testing.assert_almost_equal(wavelet_std(constNeg), (0.1767186264889806, 0.28069306259219023, 0.3235061868750311,
0.3115893726751135, 0.31446140614407014, 0.3582016825631658,
0.4133090941627322, 0.4598585090675407, 0.4935514064162697))
np.testing.assert_almost_equal(wavelet_std(constF), (0.44179656622245145, 0.7017326564804757, 0.8087654671875778,
0.7789734316877838, 0.7861535153601755, 0.8955042064079146,
1.0332727354068305, 1.1496462726688517, 1.2338785160406742))
np.testing.assert_almost_equal(wavelet_std(lin), (2.721791561180164, 5.325234998810811, 8.137581399111415,
10.529795250703716, 11.836525442245224, 12.296195571788726,
12.315744378517108, 12.135259348389042, 11.869294506387352))
np.testing.assert_almost_equal(wavelet_std(lin0), (2.239406940011677, 4.7878443746478245, 7.797954379287043,
10.418506686200207, 11.746946049852674, 12.045972295386465,
11.828477896749822, 11.408150997410496, 10.932763618021895))
np.testing.assert_almost_equal(wavelet_std(wave), (0.001939366875349316, 0.009733675496927717, 0.025635801097107388,
0.05125305898778544, 0.08783649118731567, 0.13636963970273208,
0.197613166916789, 0.2721306670702481, 0.360305525758368))
np.testing.assert_almost_equal(wavelet_std(offsetWave),
(0.05459142980660159, 0.10410347082332229, 0.155831467554863,
0.2101395066938644, 0.268489203478025, 0.33264452641566,
0.4044076212671741, 0.4854392072251105, 0.5771385517659353))
np.testing.assert_almost_equal(wavelet_std(noiseWave),
(0.08974931069698587, 0.09625674025798765, 0.10445386849293256,
0.11395751571203461, 0.13232763520967267, 0.16659967802754122,
0.2187573594673847, 0.2877270278501564, 0.3722670641715661))
def test_wavelet_var():
np.testing.assert_almost_equal(wavelet_var(const0), (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(wavelet_var(const1), (0.031229472948151833, 0.07878859538738324, 0.10465625294642253,
0.09708793716407076, 0.09888597595410582, 0.128308445391083,
0.17082440731761822, 0.21146984836182142, 0.24359299077547786))
np.testing.assert_almost_equal(wavelet_var(constNeg),
(0.031229472948151833, 0.07878859538738324, 0.10465625294642253,
0.09708793716407076, 0.09888597595410582, 0.128308445391083,
0.17082440731761822, 0.21146984836182142, 0.24359299077547786))
np.testing.assert_almost_equal(wavelet_var(constF), (0.19518420592594893, 0.49242872117114533, 0.654101580915141,
0.6067996072754422, 0.6180373497131617, 0.8019277836942689,
1.0676525457351138, 1.3216865522613839, 1.5224561923467361))
np.testing.assert_almost_equal(wavelet_var(lin), (7.408149302511555, 28.35812779255958, 66.22023102716409,
110.87658802174253, 140.10333454491848, 151.19642553967668,
151.67755959697575, 147.26451945266362, 140.88015207935698))
np.testing.assert_almost_equal(wavelet_var(lin0), (5.014943442972464, 22.923453755846815, 60.808092501441976,
108.5452815703984, 137.99074149814933, 145.10544854121827,
139.91288935389912, 130.1459091797181, 119.5253203275432))
np.testing.assert_almost_equal(wavelet_var(wave),
(3.761143877202169e-06, 9.474443867949103e-05, 0.0006571942978904524,
0.0026268760556054137, 0.007715249184099382, 0.018596678632652963,
0.03905096373888271, 0.07405509996009821, 0.12982007189201397))
np.testing.assert_almost_equal(wavelet_var(offsetWave),
(0.0029802242083291084, 0.010837532637462314, 0.02428344628030232,
0.044158612273540676, 0.07208645238426431, 0.11065238095429873,
0.16354552413897414, 0.2356512239113438, 0.33308890793448115))
np.testing.assert_almost_equal(wavelet_var(noiseWave),
(0.008054938770584103, 0.0092653600450937, 0.01091061064313885,
0.012986315387258616, 0.017510603040184203, 0.027755452718880403,
0.04785478232114257, 0.08278684255548469, 0.1385827670669169))
def test_wavelet_energy():
np.testing.assert_almost_equal(wavelet_energy(const0), (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(wavelet_energy(const1), (0.19477199643643478, 0.3710037269882903, 0.56674875884399,
0.9053485723747671, 1.3961009484422982, 1.8724279756816202,
2.2463539016634275, 2.510128422593423, 2.683902509896041))
np.testing.assert_almost_equal(wavelet_energy(constNeg), (0.19477199643643478, 0.3710037269882903, 0.56674875884399,
0.9053485723747671, 1.3961009484422982,
1.8724279756816202,
2.2463539016634275, 2.510128422593423, 2.683902509896041))
np.testing.assert_almost_equal(wavelet_energy(constF), (0.48692999109108687, 0.9275093174707258, 1.4168718971099752,
2.263371430936918, 3.4902523711057456, 4.6810699392040505,
5.615884754158569, 6.275321056483556, 6.709756274740101))
np.testing.assert_almost_equal(wavelet_energy(lin), (2.819821531264169, 5.7554701277638936, 9.156350995411767,
13.071297407509103, 17.21785800380053, 20.966425462405052,
23.883575313078858, 25.926785187819767, 27.244974853151422))
np.testing.assert_almost_equal(wavelet_energy(lin0), (2.2398216316238173, 4.789546395603321, 7.8014978562880115,
10.424315665491429, 11.75518697346929, 12.056447736534448,
11.84078393931808, 11.421846147193937, 10.947455177180416))
np.testing.assert_almost_equal(wavelet_energy(wave),
(0.0019400475520363772, 0.00973485882167256, 0.025637321995655413,
0.051254844946242696, 0.08783849436907175, 0.13637182318514984,
0.19761549963228792, 0.2721331214889804, 0.3603080766970352))
np.testing.assert_almost_equal(wavelet_energy(offsetWave),
(0.054688110630378595, 0.10454735406375197, 0.15684040935755078,
0.21191477606176637, 0.27120227229148447, 0.3364269959823273,
0.4093469845918956, 0.49158147815928066, 0.584496243351187))
np.testing.assert_almost_equal(wavelet_energy(noiseWave),
(0.08974932264551803, 0.09625677262091348, 0.10445439794914707,
0.11395898775133596, 0.13232987540429264, 0.16660216672432593,
0.2187598255162308, 0.2877294431226156, 0.37226945502166053))
run_module_suite()
|
py
|
1a5a4f9b065860c602904ef52376b850b585f21e
|
from setuptools import setup
setup(
name="vmapper",
version="0.0.2",
author="Benny Chin",
author_email="[email protected]",
packages=['vmapper', 'vmapper.geometry', 'vmapper.geometry.templates', 'vmapper.map_element', 'vmapper.templates', 'vmapper.utils'],
include_package_data=True,
url="https://github.com/wcchin/vmapper",
license="LICENSE.txt",
description="Vector MAP ProducER - a simple Python library for creating SVG maps in Python",
long_description=open("README.md").read(),
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Topic :: Scientific/Engineering :: GIS',
'Topic :: Scientific/Engineering :: Visualization',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
keywords='map, geography, cartography, svg',
install_requires=[
"jinja2",
"pandas",
"geopandas",
],
)
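# Local install sketch (an assumption, not part of the original file): from the
# project root one would typically run `pip install .`.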
|
py
|
1a5a501ce6fb3266d0afd2dfb73d4fad21f23fc7
|
# coding: utf-8
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from k8s.models.common import ObjectMeta
from k8s.models.apiextensions_v1_custom_resource_definition import CustomResourceConversion,\
CustomResourceDefinitionNames, CustomResourceDefinitionSpec, CustomResourceDefinition,\
CustomResourceDefinitionVersion, CustomResourceValidation, JSONSchemaProps
LOG = logging.getLogger(__name__)
class CrdResourcesSyncerApiextensionsV1(object):
@staticmethod
def _create_or_update(kind, plural, short_names, group, schema_properties):
name = "%s.%s" % (plural, group)
metadata = ObjectMeta(name=name)
names = CustomResourceDefinitionNames(kind=kind, plural=plural, shortNames=short_names)
schema = CustomResourceValidation(openAPIV3Schema=JSONSchemaProps(type="object", properties=schema_properties))
version_v1 = CustomResourceDefinitionVersion(name="v1", served=True, storage=True, schema=schema)
spec = CustomResourceDefinitionSpec(
group=group,
names=names,
versions=[version_v1],
preserveUnknownFields=False,
scope="Namespaced",
conversion=CustomResourceConversion(strategy="None")
)
definition = CustomResourceDefinition.get_or_create(metadata=metadata, spec=spec)
definition.save()
LOG.info("Created or updated CustomResourceDefinition with name %s", name)
@classmethod
def update_crd_resources(cls):
object_with_unknown_fields = {"type": "object", "x-kubernetes-preserve-unknown-fields": True}
application_schema_properties = {
"spec": {
"type": "object",
"properties": {
"application": {
"type": "string",
},
"image": {
"type": "string",
},
"config": object_with_unknown_fields,
"additional_labels": {
"type": "object",
"properties": {
"global": object_with_unknown_fields,
"deployment": object_with_unknown_fields,
"horizontal_pod_autoscaler": object_with_unknown_fields,
"ingress": object_with_unknown_fields,
"service": object_with_unknown_fields,
"service_account": object_with_unknown_fields,
"pod": object_with_unknown_fields,
"status": object_with_unknown_fields,
}
},
"additional_annotations": {
"type": "object",
"properties": {
"global": object_with_unknown_fields,
"deployment": object_with_unknown_fields,
"horizontal_pod_autoscaler": object_with_unknown_fields,
"ingress": object_with_unknown_fields,
"service": object_with_unknown_fields,
"service_account": object_with_unknown_fields,
"pod": object_with_unknown_fields,
"status": object_with_unknown_fields,
}
}
}
}
}
application_status_schema_properties = {
"result": {
"type": "string"
},
"logs": {
"type": "array",
"items": {
"type": "string"
}
}
}
cls._create_or_update("Application", "applications", ("app", "fa"), "fiaas.schibsted.io",
application_schema_properties)
cls._create_or_update("ApplicationStatus", "application-statuses", ("status", "appstatus", "fs"),
"fiaas.schibsted.io", application_status_schema_properties)
|
py
|
1a5a50d134dc83ebfb7c74925d4e6f29b6d01186
|
import os
from collections import OrderedDict
from conans.client import tools
from conans.client.build.compiler_flags import architecture_flag, parallel_compiler_cl_flag
from conans.client.build.cppstd_flags import cppstd_from_settings, cppstd_flag_new as cppstd_flag
from conans.client.tools import cross_building
from conans.client.tools.apple import is_apple_os
from conans.client.tools.oss import get_cross_building_settings
from conans.errors import ConanException
from conans.model.build_info import DEFAULT_BIN, DEFAULT_INCLUDE, DEFAULT_LIB, DEFAULT_SHARE
from conans.util.env_reader import get_env
from conans.util.log import logger
verbose_definition_name = "CMAKE_VERBOSE_MAKEFILE"
cmake_install_prefix_var_name = "CMAKE_INSTALL_PREFIX"
runtime_definition_var_name = "CONAN_LINK_RUNTIME"
cmake_in_local_cache_var_name = "CONAN_IN_LOCAL_CACHE"
def get_toolset(settings):
if settings.get_safe("compiler") == "Visual Studio":
subs_toolset = settings.get_safe("compiler.toolset")
if subs_toolset:
return subs_toolset
return None
def get_generator(settings):
# Returns the name of the generator to be used by CMake
if "CONAN_CMAKE_GENERATOR" in os.environ:
return os.environ["CONAN_CMAKE_GENERATOR"]
compiler = settings.get_safe("compiler")
arch = settings.get_safe("arch")
compiler_version = settings.get_safe("compiler.version")
os_build, _, _, _ = get_cross_building_settings(settings)
if not compiler or not compiler_version or not arch:
if os_build == "Windows":
logger.warning("CMake generator could not be deduced from settings")
return None
return "Unix Makefiles"
if compiler == "Visual Studio":
_visuals = {'8': '8 2005',
'9': '9 2008',
'10': '10 2010',
'11': '11 2012',
'12': '12 2013',
'14': '14 2015',
'15': '15 2017',
'16': '16 2019'}.get(compiler_version, "UnknownVersion %s" % compiler_version)
base = "Visual Studio %s" % _visuals
return base
# The generator depends on the build machine, not the target
if os_build == "Windows" and compiler != "qcc":
return "MinGW Makefiles" # it is valid only under Windows
return "Unix Makefiles"
def get_generator_platform(settings, generator):
# Returns the generator platform to be used by CMake
if "CONAN_CMAKE_GENERATOR_PLATFORM" in os.environ:
return os.environ["CONAN_CMAKE_GENERATOR_PLATFORM"]
compiler = settings.get_safe("compiler")
arch = settings.get_safe("arch")
if settings.get_safe("os") == "WindowsCE":
return settings.get_safe("os.platform")
if compiler == "Visual Studio" and generator and "Visual" in generator:
return {"x86": "Win32",
"x86_64": "x64",
"armv7": "ARM",
"armv8": "ARM64"}.get(arch)
return None
def is_multi_configuration(generator):
if not generator:
return False
return "Visual" in generator or "Xcode" in generator
def is_toolset_supported(generator):
# https://cmake.org/cmake/help/v3.14/variable/CMAKE_GENERATOR_TOOLSET.html
if not generator:
return False
return "Visual" in generator or "Xcode" in generator or "Green Hills MULTI" in generator
def is_generator_platform_supported(generator):
# https://cmake.org/cmake/help/v3.14/variable/CMAKE_GENERATOR_PLATFORM.html
if not generator:
return False
return "Visual" in generator or "Green Hills MULTI" in generator
def verbose_definition(value):
return {verbose_definition_name: "ON" if value else "OFF"}
def in_local_cache_definition(value):
return {cmake_in_local_cache_var_name: "ON" if value else "OFF"}
def runtime_definition(runtime):
return {runtime_definition_var_name: "/%s" % runtime} if runtime else {}
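# For illustration only (not in the original source), the small helpers above
# map directly to CMake cache entries, e.g.:
#   verbose_definition(True)  -> {"CMAKE_VERBOSE_MAKEFILE": "ON"}
#   runtime_definition("MD")  -> {"CONAN_LINK_RUNTIME": "/MD"}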
def build_type_definition(new_build_type, old_build_type, generator, output):
if new_build_type and new_build_type != old_build_type:
output.warn("Forced CMake build type ('%s') different from the settings build type ('%s')"
% (new_build_type, old_build_type))
build_type = new_build_type or old_build_type
if build_type and not is_multi_configuration(generator):
return {"CMAKE_BUILD_TYPE": build_type}
return {}
class CMakeDefinitionsBuilder(object):
def __init__(self, conanfile, cmake_system_name=True, make_program=None,
parallel=True, generator=None, set_cmake_flags=False,
forced_build_type=None, output=None):
self._conanfile = conanfile
self._forced_cmake_system_name = cmake_system_name
self._make_program = make_program
self._parallel = parallel
self._generator = generator
self._set_cmake_flags = set_cmake_flags
self._forced_build_type = forced_build_type
self._output = output
def _ss(self, setname):
"""safe setting"""
return self._conanfile.settings.get_safe(setname)
def _get_cpp_standard_vars(self):
cppstd = cppstd_from_settings(self._conanfile.settings)
if not cppstd:
return {}
definitions = {}
if cppstd.startswith("gnu"):
definitions["CONAN_CMAKE_CXX_STANDARD"] = cppstd[3:]
definitions["CONAN_CMAKE_CXX_EXTENSIONS"] = "ON"
else:
definitions["CONAN_CMAKE_CXX_STANDARD"] = cppstd
definitions["CONAN_CMAKE_CXX_EXTENSIONS"] = "OFF"
definitions["CONAN_STD_CXX_FLAG"] = cppstd_flag(self._conanfile.settings)
return definitions
def _cmake_cross_build_defines(self):
os_ = self._ss("os")
arch = self._ss("arch")
os_ver_str = "os.api_level" if os_ == "Android" else "os.version"
op_system_version = self._ss(os_ver_str)
env_sn = get_env("CONAN_CMAKE_SYSTEM_NAME", "")
env_sn = {"False": False, "True": True, "": None}.get(env_sn, env_sn)
cmake_system_name = env_sn or self._forced_cmake_system_name
os_build, _, _, _ = get_cross_building_settings(self._conanfile.settings)
compiler = self._ss("compiler")
libcxx = self._ss("compiler.libcxx")
definitions = OrderedDict()
os_ver = get_env("CONAN_CMAKE_SYSTEM_VERSION", op_system_version)
toolchain_file = get_env("CONAN_CMAKE_TOOLCHAIN_FILE", "")
if toolchain_file != "":
logger.info("Setting Cross build toolchain file: %s" % toolchain_file)
definitions["CMAKE_TOOLCHAIN_FILE"] = toolchain_file
return definitions
if cmake_system_name is False:
return definitions
# System name and system version
if cmake_system_name is not True: # String not empty
definitions["CMAKE_SYSTEM_NAME"] = cmake_system_name
else: # detect if we are cross building and the system name and version
if cross_building(self._conanfile.settings): # We are cross building
if os_ != os_build:
if os_: # the_os is the host (regular setting)
definitions["CMAKE_SYSTEM_NAME"] = {"iOS": "Darwin",
"tvOS": "Darwin",
"watchOS": "Darwin",
"Neutrino": "QNX"}.get(os_, os_)
else:
definitions["CMAKE_SYSTEM_NAME"] = "Generic"
if os_ver:
definitions["CMAKE_SYSTEM_VERSION"] = os_ver
if is_apple_os(os_):
definitions["CMAKE_OSX_DEPLOYMENT_TARGET"] = os_ver
# system processor
cmake_system_processor = os.getenv("CONAN_CMAKE_SYSTEM_PROCESSOR")
if cmake_system_processor:
definitions["CMAKE_SYSTEM_PROCESSOR"] = cmake_system_processor
if definitions: # If enabled cross compile
for env_var in ["CONAN_CMAKE_FIND_ROOT_PATH",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_PROGRAM",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_LIBRARY",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_INCLUDE"]:
value = os.getenv(env_var)
if value:
definitions[env_var] = value
if self._conanfile and self._conanfile.deps_cpp_info.sysroot:
sysroot_path = self._conanfile.deps_cpp_info.sysroot
else:
sysroot_path = os.getenv("CONAN_CMAKE_FIND_ROOT_PATH", None)
if sysroot_path:
# Needs to be set here, can't be managed in the cmake generator, CMake needs
# to know about the sysroot before any other thing
definitions["CMAKE_SYSROOT"] = sysroot_path.replace("\\", "/")
# Adjust Android stuff
if str(os_) == "Android" and definitions["CMAKE_SYSTEM_NAME"] == "Android":
arch_abi_settings = tools.to_android_abi(arch)
if arch_abi_settings:
definitions["CMAKE_ANDROID_ARCH_ABI"] = arch_abi_settings
definitions["ANDROID_ABI"] = arch_abi_settings
conan_cmake_android_ndk = os.getenv("CONAN_CMAKE_ANDROID_NDK")
if conan_cmake_android_ndk:
definitions["ANDROID_NDK"] = conan_cmake_android_ndk
definitions["ANDROID_PLATFORM"] = "android-%s" % op_system_version
definitions["ANDROID_TOOLCHAIN"] = compiler
# More details about supported stdc++ libraries here:
# https://developer.android.com/ndk/guides/cpp-support.html
if libcxx:
definitions["ANDROID_STL"] = libcxx
else:
definitions["ANDROID_STL"] = 'none'
logger.info("Setting Cross build flags: %s"
% ", ".join(["%s=%s" % (k, v) for k, v in definitions.items()]))
return definitions
def _get_make_program_definition(self):
make_program = os.getenv("CONAN_MAKE_PROGRAM") or self._make_program
if make_program:
if not tools.which(make_program):
self._output.warn("The specified make program '%s' cannot be found and will be "
"ignored" % make_program)
else:
self._output.info("Using '%s' as CMAKE_MAKE_PROGRAM" % make_program)
return {"CMAKE_MAKE_PROGRAM": make_program}
return {}
def get_definitions(self):
compiler = self._ss("compiler")
compiler_version = self._ss("compiler.version")
arch = self._ss("arch")
os_ = self._ss("os")
libcxx = self._ss("compiler.libcxx")
runtime = self._ss("compiler.runtime")
build_type = self._ss("build_type")
definitions = OrderedDict()
definitions.update(runtime_definition(runtime))
definitions.update(build_type_definition(self._forced_build_type, build_type,
self._generator, self._output))
if str(os_) == "Macos":
if arch == "x86":
definitions["CMAKE_OSX_ARCHITECTURES"] = "i386"
definitions.update(self._cmake_cross_build_defines())
definitions.update(self._get_cpp_standard_vars())
definitions.update(in_local_cache_definition(self._conanfile.in_local_cache))
if compiler:
definitions["CONAN_COMPILER"] = compiler
if compiler_version:
definitions["CONAN_COMPILER_VERSION"] = str(compiler_version)
# C, CXX, LINK FLAGS
if compiler == "Visual Studio":
if self._parallel:
flag = parallel_compiler_cl_flag(output=self._output)
definitions['CONAN_CXX_FLAGS'] = flag
definitions['CONAN_C_FLAGS'] = flag
else: # arch_flag is only set for non Visual Studio
arch_flag = architecture_flag(compiler=compiler, os=os_, arch=arch)
if arch_flag:
definitions['CONAN_CXX_FLAGS'] = arch_flag
definitions['CONAN_SHARED_LINKER_FLAGS'] = arch_flag
definitions['CONAN_C_FLAGS'] = arch_flag
if self._set_cmake_flags:
definitions['CMAKE_CXX_FLAGS'] = arch_flag
definitions['CMAKE_SHARED_LINKER_FLAGS'] = arch_flag
definitions['CMAKE_C_FLAGS'] = arch_flag
if libcxx:
definitions["CONAN_LIBCXX"] = libcxx
# Shared library
try:
definitions["BUILD_SHARED_LIBS"] = "ON" if self._conanfile.options.shared else "OFF"
except ConanException:
pass
# Install to package folder
try:
if self._conanfile.package_folder:
definitions["CMAKE_INSTALL_PREFIX"] = self._conanfile.package_folder
definitions["CMAKE_INSTALL_BINDIR"] = DEFAULT_BIN
definitions["CMAKE_INSTALL_SBINDIR"] = DEFAULT_BIN
definitions["CMAKE_INSTALL_LIBEXECDIR"] = DEFAULT_BIN
definitions["CMAKE_INSTALL_LIBDIR"] = DEFAULT_LIB
definitions["CMAKE_INSTALL_INCLUDEDIR"] = DEFAULT_INCLUDE
definitions["CMAKE_INSTALL_OLDINCLUDEDIR"] = DEFAULT_INCLUDE
definitions["CMAKE_INSTALL_DATAROOTDIR"] = DEFAULT_SHARE
except AttributeError:
pass
# fpic
if not str(os_).startswith("Windows"):
fpic = self._conanfile.options.get_safe("fPIC")
if fpic is not None:
shared = self._conanfile.options.get_safe("shared")
fpic_value = "ON" if (fpic or shared) else "OFF"
definitions["CONAN_CMAKE_POSITION_INDEPENDENT_CODE"] = fpic_value
# Adjust automatically the module path in case the conanfile is using the
# cmake_find_package or cmake_find_package_multi
install_folder = self._conanfile.install_folder.replace("\\", "/")
if "cmake_find_package" in self._conanfile.generators:
definitions["CMAKE_MODULE_PATH"] = install_folder
if "cmake_find_package_multi" in self._conanfile.generators:
# The cmake_find_package_multi only works with targets and generates XXXConfig.cmake
# that require the prefix path and the module path
definitions["CMAKE_PREFIX_PATH"] = install_folder
definitions["CMAKE_MODULE_PATH"] = install_folder
definitions.update(self._get_make_program_definition())
# Disable CMake export registry #3070 (CMake installing modules in user home's)
definitions["CMAKE_EXPORT_NO_PACKAGE_REGISTRY"] = "ON"
return definitions
|
py
|
1a5a51ab4dfd50486ddd9a27228322f7499a3502
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: mnist-keras.py
# Author: Yuxin Wu
import tensorflow as tf
from tensorflow import keras
from tensorpack import *
from tensorpack.contrib.keras import KerasPhaseCallback
from tensorpack.dataflow import dataset
from tensorpack.utils.argtools import memoized
KL = keras.layers
"""
This is an MNIST example demonstrating how to use a Keras symbolic function inside tensorpack.
This way you can define models in Keras-style, and benefit from the more efficient trainers in tensorpack.
Note: this example does not work for replicated-style data-parallel trainers.
"""
IMAGE_SIZE = 28
@memoized # this is necessary for sonnet/Keras to work under tensorpack
def get_keras_model():
M = keras.models.Sequential()
M.add(KL.Conv2D(32, 3, activation='relu', input_shape=[IMAGE_SIZE, IMAGE_SIZE, 1], padding='same'))
M.add(KL.MaxPooling2D())
M.add(KL.Conv2D(32, 3, activation='relu', padding='same'))
M.add(KL.Conv2D(32, 3, activation='relu', padding='same'))
M.add(KL.MaxPooling2D())
M.add(KL.Conv2D(32, 3, padding='same', activation='relu'))
M.add(KL.Flatten())
M.add(KL.Dense(512, activation='relu', kernel_regularizer=keras.regularizers.l2(1e-5)))
M.add(KL.Dropout(0.5))
M.add(KL.Dense(10, activation=None, kernel_regularizer=keras.regularizers.l2(1e-5)))
return M
class Model(ModelDesc):
def inputs(self):
return [tf.TensorSpec((None, IMAGE_SIZE, IMAGE_SIZE), tf.float32, 'input'),
tf.TensorSpec((None,), tf.int32, 'label')]
def build_graph(self, image, label):
image = tf.expand_dims(image, 3) * 2 - 1
M = get_keras_model()
logits = M(image)
# build cost function by tensorflow
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss
# for tensorpack validation
acc = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32)
acc = tf.reduce_mean(acc, name='accuracy')
summary.add_moving_summary(acc)
wd_cost = tf.add_n(M.losses, name='regularize_loss') # this is how Keras manage regularizers
cost = tf.add_n([wd_cost, cost], name='total_cost')
summary.add_moving_summary(cost, wd_cost)
return cost
def optimizer(self):
lr = tf.train.exponential_decay(
learning_rate=1e-3,
global_step=get_global_step_var(),
decay_steps=468 * 10,
decay_rate=0.3, staircase=True, name='learning_rate')
tf.summary.scalar('lr', lr)
return tf.train.AdamOptimizer(lr)
def get_data():
train = BatchData(dataset.Mnist('train'), 128)
test = BatchData(dataset.Mnist('test'), 256, remainder=True)
return train, test
if __name__ == '__main__':
logger.auto_set_dir()
dataset_train, dataset_test = get_data()
cfg = TrainConfig(
model=Model(),
dataflow=dataset_train,
callbacks=[
KerasPhaseCallback(True), # for Keras training
ModelSaver(),
InferenceRunner(
dataset_test,
ScalarStats(['cross_entropy_loss', 'accuracy'])),
],
max_epoch=100,
)
launch_train_with_config(cfg, QueueInputTrainer())
|
py
|
1a5a5232f9abe7b0266be5204e5c022a4375fff2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, traceback
import simplejson
import openpyxl
from optparse import OptionParser
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
fscache = {}
def get_file(deviceid, version, country):
filename = "%s_%s_%s.txt" % (deviceid, version, country)
if filename not in fscache:
fscache[filename] = open(filename, "wt")
return fscache[filename]
all_events = None
lines = open ("part-00000.txt").readlines ()
for line in lines:
line = line.strip()
if len(line) != 0:
json = simplejson.loads(line)
all_events = json["events"]
if all_events is not None:
fh = get_file(json["deviceid"], json["ufversion"], json["country"])
for event in all_events:
if event["eventid"] == 2:
fh.write("scrambled: %s " % (event["scrambled"]) + simplejson.dumps(event) + "\n")
elif event["eventid"] != 23:
fh.write(simplejson.dumps(event) + "\n")
else:
fh.write ("{'eventid':23, crashes: [\n")
for reboot in event["reboots"]:
fh.write (simplejson.dumps (reboot) + "\n")
fh.write ("], swfatal: [")
for fatal in event["swfatal"]:
fh.write(simplejson.dumps(fatal) + "\n")
fh.write ("], hwfatal: [")
for fatal in event["hwfatal"]:
fh.write(simplejson.dumps(fatal) + "\n")
fh.write ("]} \n")
|
py
|
1a5a5268d24f91629f5a782caaf39ed3f7440b04
|
"""Module providing custom logging formatters and colorization for ANSI
compatible terminals."""
import inspect
import logging
import os
import random
import threading
from logging import LogRecord
from typing import Any, List
DEFAULT_LOG_FILE = os.path.join(os.sep, 'tmp', 'dftimewolf.log')
MAX_BYTES = 5 * 1024 * 1024
BACKUP_COUNT = 3
SUCCESS = 25 # 25 is right between INFO and WARNING
def _GenerateColorSequences() -> List[str]:
"""Generates ANSI codes for 256 colors.
Works on Linux and macOS; Windows (WSL) support is yet to be confirmed.
"""
sequences = []
for i in range(0, 16):
for j in range(0, 16):
code = str(i * 16 + j)
seq = '\u001b[38;5;' + code + 'm'
sequences.append(seq)
return sequences
COLOR_SEQS = _GenerateColorSequences()
RESET_SEQ = '\u001b[0m'
# Cherrypick a few interesting values. We still want the whole list of colors
# so that modules have a good number of colors to choose from.
# pylint: disable=unbalanced-tuple-unpacking
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = COLOR_SEQS[8:16]
BG_RED = '\u001b[41m' # Red background
BG_GREEN = '\u001b[42m' # Green background
BOLD = '\u001b[1m' # Bold / bright modifier
# We'll get something like this:
# [2020-07-09 18:06:05,187] [TimesketchExporter ] INFO Sketch 23 created
LOG_FORMAT = (
'[%(asctime)s] [{0:s}{color:s}%(name)-20s{1:s}] %(levelname)-8s'
' %(message)s')
LEVEL_COLOR_MAP = {
'WARNING': YELLOW,
'SUCCESS': BOLD + BG_GREEN + BLACK,
'INFO': WHITE,
'DEBUG': BLUE,
'CRITICAL': BOLD + BG_RED + WHITE,
'ERROR': RED
}
class WolfLogger(logging.getLoggerClass()): # type: ignore
"""Custom logging Class with a `success` logging function."""
def success(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=invalid-name
"""Logs a success message."""
super(WolfLogger, self).log(SUCCESS, *args, **kwargs)
logging.setLoggerClass(WolfLogger)
class WolfFormatter(logging.Formatter):
"""Helper class used to add color to log messages depending on their level."""
def __init__(
self,
colorize: bool = True,
random_color: bool = False,
threaded: bool = False,
**kwargs: Any) -> None:
"""Initializes the WolfFormatter object.
Args:
colorize (bool): If True, output will be colorized.
random_color (bool): If True, will colorize the module name with a random
color picked from COLOR_SEQS.
"""
self.threaded = threaded
self.colorize = colorize
kwargs['fmt'] = LOG_FORMAT.format('', '', color='')
if self.colorize:
color = ''
if random_color:
color = random.choice(COLOR_SEQS)
kwargs['fmt'] = LOG_FORMAT.format(BOLD, RESET_SEQ, color=color)
super(WolfFormatter, self).__init__(**kwargs)
def format(self, record: LogRecord) -> str:
"""Hooks the native format method and colorizes messages if needed.
Args:
record (logging.LogRecord): Native log record.
Returns:
str: The formatted message string.
"""
if self.colorize:
message = record.getMessage()
loglevel_color = LEVEL_COLOR_MAP.get(record.levelname)
if loglevel_color:
message = loglevel_color + message + RESET_SEQ
record.msg = message
if self.threaded:
stack = [i.function for i in inspect.stack()]
if 'Process' in stack:
thread_name = threading.current_thread().getName()
message = record.getMessage()
record.msg = "[{0:s}] {1:s}".format(thread_name, message)
return super(WolfFormatter, self).format(record)
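# A minimal usage sketch (an assumption, not part of the original module):
# attach the formatter to a standard-library handler to get colorized output.
if __name__ == '__main__':
    _handler = logging.StreamHandler()
    _handler.setFormatter(WolfFormatter(colorize=True))
    _demo_logger = logging.getLogger('demo')
    _demo_logger.addHandler(_handler)
    _demo_logger.setLevel(logging.DEBUG)
    _demo_logger.error('something went wrong')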
|
py
|
1a5a52feba2228e9c74f05b17ec2a7eeeb339880
|
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.lib.packet import ipv4, arp, udp, tcp, icmp
import ryu.app.blocked_ip as ip_class
class SimpleSwitch13(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(SimpleSwitch13, self).__init__(*args, **kwargs)
self.mac_to_port = {}
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# install table-miss flow entry
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
arp_pkt = pkt.get_protocol(arp.arp)
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
# ignore lldp packet
return
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
if arp_pkt:
# self.logger.info("ARP packet in %s %s", arp_pkt, eth)
if(arp_pkt.src_ip in ip_class.ip_class):
self.logger.info("Blocking Arp request of blocked ip: %s", arp_pkt.src_ip)
return
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
self.logger.info("Packet in : %s %s %s %s %s", dpid, src, dst, in_port, out_port)
else:
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port)]
# install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
# check IP Protocol and create a match for IP
if eth.ethertype == ether_types.ETH_TYPE_IP:
ip = pkt.get_protocol(ipv4.ipv4)
srcip = ip.src
dstip = ip.dst
# print("IP packet:",ip)
if isinstance(ip, ipv4.ipv4):
print("IPV4 processing")
#print("packet details:-----------------------",ip_pkt)
if (ip.proto == 17):
print("UDP processing")
udp_pkt = pkt.get_protocol(udp.udp)
#print("packet details:-----------------------",udp_pkt)
if (ip.proto == 6):
print("TCP processing")
tcp_pkt = pkt.get_protocol(tcp.tcp)
#print("packet details:-----------------------",tcp_pkt)
if (ip.proto == 1):
print("ICMP processing")
icmp_pkt = pkt.get_protocol(icmp.icmp)
# print("packet details:-----------------------",icmp_pkt)
# self.logger.info("IP packet in %s %s %s %s", dpid, srcip, dstip, in_port)
self.logger.info("Blocked IPs : %s",ip_class.ip_class)
# if (srcip in ip_class.ip_class ):
# self.logger.info("IP %s is blocked ",srcip)
# return
match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP,
ipv4_src=srcip,
ipv4_dst=dstip,
in_port = in_port,
ip_proto = ip.proto
)
# verify if we have a valid buffer_id, if yes avoid to send both
# flow_mod & packet_out
if msg.buffer_id != ofproto.OFP_NO_BUFFER:
self.add_flow(datapath, 1, match, actions, msg.buffer_id)
return
else:
self.add_flow(datapath, 1, match, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
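# Run sketch (an assumption, not part of the original file): a Ryu application
# like this one is normally started with the ryu-manager CLI, e.g.
#   ryu-manager path/to/this_module.py
# with the companion ryu.app.blocked_ip module providing the ip_class list.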
|
py
|
1a5a531159c738560a04c40121a9a6f08cb43ae6
|
from six.moves.urllib.parse import quote
import unittest
from createsend.journey_email import JourneyEmail
class JourneyEmailTestCase(object):
def test_bounces_no_params(self):
self.journey_email.stub_request(self.no_param_uri_for("bounces"), "journey_email_bounces_no_params.json")
bounces = self.journey_email.bounces()
self.assertEqual(len(bounces.Results), 2)
bounce_one = bounces.Results[0]
self.assertEqual(bounce_one.EmailAddress, "[email protected]")
self.assertEqual(bounce_one.BounceType, "Soft")
self.assertEqual(bounce_one.Date, "2019-08-20 14:24:00")
self.assertEqual(bounce_one.Reason, "Soft Bounce - Dns Failure")
self.assertEqual(bounces.ResultsOrderedBy, "Date")
self.assertEqual(bounces.OrderDirection, "ASC")
self.assertEqual(bounces.PageNumber, 1)
self.assertEqual(bounces.PageSize, 1000)
self.assertEqual(bounces.RecordsOnThisPage, 2)
self.assertEqual(bounces.TotalNumberOfRecords, 2)
self.assertEqual(bounces.NumberOfPages, 1)
def test_bounces_with_params(self):
self.journey_email.stub_request(self.param_uri_for("bounces", "2019-01-01", 1, 10, "desc"), "journey_email_bounces_with_params.json")
bounces = self.journey_email.bounces(date="2019-01-01", page=1, page_size=10, order_direction="desc")
self.assertEqual(len(bounces.Results), 2)
bounce_one = bounces.Results[0]
self.assertEqual(bounce_one.EmailAddress, "[email protected]")
self.assertEqual(bounce_one.BounceType, "Hard")
self.assertEqual(bounce_one.Date, "2019-08-21 04:26:00")
self.assertEqual(bounce_one.Reason, "Hard Bounce")
self.assertEqual(bounces.ResultsOrderedBy, "Date")
self.assertEqual(bounces.OrderDirection, "DESC")
self.assertEqual(bounces.PageNumber, 1)
self.assertEqual(bounces.PageSize, 10)
self.assertEqual(bounces.RecordsOnThisPage, 2)
self.assertEqual(bounces.TotalNumberOfRecords, 2)
self.assertEqual(bounces.NumberOfPages, 1)
def test_clicks_no_params(self):
self.journey_email.stub_request(self.no_param_uri_for("clicks"), "journey_email_clicks_no_params.json")
clicks = self.journey_email.clicks()
self.assertEqual(len(clicks.Results), 2)
click_one = clicks.Results[0]
self.assertEqual(click_one.EmailAddress, "[email protected]")
self.assertEqual(click_one.Date, "2019-08-19 10:23:00")
self.assertEqual(click_one.URL, "http://mail.google.com/mail/?hl=en&tab=wm")
self.assertEqual(click_one.IPAddress, "198.148.196.144")
self.assertEqual(click_one.Latitude, -33.8591)
self.assertEqual(click_one.Longitude, 151.200195)
self.assertEqual(click_one.City, "Sydney")
self.assertEqual(click_one.Region, "New South Wales")
self.assertEqual(click_one.CountryCode, "AU")
self.assertEqual(click_one.CountryName, "Australia")
self.assertEqual(clicks.ResultsOrderedBy, "Date")
self.assertEqual(clicks.OrderDirection, "ASC")
self.assertEqual(clicks.PageNumber, 1)
self.assertEqual(clicks.PageSize, 1000)
self.assertEqual(clicks.RecordsOnThisPage, 2)
self.assertEqual(clicks.TotalNumberOfRecords, 2)
self.assertEqual(clicks.NumberOfPages, 1)
def test_clicks_with_params(self):
self.journey_email.stub_request(self.param_uri_for("clicks", "2019-01-01", 1, 10, "desc"), "journey_email_clicks_with_params.json")
clicks = self.journey_email.clicks(date="2019-01-01", page=1, page_size=10, order_direction="desc")
self.assertEqual(len(clicks.Results), 2)
click_one = clicks.Results[0]
self.assertEqual(click_one.EmailAddress, "[email protected]")
self.assertEqual(click_one.Date, "2019-08-19 10:24:00")
self.assertEqual(click_one.URL, "https://example.com")
self.assertEqual(click_one.IPAddress, "198.148.196.144")
self.assertEqual(click_one.Latitude, -33.8591)
self.assertEqual(click_one.Longitude, 151.200195)
self.assertEqual(click_one.City, "Sydney")
self.assertEqual(click_one.Region, "New South Wales")
self.assertEqual(click_one.CountryCode, "AU")
self.assertEqual(click_one.CountryName, "Australia")
        self.assertEqual(clicks.ResultsOrderedBy, "Date")
        self.assertEqual(clicks.OrderDirection, "DESC")
        self.assertEqual(clicks.PageNumber, 1)
        self.assertEqual(clicks.PageSize, 10)
        self.assertEqual(clicks.RecordsOnThisPage, 2)
        self.assertEqual(clicks.TotalNumberOfRecords, 2)
        self.assertEqual(clicks.NumberOfPages, 1)
def test_opens_no_params(self):
self.journey_email.stub_request(self.no_param_uri_for("opens"), "journey_email_opens_no_params.json")
opens = self.journey_email.opens()
self.assertEqual(len(opens.Results), 2)
open_one = opens.Results[0]
self.assertEqual(open_one.EmailAddress, "[email protected]")
self.assertEqual(open_one.Date, "2019-08-19 10:23:00")
self.assertEqual(open_one.IPAddress, "198.148.196.144")
self.assertEqual(open_one.Latitude, -33.8591)
self.assertEqual(open_one.Longitude, 151.200195)
self.assertEqual(open_one.City, "Sydney")
self.assertEqual(open_one.Region, "New South Wales")
self.assertEqual(open_one.CountryCode, "AU")
self.assertEqual(open_one.CountryName, "Australia")
        self.assertEqual(opens.ResultsOrderedBy, "Date")
        self.assertEqual(opens.OrderDirection, "ASC")
        self.assertEqual(opens.PageNumber, 1)
        self.assertEqual(opens.PageSize, 1000)
        self.assertEqual(opens.RecordsOnThisPage, 2)
        self.assertEqual(opens.TotalNumberOfRecords, 2)
        self.assertEqual(opens.NumberOfPages, 1)
def test_opens_with_params(self):
self.journey_email.stub_request(self.param_uri_for("opens", "2019-01-01", 1, 10, "desc"), "journey_email_opens_with_params.json")
opens = self.journey_email.opens(date="2019-01-01", page=1, page_size=10, order_direction="desc")
self.assertEqual(len(opens.Results), 2)
open_one = opens.Results[0]
self.assertEqual(open_one.EmailAddress, "[email protected]")
self.assertEqual(open_one.Date, "2019-08-19 10:24:00")
self.assertEqual(open_one.IPAddress, "198.148.196.144")
self.assertEqual(open_one.Latitude, -33.8591)
self.assertEqual(open_one.Longitude, 151.200195)
self.assertEqual(open_one.City, "Sydney")
self.assertEqual(open_one.Region, "New South Wales")
self.assertEqual(open_one.CountryCode, "AU")
self.assertEqual(open_one.CountryName, "Australia")
        self.assertEqual(opens.ResultsOrderedBy, "Date")
        self.assertEqual(opens.OrderDirection, "DESC")
        self.assertEqual(opens.PageNumber, 1)
        self.assertEqual(opens.PageSize, 10)
        self.assertEqual(opens.RecordsOnThisPage, 2)
        self.assertEqual(opens.TotalNumberOfRecords, 2)
        self.assertEqual(opens.NumberOfPages, 1)
def test_recipients_no_params(self):
self.journey_email.stub_request(self.no_param_uri_for("recipients"), "journey_email_recipients_no_params.json")
recipients = self.journey_email.recipients()
self.assertEqual(len(recipients.Results), 4)
recipient_one = recipients.Results[0]
self.assertEqual(recipient_one.EmailAddress, "[email protected]")
self.assertEqual(recipient_one.SentDate, "2019-08-19 10:23:00")
self.assertEqual(recipients.ResultsOrderedBy, "SentDate")
self.assertEqual(recipients.OrderDirection, "ASC")
self.assertEqual(recipients.PageNumber, 1)
self.assertEqual(recipients.PageSize, 1000)
self.assertEqual(recipients.RecordsOnThisPage, 4)
self.assertEqual(recipients.TotalNumberOfRecords, 4)
self.assertEqual(recipients.NumberOfPages, 1)
def test_recipients_with_params(self):
self.journey_email.stub_request(self.param_uri_for("recipients", "2019-01-01", 1, 10, "desc"), "journey_email_recipients_with_params.json")
recipients = self.journey_email.recipients(date="2019-01-01", page=1, page_size=10, order_direction="desc")
self.assertEqual(len(recipients.Results), 4)
recipient_one = recipients.Results[0]
self.assertEqual(recipient_one.EmailAddress, "[email protected]")
self.assertEqual(recipient_one.SentDate, "2019-08-21 04:26:00")
self.assertEqual(recipients.ResultsOrderedBy, "SentDate")
self.assertEqual(recipients.OrderDirection, "DESC")
self.assertEqual(recipients.PageNumber, 1)
self.assertEqual(recipients.PageSize, 10)
self.assertEqual(recipients.RecordsOnThisPage, 4)
self.assertEqual(recipients.TotalNumberOfRecords, 4)
self.assertEqual(recipients.NumberOfPages, 1)
def test_unsubscribes_no_params(self):
self.journey_email.stub_request(self.no_param_uri_for("unsubscribes"), "journey_email_unsubscribes_no_params.json")
unsubscribes = self.journey_email.unsubscribes()
self.assertEqual(len(unsubscribes.Results), 1)
unsubscribe_one = unsubscribes.Results[0]
self.assertEqual(unsubscribe_one.EmailAddress, "[email protected]")
self.assertEqual(unsubscribe_one.Date, "2019-08-19 10:24:00")
self.assertEqual(unsubscribe_one.IPAddress, "198.148.196.144")
self.assertEqual(unsubscribes.ResultsOrderedBy, "Date")
self.assertEqual(unsubscribes.OrderDirection, "ASC")
self.assertEqual(unsubscribes.PageNumber, 1)
self.assertEqual(unsubscribes.PageSize, 1000)
self.assertEqual(unsubscribes.RecordsOnThisPage, 1)
self.assertEqual(unsubscribes.TotalNumberOfRecords, 1)
self.assertEqual(unsubscribes.NumberOfPages, 1)
def test_unsubscribes_with_params(self):
self.journey_email.stub_request(self.param_uri_for("unsubscribes", "2019-01-01", 1, 10, "desc"), "journey_email_unsubscribes_with_params.json")
unsubscribes = self.journey_email.unsubscribes(date="2019-01-01", page=1, page_size=10, order_direction="desc")
self.assertEqual(len(unsubscribes.Results), 1)
unsubscribe_one = unsubscribes.Results[0]
self.assertEqual(unsubscribe_one.EmailAddress, "[email protected]")
self.assertEqual(unsubscribe_one.Date, "2019-08-19 10:24:00")
self.assertEqual(unsubscribe_one.IPAddress, "198.148.196.144")
self.assertEqual(unsubscribes.ResultsOrderedBy, "Date")
self.assertEqual(unsubscribes.OrderDirection, "DESC")
self.assertEqual(unsubscribes.PageNumber, 1)
self.assertEqual(unsubscribes.PageSize, 10)
self.assertEqual(unsubscribes.RecordsOnThisPage, 1)
self.assertEqual(unsubscribes.TotalNumberOfRecords, 1)
self.assertEqual(unsubscribes.NumberOfPages, 1)
def no_param_uri_for(self, action):
return "journeys/email/%s/%s.json" %\
(self.journey_email_id, action)
def param_uri_for(self, action, date, page, pagesize, orderdirection):
return "journeys/email/%s/%s.json?date=%s&page=%s&pagesize=%s&orderdirection=%s" %\
(self.journey_email_id, action, quote(date, ''), page, pagesize, orderdirection)
class OAuthJourneyEmailTestCase(unittest.TestCase, JourneyEmailTestCase):
"""Test when using OAuth to authenticate"""
def setUp(self):
self.journey_email_id = "787y87y87y87y87y87y87"
self.journey_email = JourneyEmail(
{"access_token": "ASP95S4aR+9KsgfHB0dapTYxNA==", "refresh_token": "5S4aASP9R+9KsgfHB0dapTYxNA=="}, self.journey_email_id)
class ApiKeyJourneyEmailTestCase(unittest.TestCase, JourneyEmailTestCase):
"""Test when using an API key to authenticate"""
def setUp(self):
self.journey_email_id = "787y87y87y87y87y87y87"
self.journey_email = JourneyEmail(
{'api_key': '123123123123123123123'}, self.journey_email_id)
|
py
|
1a5a53b5dc91a7cd344cdbaf0ea24bc7df36c997
|
"""
Copyright (C) 2020 Piek Solutions LLC
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import pyvisa as visa
import time
import sys, traceback
import re as regex
import numpy as np
class VisaInstrument:
def __init__(self, ip, gpib_address):
"""
initialize visa instrument resource
:param ip: (str) ip address of Papaya
:param gpib_address: (str) GPIB address of instrument
"""
resource_name = "TCPIP0::%s::inst%s::INSTR" % (ip, gpib_address)
print(resource_name)
rm = visa.ResourceManager()
self.instr = rm.open_resource(resource_name)
self.instr.timeout = 10000
def close(self):
self.instr.close()
def cls(self):
try:
self.instr.write('*CLS')
except ValueError:
print('*CLS fails to clear')
def _set_ESE(self, x):
try:
cmd = '*ESE ' + str(x)
self.instr.write(cmd)
except ValueError:
print ('*ESE write fails')
    def _get_ESE(self):
try:
resp = self.instr.query('*ESE?')
self._output = float(resp)
except ValueError:
print('*ESE query fails')
return self._output
ESE = property(_get_ESE, _set_ESE, "ESE property")
def _set_SRE(self, x):
try:
cmd = '*SRE ' + str(x)
self.instr.write(cmd)
except ValueError:
print ('*SRE write fails')
    def _get_SRE(self):
try:
resp = self.instr.query('*SRE?')
self._output = float(resp)
except ValueError:
print('*SRE query fails')
return self._output
SRE = property(_get_SRE, _set_SRE, "SRE property")
def queryIDN(self):
try:
data = self.instr.query('*IDN?')
return data
except ValueError:
print('*IDN query fails')
class Keysight_N9030B(VisaInstrument):
def getTrace(self, tra='TRACE1'):
        count = 0
        tmp = ''
        try:
self.instr.write('trac:data? %s' %tra)
resp = self.instr.read()
flag = '\n' in resp
while not flag:
tmp = self.instr.read()
resp += tmp
flag = '\n' in tmp
count += 1
except visa.VisaIOError:
print('error getting trace')
print(tmp)
traceback.print_exc()
sys.exit(3)
ary = resp.split(',')
dd = np.array([float(c) for c in ary])
return dd
def getTraceXY(self, tra='san1'):
        count = 0
        tmp = ''
        try:
self.instr.write('fetch:%s?' %tra)
resp = self.instr.read()
flag = '\n' in resp
while not flag:
tmp = self.instr.read()
resp += tmp
flag = '\n' in tmp
count += 1
except visa.VisaIOError:
print('error getting xy trace')
print(tmp)
traceback.print_exc()
sys.exit(3)
ary = resp.split(',')
dd = np.array([float(c) for c in ary])
return dd
class Anritsu_M4647A(VisaInstrument):
def sweepOnce(self):
self.instr.write('TRS;WFS;HLD')
time.sleep(11)
def readSXX(self, fmt='OS11C'):
try:
self.instr.write(fmt) # C here refers to calibrated
resp = self.instr.read()
s = regex.findall(r'^#\d+', resp)[0] # get the first elm in string instead of list
pos = int(s[1]) + 3
_num = int(s[2:len(s)]) # total number of bytes to read
resp = resp[pos:len(resp)] # remove the header
cnt = len(resp)
while cnt < _num:
tmp = self.instr.read()
cnt += len(tmp)
resp += tmp
except visa.VisaIOError:
traceback.print_exc()
sys.exit(3)
# make them into real numbers
y = resp.split('\n')
y = y[0:len(y)-1] # last element is \n
real = np.zeros(len(y), dtype=float)
imag = np.zeros(len(y), dtype=float)
for i_ in range(0, len(y)):
valstr = y[i_].split(',') # split into real and imag
real[i_] = float(valstr[0])
imag[i_] = float(valstr[1])
c = real + 1.j*imag
return c
def freq(self):
try:
self.instr.write(':sens1:freq:data?')
resp = self.instr.read()
s = regex.findall(r'^#\d+', resp)[0] # get the first elm in string instead of list
pos = int(s[1]) + 3
_num = int(s[2:len(s)]) # total number of bytes to read
resp = resp[pos:len(resp)] # remove the header
cnt = len(resp)
while cnt < _num:
tmp = self.instr.read()
cnt += len(tmp)
resp += tmp
except visa.VisaIOError:
traceback.print_exc()
sys.exit(3)
y = resp.split('\n')
y = y[0:len(y)-1] # last element is \n
val = np.array([float(c) for c in y])
return val
class Keithley_2400(VisaInstrument):
def sourcetype(self, type):
if type == 'voltage':
self.instr.write(':SOUR:FUNC VOLT')
self.instr.write(':SENS:FUNC "CURR"')
elif type == 'current':
self.instr.write(':SOUR:FUNC CURR')
self.instr.write(':SENS:FUNC "VOLT"')
def setvoltage(self, vb, curlimit=0.05):
self.instr.write(':SENS:CURR:PROT %f' % curlimit)
self.instr.write(':SOUR:VOLT:LEV %f' % vb)
def querycurrent(self):
try:
self.instr.write(':FORM:ELEM CURR')
cur = self.instr.query('READ?')
c = float(cur)
except ValueError:
print('Keithley 2400 warning: current reading error...')
print(cur)
c = -1000
return float(c)
def setcurrent(self, cur, vlimit=2):
self.instr.write(':SENS:VOLT:PROT %f' % vlimit)
self.instr.write(':SOUR:CURR:LEV %s' % cur)
def _get_output(self):
try:
resp = self.instr.query(':OUTPUT?')
self._output = float(resp)
except ValueError:
print('Keithley 2400 query fails')
return self._output
def _set_output(self, x):
try:
cmd = ':OUTPUT ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Keithley 2400 write fails')
self._output = x
output = property(_get_output, _set_output, "output property")
class Agilent_E3631(VisaInstrument):
def _get_outPutOnOff(self):
try:
resp = self.instr.query(':outp?')
self._outputOnOff = resp
except ValueError:
print('Agilent E3631 query outp fails')
return self._outputOnOff
def _set_outPutOnOff(self, x):
try:
cmd = 'outp ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 write outp fails')
self._outputOnOff = x
outputOnOff = property(_get_outPutOnOff, _set_outPutOnOff, "outputOnOff property")
def queryCurrent(self):
try:
resp=self.instr.query(':meas:curr:dc?')
except ValueError:
print('Agilent E3631 query current fails')
return float(resp)
def queryVoltage(self):
try:
resp=self.instr.query(':meas:volt:dc?')
except ValueError:
print('Agilent E3631 query voltage fails')
return float(resp)
def selectPowerSupply(self, x):
"""
select power supply instrument,
:param x: (int) 1 is P6V, 2 is P25V and 3 is N25V
:return: none
"""
try:
cmd = 'INST:NSEL ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 select power supply fails')
def setP6VSupply(self, x):
try:
# P6V is 1
self.instr.write('INST:NSEL 1')
cmd = 'volt ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 set P6V fails')
def queryP6VSetVoltage(self):
try:
# P6V is 1
self.instr.write('INST:NSEL 1')
val = self.instr.query('volt?')
except ValueError:
print('Agilent E3631 query P6V fails')
return float(val)
def setP25VSupply(self,x):
try:
# P25V is 2
self.instr.write('INST:NSEL 2')
cmd = 'volt ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 set P25V fails')
def queryP25VSetVoltage(self):
try:
# P25V is 2
self.instr.write('INST:NSEL 2')
val = self.instr.query('volt?')
except ValueError:
print('Agilent E3631 query P25V fails')
return float(val)
def setN25VSupply(self, x):
# N25V is 3
try:
self.instr.write('INST:NSEL 3')
cmd = 'volt ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 set N25V fails')
def queryN25VSetVoltage(self):
# N25V is 3
try:
self.instr.write('INST:NSEL 3')
val = self.instr.query('volt?')
except ValueError:
print('Agilent E3631 query N25V fails')
return float(val)
class Keysight_E3649A(VisaInstrument):
def _get_outputOnOff(self):
"""
query output state
:return: 0(OFF) or 1(ON)
"""
try:
resp = self.instr.query('OUTP?')
self._outputOnOff = resp.rstrip()
except ValueError:
print('Agilent E3649A query outp on/off fails')
return self._outputOnOff
def _set_outputOnOff(self, x):
"""
turn output on or off
:param x: either ON or OFF
:return: None
"""
try:
self.instr.write('OUTP ' + str(x))
except ValueError:
print('Agilent E3649A write outp on/off fails')
self._outputOnOff = x
outputOnOff = property(_get_outputOnOff, _set_outputOnOff, "outputOnOff property")
def queryCurrent(self, output_num=None):
"""
query current of selected output
:param output_num: (int) the output to query (None|1|2);
default value None uses the output previously set.
:return: (float) current
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
resp = self.instr.query('MEAS:CURR:DC?')
return float(resp)
        except (visa.VisaIOError, ValueError):
print('Agilent E3649A query current fails')
def setCurrent(self, curr, output_num=None):
"""
query current of selected output
:param curr: (float) the desired current level
:param output_num: (int) the output to query (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write('CURR ' + str(curr))
        except (visa.VisaIOError, ValueError):
print('Agilent E3649A query current fails')
def queryVoltage(self, output_num=None):
"""
query voltage of selected output
:param output_num: (int) the output to read (None|1|2);
default value None uses the output previously set.
:return: (float) voltage
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
resp = self.instr.query('MEAS:VOLT:DC?')
return float(resp)
        except (visa.VisaIOError, ValueError):
print('Agilent E3649A query voltage fails')
def setVoltage(self, volt, output_num=None):
"""
set voltage of selected output
:param volt: (float) the desired voltage level
:param output_num: (int) the output to set (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write('VOLT ' + str(volt))
        except (visa.VisaIOError, ValueError):
print('Agilent E3649A set voltage fails')
def selectOutput(self, output_num):
"""
select which output to modify
:param output_num: (int) the output to modify (1|2)
:return: None
"""
try:
self.instr.write('INST:NSEL ' + str(output_num))
except visa.VisaIOError:
print('Agilent E3649A select output fails')
def queryOutputRange(self, output_num=None):
"""
query range setting of selected output
:param output_num: (int) the output to read (None|1|2);
default value None uses the output previously set.
:return: (str) P35V or P60V
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
resp = self.instr.query(':VOLT:RANG?')
return resp.rstrip()
except visa.VisaIOError:
print('Agilent E3649A query output range fails')
def setOutputRange(self, volt_range, output_num=None):
"""
set voltage range of selected output
:param volt_range: the voltage range to set output to (P35V|LOW|P60V|HIGH)
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:RANG ' + str(volt_range))
except visa.VisaIOError:
print('Agilent E3649A set output voltage fails')
def setOutputLow(self, output_num=None):
"""
set voltage range of selected output to 35V
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:RANG LOW')
except visa.VisaIOError:
print('Agilent E3649A set output voltage LOW fails')
def setOutputHigh(self, output_num=None):
"""
set voltage range of output to 60V
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:RANG HIGH')
except visa.VisaIOError:
print('Agilent E3649A set output voltage HIGH fails')
def enableVoltageProtection(self, enable=1, output_num=None):
"""
enable or disable the overvoltage protection function.
:param enable: (0|1|OFF|ON)
:param output_num: output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:PROT:STAT ' + str(enable))
except visa.VisaIOError:
print('Agilent E3649A enable voltage protection fails')
def setVoltageProtection(self, volt, output_num=None):
"""
set the voltage level at which the overvoltage protection
(OVP) circuit will trip.
:param volt: voltage level, 'MIN', or 'MAX'
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:PROT ' + str(volt))
except visa.VisaIOError:
print('Agilent E3649A set output voltage protection fails')
def queryVoltageProtection(self, output_num=None):
"""
query the protection state and voltage level at which the
overvoltage protection (OVP) circuit will trip.
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: tuple (int, str) consisting of enable 0 (OFF) or 1 (ON)
and the voltage trip level.
"""
try:
ena = self.instr.query('VOLT:PROT:STAT?')
level = self.instr.query('VOLT:PROT?')
return ena.rstrip(), level.rstrip()
except visa.VisaIOError:
print('Agilent E3649A query output voltage protection fails')
class Agilent_33401(VisaInstrument):
def acVoltage(self):
try:
self.instr.write(':meas:volt:ac?')
resp = self.instr.read()
return float(resp)
except ValueError:
print('Agilent 33401 query ac volt fails')
def acCurrent(self):
try:
self.instr.write(':meas:curr:ac?')
resp = self.instr.read()
return float(resp)
except ValueError:
print('Agilent 33401 query ac curr fails')
def dcVoltage(self):
try:
self.instr.write(':meas:volt:dc?')
resp = self.instr.read()
return float(resp)
except ValueError:
print('Agilent 33401 query dc volt fails')
def dcCurrent(self):
try:
self.instr.write(':meas:curr:dc?')
resp = self.instr.read()
return float(resp)
except ValueError:
print('Agilent 33401 query dc curr fails')
class Keithley_2510(VisaInstrument):
def querytemp(self):
try:
self.instr.write(':MEAS:TEMP?')
temp = self.instr.read()
t = float(temp)
except ValueError:
print('Keithley 2510 warning: temp read error...')
print(temp)
t = -1000
return float(t)
    def settemp(self, setT=25):
        self.instr.write(':SOUR:TEMP %f' % setT)
def _get_output(self):
try:
resp = self.instr.query(':OUTPUT?')
self._output = float(resp)
except ValueError:
print('Keithley 2510 query outp fails')
return self._output
def _set_output(self, x):
try:
cmd = ':OUTPUT ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Keithley 2510 write outp fails')
self._output = x
output = property(_get_output, _set_output, "output property")
class Newport_3150(VisaInstrument):
def querytemp(self):
temp = self.instr.query(':TEC:T?')
try:
t = float(temp)
except ValueError:
print('Newport 3150 warning: temp read error...')
print(temp)
t = -1000
return float(t)
    def settemp(self, setT=25):
        self.instr.write(':TEC:T %f' % setT)
class Agilent_8163(VisaInstrument):
def queryIDN(self):
try:
resp = self.instr.query('*IDN?')
except ValueError:
print('Agilent 8163 fails query')
return resp
def querypower(self):
try:
opt = self.instr.query('READ:POW?')
except ValueError:
print('Agilent 8163 fails query')
return float(opt)
class Keysight_Dca(VisaInstrument):
    def initialize(self):  # initialize for PAM4 measurement
pass
def get_er(self, source='1', ch='2A'):
cmd = ':MEASure:EYE:OER:SOURce'+source+' CHAN'+ch
self.instr.write(cmd)
try:
er = self.instr.query(':MEASure:EYE:OER?')
return float(er)
except ValueError:
print('Keysight dca error')
def getOMA(self, source='1', ch='2A'):
cmd = ':MEASure:EYE:OOMA:SOURce'+source+' CHAN'+ch
self.instr.write(cmd)
try:
oma = self.instr.query(':MEASure:EYE:OOMA?')
return float(oma)
except ValueError:
print('Keysight dca error')
def getRLM(self, source='1', ch='2A'):
cmd = ':MEASure:EYE:PAM:LINearity:SOURce'+source+' CHAN'+ch
self.instr.write(cmd)
try:
rlm = self.instr.query(':MEASure:EYE:PAM:LINearity?')
return float(rlm)
except ValueError:
print('Keysight dca error')
def autoscale(self):
self.instr.write(':SYSTem:AUToscale')
try:
self.instr.query('*OPC?')
except ValueError:
print('Keysight dca error')
def clear(self):
self.instr.write(':ACQuire:CDISplay')
try:
self.instr.query('*OPC?')
except ValueError:
print('Keysight dca error')
def run(self):
self.instr.write(':ACQuire:RUN')
class Agilent_86142(VisaInstrument):
def _get_startWavelength(self):
try:
resp = self.instr.query(':sens:wav:star?')
self._startWavelength = float(resp)
except ValueError:
print('Agilent 86142 query fails')
return self._startWavelength
def _set_startWavelength(self, x):
try:
cmd = ':sens:wav:star ' + str(x)
self.instr.write(cmd)
self._startWavelength = x
except visa.VisaIOError:
print('Agilent 86142 write fails')
startWavelength = property(_get_startWavelength, _set_startWavelength, "startWavelength property")
    def _get_stopWavelength(self):
        try:
            resp = self.instr.query(':sens:wav:stop?')
            self._stopWavelength = float(resp)
        except ValueError:
            print('Agilent 86142 query fails')
        return self._stopWavelength
def _set_stopWavelength(self, x):
try:
cmd = ':sens:wav:stop ' + str(x)
self.instr.write(cmd)
self._stopWavelength = x
except visa.VisaIOError:
print('Agilent 86142 write fails')
stopWavelength = property(_get_stopWavelength, _set_stopWavelength, "stopWavelength property")
def _get_traceLength(self):
try:
resp = self.instr.query(':SENS:SWE:POIN?')
self._traceLength = float(resp)
except ValueError:
print('Agilent 86142 query fails')
return self._traceLength
def _set_traceLength(self, x):
try:
cmd = ':SENS:SWE:POIN ' + str(x)
self.instr.write(cmd)
self._traceLength = x
except ValueError:
print('Agilent 86142 write fails')
traceLength = property(_get_traceLength, _set_traceLength, "traceLength property")
def getTrace(self):
tmp = ''
try:
self.instr.write('form ascii')
self.instr.write('trac? tra')
resp = self.instr.read()
flag = '\n' in resp
count = 0
while not flag:
tmp = self.instr.read()
resp += tmp
flag = '\n' in tmp
count += 1
except visa.VisaIOError:
print('error')
print(tmp)
traceback.print_exc()
sys.exit(3)
return resp
def getTrace1(self, pts):
tmp = ''
elmcount = []
count = 0
itr=0
try:
self.instr.write('form ascii')
self.instr.write('trac? tra')
resp = self.instr.read()
count += len(resp.split(','))
while count < pts:
tmp = self.instr.read()
count += len(tmp.split(','))
elmcount.append(count)
resp += tmp
itr += 1
except visa.VisaIOError:
print('error')
print(tmp)
traceback.print_exc()
sys.exit(3)
return resp
def getTraceBin(self):
try:
self.instr.write('form real32')
self.instr.write('trac? tra')
resp = self.instr.read()
return resp
except ValueError:
print('Agilent 86142 write fails')
class JDSU_HA9(VisaInstrument):
_attenuation = 0
_beamIsBlocked = 0
def _get_attenuation(self):
try:
resp = self.instr.query('att?')
self._attenuation = float(resp)
except ValueError:
print('JDSU HA9 query fails')
return self._attenuation
def _set_attenuation(self, x):
try:
cmd = 'att ' + str(x)
self.instr.write(cmd)
self._attenuation = x
except ValueError:
print('JDSU HA9 write fails')
attenuation = property(_get_attenuation, _set_attenuation, "attenuation property")
def _get_beamIsBlocked(self):
try:
resp = self.instr.query('D?')
self._beamIsBlocked = int(resp)
except ValueError:
print('JDSU HA9 query fails')
return self._beamIsBlocked
def _set_beamIsBlocked(self, x):
try:
cmd = 'D ' + str(int(x))
self.instr.write(cmd)
self._beamIsBlocked = int(x)
except ValueError:
print('JDSU HA9 write fails')
beamIsBlocked = property(_get_beamIsBlocked, _set_beamIsBlocked, "beamIsBlock property")
class N9020A_SpectrumAnalyzer(VisaInstrument):
_inputCoupling = 'DC' # default
_bandwidthResolution_MHz = 0.5
_bandwidthVideo_MHz = 10
_sweepPoints = 1001
_startFreqMHz = 10e-3
_stopFreqMHz = 1350
_traceAve = 1
_contSweep = 0
def _set_contSweep(self, x=1):
try:
cmd = ':INIT:CONT ' + str(x)
self.instr.write(cmd)
self._contSweep = str(x)
except ValueError:
print('N9020A fails to set cont sweep config')
def _get_contSweep(self):
try:
resp = self.instr.query(':INIT:CONT?')
self._contSweep=resp
except ValueError:
print('N9020A fails to get cont sweep config')
return self._contSweep
contSweep = property(_get_contSweep, _set_contSweep, 'input coupling property')
def _set_inputCoupling(self, x='DC'):
try:
cmd = 'INPut:COUPling ' + str(x)
self.instr.write(cmd)
self._inputCoupling = str(x)
except ValueError:
print('N9020A fails to set input coupling')
def _get_inputCoupling(self):
try:
resp = self.instr.query('INP:COUP?')
self._inputCoupling = resp
except ValueError:
print('N9020A fails to get input coupling')
return self._inputCoupling
inputCoupling = property(_get_inputCoupling, _set_inputCoupling, 'input coupling property')
def _set_bandwidthResolution_MHz(self,x=0.5):
try:
cmd = 'BANDWIDTH:RESOLUTION ' + str(x) + ' MHZ'
self.instr.write(cmd)
self._bandwidthResolution_MHz = float(x)
except ValueError:
print('N9020A fails to set bandwidth resolution')
def _get_bandwidthResolution_MHz(self):
try:
resp = self.instr.query('BANDWIDTH:RESOLUTION?')
self._bandwidthResolution_MHz = float(resp)/1e6 # in MHz
except ValueError:
print('N9020A fails to get bandwidth resolution')
return self._bandwidthResolution_MHz
resolutionBW_MHz = property(_get_bandwidthResolution_MHz, _set_bandwidthResolution_MHz, 'bandwidth resolution property')
def _set_bandwidthVideo_MHz(self, x=0.5):
try:
cmd = 'BANDWIDTH:VIDEO ' + str(x) + ' MHZ'
self.instr.write(cmd)
            self._bandwidthVideo_MHz = float(x)
except ValueError:
print('N9020A fails to set video bandwidth')
def _get_bandwidthVideo_MHz(self):
try:
            resp = self.instr.query('BANDWIDTH:VIDEO?')
            self._bandwidthVideo_MHz = float(resp)/1e6  # in MHz
        except ValueError:
            print('N9020A fails to get video bandwidth')
        return self._bandwidthVideo_MHz
videoBW_MHz = property(_get_bandwidthVideo_MHz, _set_bandwidthVideo_MHz, 'video bandwidth property')
def _set_sweepPoints(self,x=1001):
try:
cmd = 'SWEEP:POINTS ' + str(x)
self.instr.write(cmd)
self._sweepPoints = int(x)
except ValueError:
print('N9020A fails to set sweep points')
def _get_sweepPoints(self):
try:
resp = self.instr.query('SWEEP:POINTS?')
self._sweepPoints = int(resp) # in MHz
except ValueError:
print('N9020A fails to get sweep points')
return self._sweepPoints
sweepPoints = property(_get_sweepPoints, _set_sweepPoints, 'sweep points')
def _set_startFreqMHz(self,x=10e-3):
try:
cmd = 'FREQUENCY:START ' + str(x) + ' MHZ'
self.instr.write(cmd)
self._startFreqMHz = float(x)
except ValueError:
print('N9020A fails to set start frequency')
def _get_startFreqMHz(self):
try:
resp = self.instr.query('FREQUENCY:START?')
self._startFreqMHz = float(resp)/1e6 # in MHz
except ValueError:
            print('N9020A fails to get start frequency')
return self._startFreqMHz
startFreqMHz = property(_get_startFreqMHz, _set_startFreqMHz,'start frequency property')
def _set_stopFreqMHz(self, x=13.5e3):
try:
cmd = 'FREQUENCY:STOP ' + str(x) + ' MHZ'
self.instr.write(cmd)
self._stopFreqMHz = float(x)
except ValueError:
            print('N9020A fails to set stop frequency')
def _get_stopFreqMHz(self):
try:
resp = self.instr.query('FREQUENCY:STOP?')
self._stopFreqMHz = float(resp)/1e6 # in MHz
except ValueError:
print('N9020A fails to get stop frequency')
return self._stopFreqMHz
stopFreqMHz = property(_get_stopFreqMHz, _set_stopFreqMHz, 'start frequency property')
def _set_traceAve(self, x=1):
try:
if x >= 1:
cmd = 'ACP:AVER:COUN ' + str(x)
self.instr.write(cmd)
if x == 0:
self.instr.write('ACPower:AVERage OFF')
self._traceAve = int(x)
except ValueError:
print('N9020A fails to set trace average')
def _get_traceAve(self):
try:
resp = self.instr.query('ACPower:AVERage:COUNt?')
self._traceAve = int(resp)
except ValueError:
            print('N9020A fails to get trace average')
return self._traceAve
traceAve = property(_get_traceAve, _set_traceAve, 'trace average')
def getTrace(self):
_points = self._get_sweepPoints()
_stopf = self._get_stopFreqMHz()
_startf = self._get_startFreqMHz()
_freq = np.linspace(_startf, _stopf, _points)
tmp = ''
try:
self.instr.write('FORMAT:TRACE:DATA ASCII')
self.instr.write('TRAC? TRACE1')
resp = self.instr.read()
flag = '\n' in resp
count = 0
while not flag:
tmp = self.instr.read()
resp += (tmp)
flag = '\n' in tmp
count += 1
except visa.VisaIOError:
print('N9020A get trace error')
print(tmp)
resp = tmp
traceback.print_exc()
sys.exit(3)
resp = resp.split(',')
y = [float(d) for d in resp]
y = np.array(y)
return _freq, y
def setMarkerPos(self,pos=0):
_points = self._get_sweepPoints()
cmd = 'calc:mark1:X:pos:cent ' + str(pos)
try:
if pos < _points:
self.instr.write(cmd)
except visa.VisaIOError:
print('N9020A write error: ' + cmd)
def getMarkerNoise(self, pos=0):
# cmd = 'CALC:MARK:FUNCNOIS'
try:
# self.instr.write(cmd)
self.setMarkerPos(pos)
val = self.instr.query('CALC:MARK:Y?')
return float(val)
except visa.VisaIOError:
print('N9020A getMarkerNoise error')
    def getMarkerNoiseTrace(self):
_points = self._get_sweepPoints()
_stopf = self._get_stopFreqMHz()
_startf = self._get_startFreqMHz()
_freq = np.linspace(_startf, _stopf, _points)
try:
self.instr.write('CALC:MARK:FUNCNOIS')
_points = self._get_sweepPoints()
except visa.VisaIOError:
            print('N9020A getMarkerNoiseTrace error')
# preallocate array
data = np.zeros(_points, dtype=float)
try:
for i in range(0, _points,1):
self.instr.write('calc:mark1:X:pos:cent %d' % i)
val = self.instr.query('CALC:MARK:Y?')
data[i] = float(val)
except ValueError:
            print('N9020A getMarkerNoiseTrace error')
return _freq, data
def setTraceType(self, x='WRITe'):
try:
cmd = 'trace1:type %s' % x
self.instr.write(cmd)
except visa.VisaIOError:
print('N9020A trace type write error %s' % x)
def getTraceType(self):
try:
cmd = 'trace1:type?'
resp = self.instr.query(cmd)
except visa.VisaIOError:
print('N9020A trace type query error')
return resp
class Agilent_86122A(VisaInstrument):
def getFreq(self):
try:
self.instr.write(':MEAS:SCAL:POW:FREQ?')
resp = float(self.instr.read())
return resp
except visa.VisaIOError:
print('Agilent 86122A error')
def getMultipleFreq(self):
try:
self.instr.write(':MEAS:ARR:POW:FREQ?')
resp = self.instr.read()
return resp
except visa.VisaIOError:
print('Agilent 86122A error')
class Agilent_N5183B(VisaInstrument):
def _get_outPutOnOff(self):
try:
resp = self.instr.query(':outp?')
self._outputOnOff = resp
except ValueError:
print('Agilent N5183B query fails')
return self._outputOnOff
def _set_outPutOnOff(self, x):
try:
cmd = 'outp ' + str(x)
self.instr.write(cmd)
self._outputOnOff = x
except ValueError:
print('Agilent N5183B write fails')
outputOnOff = property(_get_outPutOnOff, _set_outPutOnOff, "outputOnOff property")
def setFreq(self, freq_Hz=1000000):
try:
cmd = ':freq ' + str(freq_Hz)
self.instr.write(cmd)
except ValueError:
print('Agilent N5183B write fails')
def getFreq(self):
try:
            resp = self.instr.query(':freq?')
            return float(resp)
        except ValueError:
            print('Agilent N5183B query fails')
def setPowerLevel(self, pow_dBm=-20.0):
try:
            cmd = ':pow:lev %f' % pow_dBm
self.instr.write(cmd)
except ValueError:
print('Agilent N5183B write fails')
def getPowerLevel(self):
try:
cmd = ':pow:lev?'
resp = self.instr.query(cmd)
return float(resp)
except ValueError:
print('Agilent N5183B query fails')
class SRS(VisaInstrument):
_pidPolarity = 0
_pidLoop = 0
def PIDConnect(self):
try:
self.instr.write('CONN 7, \"ZZZ\"')
time.sleep(1)
except ValueError:
print('SRS Connect fails')
    def PIDDisconnect(self):
try:
self.instr.write('\"ZZZ\"')
except ValueError:
print('SRS Disconnect fails')
def _PIDPolaritySet(self, pol=0):
try:
self.instr.write('APOL %d' % int(pol))
            self._pidPolarity = int(pol)
except ValueError:
print('SRS APOL set fails')
def _PIDPolarityGet(self):
try:
resp = self.instr.query('APOL?')
self._pidPolarity = int(resp)
except ValueError:
            print('SRS APOL get fails')
return self._pidPolarity
PIDPolarity = property(_PIDPolarityGet, _PIDPolaritySet, 'PID Polarity')
def _setPIDLoop(self, loop=0):
try:
self.instr.write('AMAN %d' % int(loop))
except ValueError:
print('SRS AMAN set fails')
self._pidLoop = int(loop)
def _getPIDLoop(self):
try:
resp = self.instr.query('AMAN?')
self._pidLoop = int(resp)
except ValueError:
print('SRS AMAN get fails')
return self._pidLoop
PIDLoop = property(_getPIDLoop, _setPIDLoop, 'PID Loop on/off')
def setMout(self, val=0):
cmd = 'MOUT %f' % val
print('setting Mout %s' % cmd)
try:
self.instr.write(cmd)
except ValueError:
print('SRS MOUT set fails')
def getMout(self):
try:
resp = self.instr.query('MOUT?')
return float(resp)
except ValueError:
print('SRS MOUT get fails')
class Agilent8163A(VisaInstrument):
def setVoa(self, x):
try:
cmd = ':INPUT1:CHAN1:att ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent 8163A write fails')
def getVoa(self):
try:
cmd = ':INPUT1:CHAN1:att?'
val = self.instr.query(cmd)
return float(val)
except ValueError:
print('Agilent 8163A query fails')
def getOpm(self, ch):
try:
self.instr.write('*CLS')
power = self.instr.query(':FETC2:CHAN{}:POW? '.format(ch))
return float(power)
except ValueError:
print('Agilent 8163A query error')
def initOpm(self):
try:
self.instr.write('*CLS')
for i in range(1, 2):
                self.instr.write(':SENS2:CHAN{}:POW:WAV 1550.0nm'.format(i))
                self.instr.write(':SENS2:CHAN{}:POW:ATIM 200ms'.format(i))
except ValueError:
print('Agilent 8163A write error')
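# Minimal usage sketch (not part of the original module): the Papaya IP address and
# GPIB address below are hypothetical placeholders, assuming a reachable VISA gateway
# and a Keithley 2400 on the bus.
if __name__ == '__main__':
    smu = Keithley_2400('192.168.1.100', '24')  # hypothetical IP / GPIB address
    print(smu.queryIDN())
    smu.sourcetype('voltage')
    smu.setvoltage(1.0, curlimit=0.01)
    smu.output = 1
    print(smu.querycurrent())
    smu.output = 0
    smu.close()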
|
py
|
1a5a5443e8ed834793e99f78b512a909ca0f8336
|
# -*- coding: utf-8 -*-
def main():
import sys
sys.setrecursionlimit(10 ** 8)
input = sys.stdin.readline
n = int(input())
graph = [[] for _ in range(n)]
ab = list()
dp = [0] * n
for _ in range(n - 1):
ai, bi = map(int, input().split())
ai -= 1
bi -= 1
ab.append((ai, bi))
graph[ai].append(bi)
graph[bi].append(ai)
    # dp[pos] = number of vertices in the subtree rooted at pos (vertex 0 is the root)
    def dfs(pos, parent=-1):
        dp[pos] = 1
        for v in graph[pos]:
            if v == parent:
                continue
            dfs(v, pos)
            dp[pos] += dp[v]
    dfs(0)
    ans = 0
    # each edge (ai, bi) lies on the path of count * (n - count) vertex pairs,
    # where count is the size of the smaller side obtained by removing the edge
    for ai, bi in ab:
        count = min(dp[ai], dp[bi])
        ans += count * (n - count)
print(ans)
if __name__ == "__main__":
main()
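# Worked example (added for illustration): for input n=3 with edges (1,2) and (2,3),
# dp = [3, 2, 1] after dfs(0); edge (1,2) contributes min(3,2)*(3-2) = 2 and
# edge (2,3) contributes min(2,1)*(3-1) = 2, so the printed answer is 4 —
# the sum of distances over all unordered vertex pairs of the path 1-2-3.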
|
py
|
1a5a56972908d018cadceb46d5e7efa2c109f6d2
|
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth', # noqa
backbone=dict(
type='VisionTransformer',
img_size=(512, 512),
patch_size=16,
in_channels=3,
embed_dims=768,
num_layers=12,
num_heads=12,
mlp_ratio=4,
out_indices=(2, 5, 8, 11),
qkv_bias=True,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.0,
with_cls_token=True,
norm_cfg=dict(type='LN', eps=1e-6),
act_cfg=dict(type='GELU'),
norm_eval=False,
interpolate_mode='bicubic'),
neck=dict(
type='MultiLevelNeck',
in_channels=[768, 768, 768, 768],
out_channels=768,
scales=[4, 2, 1, 0.5]),
decode_head=dict(
type='UPerHead',
in_channels=[768, 768, 768, 768],
in_index=[0, 1, 2, 3],
pool_scales=(1, 2, 3, 6),
channels=512,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=768,
in_index=3,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole')) # yapf: disable
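# Illustrative sketch (not part of the original config): one common way to consume a
# config like this is via mmcv / mmsegmentation; the file path and device below are
# hypothetical placeholders.
#
#   from mmcv import Config
#   from mmseg.apis import init_segmentor
#   cfg = Config.fromfile('configs/vit/upernet_vit-b16_512x512.py')  # hypothetical path
#   model = init_segmentor(cfg, checkpoint=None, device='cuda:0')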
|
py
|
1a5a56c46ed519d2251f40b480bd0c05c281d594
|
#!/usr/bin/env python3
import argparse
import logging
import mariadb
import yaml
import sys
# Setup logging
# Create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Setup console logging
logging_console_handler = logging.StreamHandler()
logging_formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
logging_console_handler.setFormatter(logging_formatter)
logger.addHandler(logging_console_handler)
def main(argv):
args_parser = argparse.ArgumentParser(description="Setup a global MySQL metadata database for CLP.")
args_parser.add_argument("--config-file", required=True, help="Metadata database basic config file.")
parsed_args = args_parser.parse_args(argv[1:])
config_file_path = parsed_args.config_file
with open(config_file_path, 'r') as f:
config = yaml.safe_load(f)
if config is None:
raise Exception(f"Unable to parse configuration from {config_file_path}.")
required_keys = ["host", "port", "username", "password", "name"]
for key in required_keys:
if key not in config:
raise Exception(f"'{key}' missing from config file.")
host = config["host"]
port = config["port"]
username = config["username"]
password = config["password"]
db_name = config["name"]
table_prefix = config["table_prefix"]
try:
mysql_conn = mariadb.connect(host=host, port=port, username=username, password=password)
mysql_cursor = mysql_conn.cursor()
except mariadb.Error as err:
logger.error("Failed to connect - {}".format(err.msg))
return -1
try:
# Create database
try:
mysql_cursor.execute("CREATE DATABASE IF NOT EXISTS {} DEFAULT CHARACTER SET 'utf8'".format(db_name))
except mariadb.Error as err:
logger.error("Failed to create database - {}".format(err.msg))
return -1
# Use database
try:
mysql_cursor.execute("USE {}".format(db_name))
except mariadb.Error as err:
logger.error("Failed to use database - {}".format(err.msg))
return -1
# Create tables
try:
mysql_cursor.execute(f"""CREATE TABLE IF NOT EXISTS `{table_prefix}archives` (
`pagination_id` BIGINT unsigned NOT NULL AUTO_INCREMENT,
`id` VARCHAR(64) NOT NULL,
`uncompressed_size` BIGINT NOT NULL,
`size` BIGINT NOT NULL,
`creator_id` VARCHAR(64) NOT NULL,
`creation_ix` INT NOT NULL,
KEY `archives_creation_order` (`creator_id`,`creation_ix`) USING BTREE,
UNIQUE KEY `archive_id` (`id`) USING BTREE,
PRIMARY KEY (`pagination_id`)
)""")
mysql_cursor.execute(f"""CREATE TABLE IF NOT EXISTS `{table_prefix}files` (
`id` VARCHAR(64) NOT NULL,
`orig_file_id` VARCHAR(64) NOT NULL,
`path` VARCHAR(12288) NOT NULL,
`begin_timestamp` BIGINT NOT NULL,
`end_timestamp` BIGINT NOT NULL,
`num_uncompressed_bytes` BIGINT NOT NULL,
`num_messages` BIGINT NOT NULL,
`archive_id` VARCHAR(64) NOT NULL,
KEY `files_path` (`path`(768)) USING BTREE,
KEY `files_archive_id` (`archive_id`) USING BTREE,
PRIMARY KEY (`id`)
) ROW_FORMAT=DYNAMIC""")
except mariadb.Error as err:
logger.error("Failed to create table - {}".format(err.msg))
return -1
mysql_conn.commit()
finally:
mysql_cursor.close()
mysql_conn.close()
return 0
if "__main__" == __name__:
sys.exit(main(sys.argv))
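# Example of the expected --config-file contents (illustrative only; the hostname and
# credentials below are placeholders, not values from the original project):
#
#   host: localhost
#   port: 3306
#   username: clp_user
#   password: changeme
#   name: clp_metadata
#   table_prefix: clp_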
|
py
|
1a5a57c97ddf93ea3a0fd6f4071e66a50dcf89e8
|
import numba
|
py
|
1a5a5b29baf628837ddf15955d7db7a4a8cfb50a
|
"""
Test file for Chapter Number Theory.
"""
r"""
sage: a=IntegerModRing(15)(3); b=IntegerModRing(17)(3); print a, b
3 3
sage: a == b
False
sage: R=a.base_ring(); R
Ring of integers modulo 15
sage: R.characteristic()
15
sage: print a+a, a-17, a*a+1, a^3
6 1 10 12
sage: 1/(a+1)
4
sage: 1/a
Traceback (most recent call last):
...
ZeroDivisionError: Inverse does not exist.
sage: z=lift(a); y=ZZ(a); print y, type(y), y==z
3 <type 'sage.rings.integer.Integer'> True
sage: [Mod(x,15).additive_order() for x in range(0,15)]
[1, 15, 15, 5, 15, 3, 5, 15, 15, 5, 3, 15, 5, 15, 15]
sage: [[x,Mod(x,15).multiplicative_order()] for x in range(1,15) if gcd(x,15)==1]
[[1, 1], [2, 4], [4, 2], [7, 4], [8, 4], [11, 2], [13, 4], [14, 2]]
sage: p=10^20+39; mod(2,p).multiplicative_order()
50000000000000000019
sage: mod(3,p).multiplicative_order()
100000000000000000038
sage: n=3^100000; a=n-1; e=100
sage: timeit('(a^e) % n') # random long time
5 loops, best of 3: 387 ms per loop
sage: timeit('power_mod(a,e,n)') # random
125 loops, best of 3: 3.46 ms per loop
sage: R = GF(17); [1/R(x) for x in range(1,17)]
[1, 9, 6, 13, 7, 3, 5, 15, 2, 12, 14, 10, 4, 11, 8, 16]
sage: R = GF(9,name='x'); R
Finite Field in x of size 3^2
sage: R.polynomial()
x^2 + 2*x + 2
sage: Set([r for r in R])
{0, 1, 2, x, x + 1, x + 2, 2*x, 2*x + 1, 2*x + 2}
sage: Q.<x> = PolynomialRing(GF(3))
sage: R2 = GF(9,name='x',modulus=x^2+1); R2
Finite Field in x of size 3^2
sage: p = R(x+1); R2(p)
Traceback (most recent call last):
...
TypeError: unable to coerce from a finite field other than the prime subfield
sage: rational_reconstruction(411,1000)
-13/17
sage: rational_reconstruction(409,1000)
Traceback (most recent call last):
...
ValueError: Rational reconstruction of 409 (mod 1000) does not exist.
sage: def harmonic(n):
... return sum([1/x for x in range(1,n+1)])
sage: def harmonic_mod(n,m):
... return add([1/x % m for x in range(1,n+1)])
sage: def harmonic2(n):
... q = lcm(range(1,n+1))
... pmax = RR(q*(log(n)+1))
... m = ZZ(2*pmax^2)
... m = ceil(m/q)*q + 1
... a = harmonic_mod(n,m)
... return rational_reconstruction(a,m)
sage: harmonic(100) == harmonic2(100)
True
sage: a=2; b=3; m=5; n=7; lambda0=(b-a)/m % n; a+lambda0*m
17
sage: crt(2,3,5,7)
17
sage: def harmonic3(n):
... q = lcm(range(1,n+1))
... pmax = RR(q*(log(n)+1))
... B = ZZ(2*pmax^2)
... m = 1; a = 0; p = 2^63
... while m<B:
... p = next_prime(p)
... b = harmonic_mod(n,p)
... a = crt(a,b,m,p)
... m = m*p
... return rational_reconstruction(a,m)
sage: harmonic(100) == harmonic3(100)
True
sage: crt(15,1,30,4)
45
sage: crt(15,2,30,4)
Traceback (most recent call last):
...
ValueError: No solution to crt problem since gcd(30,4) does not divide 15-2
sage: p=previous_prime(2^400)
sage: timeit('is_pseudoprime(p)') # random
625 loops, best of 3: 1.07 ms per loop
sage: timeit('is_prime(p)') # random long time
5 loops, best of 3: 485 ms per loop
sage: [560 % (x-1) for x in [3,11,17]]
[0, 0, 0]
sage: def count_primes1(n):
... return add([1 for p in range(n+1) if is_prime(p)])
sage: def count_primes2(n):
... return add([1 for p in range(n+1) if is_pseudoprime(p)])
sage: def count_primes3(n):
... s=0; p=2
... while p <= n: s+=1; p=next_prime(p)
... return s
sage: def count_primes4(n):
... s=0; p=2
... while p <= n: s+=1; p=next_probable_prime(p)
... return s
sage: def count_primes5(n):
... s=0
... for p in prime_range(n): s+=1
... return s
sage: timeit('count_primes1(10^5)') # random, not tested
5 loops, best of 3: 674 ms per loop
sage: timeit('count_primes2(10^5)') # random, not tested
5 loops, best of 3: 256 ms per loop
sage: timeit('count_primes3(10^5)') # random
5 loops, best of 3: 49.2 ms per loop
sage: timeit('count_primes4(10^5)') # random
5 loops, best of 3: 48.6 ms per loop
sage: timeit('count_primes5(10^5)') # random
125 loops, best of 3: 2.67 ms per loop
sage: p=(2^42737+1)//3; a=3^42737
sage: timeit('a.gcd(p)') # random
125 loops, best of 3: 4.3 ms per loop
sage: timeit('a.jacobi(p)') # random
25 loops, best of 3: 26.1 ms per loop
sage: p=10^10+19; a=mod(17,p); a.log(2)
6954104378
sage: mod(2,p)^6954104378
17
sage: p=10^20+39; a=mod(17,p)
sage: time r=a.log(3) # not tested
CPU times: user 89.63 s, sys: 1.70 s, total: 91.33 s
"""
|
py
|
1a5a5cd55b617d6f28d421ba71c793c5fb9bd711
|
import uuid
import arrow
def is_uuid(data):
"""Check is data is a valid uuid. If data is a list,
checks if all elements of the list are valid uuids"""
temp = [data] if not isinstance(data, list) else data
for i in temp:
try:
uuid.UUID(str(i), version=4)
except ValueError:
return False
return True
def to_list(obj):
""" Return a list containing obj if obj is not already an iterable"""
try:
iter(obj)
return obj
except TypeError:
return [obj]
def get_start_end_ts(day=None):
if not day:
# yesterday at midnight
date = arrow.utcnow().shift(days=-1).floor("day")
else:
# given day, at midnight (arrow works in UTC by default)
date = arrow.get(day)
start = date.timestamp
end = date.ceil("day").timestamp
return start, end
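# Minimal usage sketch (added for illustration, not part of the original module),
# assuming the arrow version this module targets (where .timestamp is a property):
if __name__ == "__main__":
    print(is_uuid("not-a-uuid"))           # False
    print(is_uuid(str(uuid.uuid4())))      # True
    print(to_list(5))                      # [5]
    start, end = get_start_end_ts("2021-01-15")
    print(start, end)                      # UTC timestamps bounding that day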
|
py
|
1a5a5e097b33065f5dff2cb32b7e5f64443b0af7
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import AzureKeyCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import AnomalyDetectorClientConfiguration
from .operations import AnomalyDetectorClientOperationsMixin
from . import models
class AnomalyDetectorClient(AnomalyDetectorClientOperationsMixin):
"""The Anomaly Detector API detects anomalies automatically in time series data. It supports two kinds of mode, one is for stateless using, another is for stateful using. In stateless mode, there are three functionalities. Entire Detect is for detecting the whole series with model trained by the time series, Last Detect is detecting last point with model trained by points before. ChangePoint Detect is for detecting trend changes in time series. In stateful mode, user can store time series, the stored time series will be used for detection anomalies. Under this mode, user can still use the above three functionalities by only giving a time range without preparing time series in client side. Besides the above three functionalities, stateful model also provide group based detection and labeling service. By leveraging labeling service user can provide labels for each detection result, these labels will be used for retuning or regenerating detection models. Inconsistency detection is a kind of group based detection, this detection will find inconsistency ones in a set of time series. By using anomaly detector service, business customers can discover incidents and establish a logic flow for root cause analysis.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.AzureKeyCredential
:param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: https://westus2.api.cognitive.microsoft.com).
:type endpoint: str
"""
def __init__(
self,
credential, # type: AzureKeyCredential
endpoint, # type: str
**kwargs # type: Any
):
# type: (...) -> None
base_url = '{Endpoint}/anomalydetector/v1.1-preview'
self._config = AnomalyDetectorClientConfiguration(credential, endpoint, **kwargs)
self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> AnomalyDetectorClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
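# Minimal construction sketch (illustrative only; the key and endpoint are placeholders):
#
#   from azure.core.credentials import AzureKeyCredential
#   client = AnomalyDetectorClient(AzureKeyCredential("<api-key>"),
#                                  "https://westus2.api.cognitive.microsoft.com")
#   with client:
#       ...  # invoke operations provided by AnomalyDetectorClientOperationsMixin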
|
py
|
1a5a5ed9df7008ecec39ba34d576ca4ea1548941
|
# -*- coding:utf-8 -*-
from __future__ import print_function
from setuptools import setup, find_packages
from glob import glob
import pyprobar
with open(glob('requirements.*')[0], encoding='utf-8') as f:
all_reqs = f.read().split('\n')
install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
with open("README.md", "r", encoding='utf-8') as fr:
long_description = fr.read()
setup(
name = pyprobar.__name__ ,
version=pyprobar.__version__,
packages = find_packages(),
include_package_data = True,
description = " An easy-to-use and colorful progress bar for python." ,
long_description=long_description,
long_description_content_type="text/markdown",
author = "K.y" ,
author_email="[email protected]",
url = "https://github.com/beidongjiedeguang/python-progress-bar" ,
license = "MIT" ,
install_requires=install_requires,
classifiers=[
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
keywords=[
'Python Utils',
'Machine Learning',
'Neural Networks',
'Natural Language Processing',
'Computer Vision'
]
)
|
py
|
1a5a5ef443aeba4e8c36fa6888431a927550f4b7
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Prediction']
class Prediction(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_analyze: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
display_name: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
grades: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PredictionGradesArgs']]]]] = None,
hub_name: Optional[pulumi.Input[str]] = None,
involved_interaction_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
involved_kpi_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
involved_relationships: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
mappings: Optional[pulumi.Input[pulumi.InputType['PredictionMappingsArgs']]] = None,
negative_outcome_expression: Optional[pulumi.Input[str]] = None,
positive_outcome_expression: Optional[pulumi.Input[str]] = None,
prediction_name: Optional[pulumi.Input[str]] = None,
primary_profile_type: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scope_expression: Optional[pulumi.Input[str]] = None,
score_label: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The prediction resource format.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_analyze: Whether do auto analyze.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] description: Description of the prediction.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] display_name: Display name of the prediction.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PredictionGradesArgs']]]] grades: The prediction grades.
:param pulumi.Input[str] hub_name: The name of the hub.
:param pulumi.Input[Sequence[pulumi.Input[str]]] involved_interaction_types: Interaction types involved in the prediction.
:param pulumi.Input[Sequence[pulumi.Input[str]]] involved_kpi_types: KPI types involved in the prediction.
:param pulumi.Input[Sequence[pulumi.Input[str]]] involved_relationships: Relationships involved in the prediction.
:param pulumi.Input[pulumi.InputType['PredictionMappingsArgs']] mappings: Definition of the link mapping of prediction.
:param pulumi.Input[str] negative_outcome_expression: Negative outcome expression.
:param pulumi.Input[str] positive_outcome_expression: Positive outcome expression.
:param pulumi.Input[str] prediction_name: Name of the prediction.
:param pulumi.Input[str] primary_profile_type: Primary profile type.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] scope_expression: Scope expression.
:param pulumi.Input[str] score_label: Score label.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if auto_analyze is None and not opts.urn:
raise TypeError("Missing required property 'auto_analyze'")
__props__['auto_analyze'] = auto_analyze
__props__['description'] = description
__props__['display_name'] = display_name
__props__['grades'] = grades
if hub_name is None and not opts.urn:
raise TypeError("Missing required property 'hub_name'")
__props__['hub_name'] = hub_name
__props__['involved_interaction_types'] = involved_interaction_types
__props__['involved_kpi_types'] = involved_kpi_types
__props__['involved_relationships'] = involved_relationships
if mappings is None and not opts.urn:
raise TypeError("Missing required property 'mappings'")
__props__['mappings'] = mappings
if negative_outcome_expression is None and not opts.urn:
raise TypeError("Missing required property 'negative_outcome_expression'")
__props__['negative_outcome_expression'] = negative_outcome_expression
if positive_outcome_expression is None and not opts.urn:
raise TypeError("Missing required property 'positive_outcome_expression'")
__props__['positive_outcome_expression'] = positive_outcome_expression
__props__['prediction_name'] = prediction_name
if primary_profile_type is None and not opts.urn:
raise TypeError("Missing required property 'primary_profile_type'")
__props__['primary_profile_type'] = primary_profile_type
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if scope_expression is None and not opts.urn:
raise TypeError("Missing required property 'scope_expression'")
__props__['scope_expression'] = scope_expression
if score_label is None and not opts.urn:
raise TypeError("Missing required property 'score_label'")
__props__['score_label'] = score_label
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['system_generated_entities'] = None
__props__['tenant_id'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:customerinsights:Prediction"), pulumi.Alias(type_="azure-nextgen:customerinsights/latest:Prediction")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Prediction, __self__).__init__(
'azure-nextgen:customerinsights/v20170426:Prediction',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Prediction':
"""
Get an existing Prediction resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Prediction(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="autoAnalyze")
def auto_analyze(self) -> pulumi.Output[bool]:
"""
Whether do auto analyze.
"""
return pulumi.get(self, "auto_analyze")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Description of the prediction.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Display name of the prediction.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def grades(self) -> pulumi.Output[Optional[Sequence['outputs.PredictionResponseGrades']]]:
"""
The prediction grades.
"""
return pulumi.get(self, "grades")
@property
@pulumi.getter(name="involvedInteractionTypes")
def involved_interaction_types(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Interaction types involved in the prediction.
"""
return pulumi.get(self, "involved_interaction_types")
@property
@pulumi.getter(name="involvedKpiTypes")
def involved_kpi_types(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
KPI types involved in the prediction.
"""
return pulumi.get(self, "involved_kpi_types")
@property
@pulumi.getter(name="involvedRelationships")
def involved_relationships(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Relationships involved in the prediction.
"""
return pulumi.get(self, "involved_relationships")
@property
@pulumi.getter
def mappings(self) -> pulumi.Output['outputs.PredictionResponseMappings']:
"""
Definition of the link mapping of prediction.
"""
return pulumi.get(self, "mappings")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="negativeOutcomeExpression")
def negative_outcome_expression(self) -> pulumi.Output[str]:
"""
Negative outcome expression.
"""
return pulumi.get(self, "negative_outcome_expression")
@property
@pulumi.getter(name="positiveOutcomeExpression")
def positive_outcome_expression(self) -> pulumi.Output[str]:
"""
Positive outcome expression.
"""
return pulumi.get(self, "positive_outcome_expression")
@property
@pulumi.getter(name="predictionName")
def prediction_name(self) -> pulumi.Output[Optional[str]]:
"""
Name of the prediction.
"""
return pulumi.get(self, "prediction_name")
@property
@pulumi.getter(name="primaryProfileType")
def primary_profile_type(self) -> pulumi.Output[str]:
"""
Primary profile type.
"""
return pulumi.get(self, "primary_profile_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="scopeExpression")
def scope_expression(self) -> pulumi.Output[str]:
"""
Scope expression.
"""
return pulumi.get(self, "scope_expression")
@property
@pulumi.getter(name="scoreLabel")
def score_label(self) -> pulumi.Output[str]:
"""
Score label.
"""
return pulumi.get(self, "score_label")
@property
@pulumi.getter(name="systemGeneratedEntities")
def system_generated_entities(self) -> pulumi.Output['outputs.PredictionResponseSystemGeneratedEntities']:
"""
System generated entities.
"""
return pulumi.get(self, "system_generated_entities")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[str]:
"""
The hub name.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
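# The sketch below is not part of the generated module; it is a hedged usage example kept as
# comments so the file is unchanged at import time. The resource name, hub, resource group,
# expressions, and the PredictionMappingsArgs field names and values are illustrative
# assumptions, not values taken from any real deployment.
#
# example_prediction = Prediction("examplePrediction",
#     auto_analyze=True,
#     hub_name="example-hub",
#     resource_group_name="example-rg",
#     mappings=PredictionMappingsArgs(grade="grade", reason="reason", score="score"),
#     negative_outcome_expression="Customers.FirstName = 'Mike'",
#     positive_outcome_expression="Customers.FirstName = 'David'",
#     prediction_name="examplePrediction",
#     primary_profile_type="Customers",
#     scope_expression="*",
#     score_label="score")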
|
py
|
1a5a6038a623836ea9db10637d9da0da010d57f8
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SourceProperties(Model):
"""The properties of the source code repository.
All required parameters must be populated in order to send to Azure.
:param source_control_type: Required. The type of source control service.
Possible values include: 'Github', 'VisualStudioTeamService'
:type source_control_type: str or
~azure.mgmt.containerregistry.v2018_09_01.models.SourceControlType
:param repository_url: Required. The full URL to the source code
repository
:type repository_url: str
:param branch: The branch name of the source code.
:type branch: str
:param source_control_auth_properties: The authorization properties for
accessing the source code repository and to set up
webhooks for notifications.
:type source_control_auth_properties:
~azure.mgmt.containerregistry.v2018_09_01.models.AuthInfo
"""
_validation = {
'source_control_type': {'required': True},
'repository_url': {'required': True},
}
_attribute_map = {
'source_control_type': {'key': 'sourceControlType', 'type': 'str'},
'repository_url': {'key': 'repositoryUrl', 'type': 'str'},
'branch': {'key': 'branch', 'type': 'str'},
'source_control_auth_properties': {'key': 'sourceControlAuthProperties', 'type': 'AuthInfo'},
}
def __init__(self, **kwargs):
super(SourceProperties, self).__init__(**kwargs)
self.source_control_type = kwargs.get('source_control_type', None)
self.repository_url = kwargs.get('repository_url', None)
self.branch = kwargs.get('branch', None)
self.source_control_auth_properties = kwargs.get('source_control_auth_properties', None)
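# Hedged usage sketch (not part of the generated model): shows the keyword-argument style this
# msrest Model expects. The repository URL and the AuthInfo token values are illustrative
# assumptions.
#
# from azure.mgmt.containerregistry.v2018_09_01.models import AuthInfo, SourceProperties
#
# source = SourceProperties(
#     source_control_type='Github',
#     repository_url='https://github.com/example-org/example-repo',
#     branch='master',
#     source_control_auth_properties=AuthInfo(token_type='PAT', token='<personal-access-token>'))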
|
py
|
1a5a604653f79566608ea6dc2d0d85290ca4bd17
|
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""TensorFlow custom ops builder.
"""
import os
import re
import uuid
import hashlib
import tempfile
import shutil
import tensorflow as tf
from tensorflow.python.client import device_lib # pylint: disable=no-name-in-module
#----------------------------------------------------------------------------
# Global options.
cuda_cache_path = os.path.join(os.path.dirname(__file__), '_cudacache')
cuda_cache_version_tag = 'v1'
do_not_hash_included_headers = False # Speed up compilation by assuming that headers included by the CUDA code never change. Unsafe!
verbose = True # Print status messages to stdout.
compiler_bindir_search_path = [
'C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Tools/MSVC/14.14.26428/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Tools/MSVC/14.23.28105/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Tools/MSVC/14.29.30037/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio 14.0/vc/bin',
]
#----------------------------------------------------------------------------
# Internal helper funcs.
def _find_compiler_bindir():
for compiler_path in compiler_bindir_search_path:
if os.path.isdir(compiler_path):
return compiler_path
return None
def _get_compute_cap(device):
caps_str = device.physical_device_desc
m = re.search('compute capability: (\\d+)\\.(\\d+)', caps_str)
major = m.group(1)
minor = m.group(2)
return (major, minor)
def _get_cuda_gpu_arch_string():
gpus = [x for x in device_lib.list_local_devices() if x.device_type == 'GPU']
if len(gpus) == 0:
raise RuntimeError('No GPU devices found')
(major, minor) = _get_compute_cap(gpus[0])
return 'sm_%s%s' % (major, minor)
def _run_cmd(cmd):
with os.popen(cmd) as pipe:
output = pipe.read()
status = pipe.close()
if status is not None:
raise RuntimeError('NVCC returned an error. See below for full command line and output log:\n\n%s\n\n%s' % (cmd, output))
def _prepare_nvcc_cli(opts):
cmd = 'nvcc ' + opts.strip()
cmd += ' --disable-warnings'
cmd += ' --include-path "%s"' % tf.sysconfig.get_include()
cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'protobuf_archive', 'src')
cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'com_google_absl')
cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'eigen_archive')
compiler_bindir = _find_compiler_bindir()
if compiler_bindir is None:
# Require that _find_compiler_bindir succeeds on Windows. Allow
# nvcc to use whatever is the default on Linux.
if os.name == 'nt':
raise RuntimeError('Could not find MSVC/GCC/CLANG installation on this computer. Check compiler_bindir_search_path list in "%s".' % __file__)
else:
cmd += ' --compiler-bindir "%s"' % compiler_bindir
cmd += ' 2>&1'
return cmd
#----------------------------------------------------------------------------
# Main entry point.
_plugin_cache = dict()
def get_plugin(cuda_file):
cuda_file_base = os.path.basename(cuda_file)
cuda_file_name, cuda_file_ext = os.path.splitext(cuda_file_base)
# Already in cache?
if cuda_file in _plugin_cache:
return _plugin_cache[cuda_file]
# Setup plugin.
if verbose:
print('Setting up TensorFlow plugin "%s": ' % cuda_file_base, end='', flush=True)
try:
# Hash CUDA source.
md5 = hashlib.md5()
with open(cuda_file, 'rb') as f:
md5.update(f.read())
md5.update(b'\n')
# Hash headers included by the CUDA code by running it through the preprocessor.
if not do_not_hash_included_headers:
if verbose:
print('Preprocessing... ', end='', flush=True)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + cuda_file_ext)
_run_cmd(_prepare_nvcc_cli('"%s" --preprocess -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir)))
with open(tmp_file, 'rb') as f:
bad_file_str = ('"' + cuda_file.replace('\\', '/') + '"').encode('utf-8') # __FILE__ in error check macros
good_file_str = ('"' + cuda_file_base + '"').encode('utf-8')
for ln in f:
if not ln.startswith(b'# ') and not ln.startswith(b'#line '): # ignore line number pragmas
ln = ln.replace(bad_file_str, good_file_str)
md5.update(ln)
md5.update(b'\n')
# Select compiler options.
compile_opts = ''
if os.name == 'nt':
compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.lib')
elif os.name == 'posix':
compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.so')
compile_opts += ' --compiler-options \'-fPIC -D_GLIBCXX_USE_CXX11_ABI=0\''
else:
assert False # not Windows or Linux, w00t?
compile_opts += ' --gpu-architecture=%s' % _get_cuda_gpu_arch_string()
compile_opts += ' --use_fast_math'
nvcc_cmd = _prepare_nvcc_cli(compile_opts)
# Hash build configuration.
md5.update(('nvcc_cmd: ' + nvcc_cmd).encode('utf-8') + b'\n')
md5.update(('tf.VERSION: ' + tf.VERSION).encode('utf-8') + b'\n')
md5.update(('cuda_cache_version_tag: ' + cuda_cache_version_tag).encode('utf-8') + b'\n')
# Compile if not already compiled.
bin_file_ext = '.dll' if os.name == 'nt' else '.so'
bin_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + md5.hexdigest() + bin_file_ext)
if not os.path.isfile(bin_file):
if verbose:
print('Compiling... ', end='', flush=True)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + bin_file_ext)
_run_cmd(nvcc_cmd + ' "%s" --shared -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir))
os.makedirs(cuda_cache_path, exist_ok=True)
intermediate_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + uuid.uuid4().hex + '_tmp' + bin_file_ext)
shutil.copyfile(tmp_file, intermediate_file)
os.rename(intermediate_file, bin_file) # atomic
# Load.
if verbose:
print('Loading... ', end='', flush=True)
plugin = tf.load_op_library(bin_file)
# Add to cache.
_plugin_cache[cuda_file] = plugin
if verbose:
print('Done.', flush=True)
return plugin
except:
if verbose:
print('Failed!', flush=True)
raise
#----------------------------------------------------------------------------
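# Hedged usage sketch (not part of the original file): callers typically pass the path of a
# .cu source file next to the calling module and then invoke the op exposed by the compiled
# plugin. The module path, file name, and op name below are assumptions for illustration.
#
# from dnnlib.tflib import custom_ops
# _plugin = custom_ops.get_plugin(os.path.join(os.path.dirname(__file__), 'fused_bias_act.cu'))
# y = _plugin.fused_bias_act(x, b)  # hypothetical op name exported by the compiled library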
|
py
|
1a5a6089d7686453063ae3fedad58d5b5df5d669
|
from __future__ import (
absolute_import,
unicode_literals,
)
import abc
from typing import (
Dict,
FrozenSet,
Type,
)
import six
__all__ = (
'Serializer',
)
class _SerializerMeta(abc.ABCMeta):
_mime_type_to_serializer_map = {} # type: Dict[six.text_type, Type[Serializer]]
_all_supported_mime_types = frozenset() # type: FrozenSet[six.text_type]
def __new__(mcs, name, bases, body):
# Don't allow multiple inheritance as it mucks up mime-type collection
if len(bases) != 1:
raise ValueError('You cannot use multiple inheritance with Serializers')
cls = super(_SerializerMeta, mcs).__new__(mcs, name, bases, body)
if bases and bases[0] is not object:
if not issubclass(cls, Serializer):
raise TypeError('The internal _SerializerMeta is only valid on Serializers')
if not cls.mime_type or not cls.mime_type.strip():
raise ValueError('All serializers must have a non-null, non-blank MIME type')
if cls.mime_type in mcs._all_supported_mime_types:
raise ValueError('Another serializer {cls} already supports mime type {mime_type}'.format(
cls=mcs._mime_type_to_serializer_map[cls.mime_type],
mime_type=cls.mime_type,
))
mcs._mime_type_to_serializer_map[cls.mime_type] = cls
mcs._all_supported_mime_types = frozenset(mcs._mime_type_to_serializer_map.keys())
return cls
@property
def all_supported_mime_types(cls): # type: () -> FrozenSet[six.text_type]
"""
Return all mime types supported by all implementations of `Serializer`.
:return: A frozen set of mime types.
"""
return cls._all_supported_mime_types
@six.add_metaclass(_SerializerMeta)
class Serializer(object):
"""
The mime type that this serializer supports.
"""
mime_type = None # type: six.text_type
@classmethod
def resolve_serializer(cls, mime_type): # type: (six.text_type) -> Serializer
"""
Given the requested mime type, return an initialized `Serializer` that understands that mime type.
:param mime_type: The mime type for which to get a compatible `Serializer`
:return: A compatible `Serializer`.
:raises: ValueError if there is no `Serializer` that understands this mime type.
"""
if mime_type not in cls.all_supported_mime_types:
raise ValueError('Mime type {} is not supported'.format(mime_type))
return cls._mime_type_to_serializer_map[mime_type]()
@abc.abstractmethod
def dict_to_blob(self, message_dict): # type: (Dict) -> six.binary_type
"""
Take a message in the form of a dict and return a serialized message in the form of bytes (string).
:param message_dict: The message to serialize into a blob.
:return: The serialized blob.
"""
@abc.abstractmethod
def blob_to_dict(self, blob): # type: (six.binary_type) -> Dict
"""
Take a serialized message in the form of bytes (string) and return a dict.
:param blob: The blob to deserialize into a message
:return: The deserialized message.
"""
|
py
|
1a5a60ddb0363f2ef3e04beee8c135c5a1047043
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
import collections
import copy
import random
import threading
import unittest
import weakref
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python import pywrap_sanitizers
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def test_assert_ops_in_graph(self):
with ops.Graph().as_default():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
@test_util.run_deprecated_v1
def test_session_functions(self):
with self.test_session() as sess:
sess_ref = weakref.ref(sess)
with self.cached_session(graph=None, config=None) as sess2:
# We make sure that sess2 is sess.
assert sess2 is sess
# We make sure we raise an exception if we use cached_session with
# different values.
with self.assertRaises(ValueError):
with self.cached_session(graph=ops.Graph()) as sess2:
pass
with self.assertRaises(ValueError):
with self.cached_session(force_gpu=True) as sess2:
pass
# We make sure that test_session will cache the session even after the
# with scope.
assert not sess_ref()._closed
with self.session() as unique_sess:
unique_sess_ref = weakref.ref(unique_sess)
with self.session() as sess2:
assert sess2 is not unique_sess
# We make sure the session is closed when we leave the with statement.
assert unique_sess_ref()._closed
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegex(AssertionError,
r"^Found unexpected node '{{node seven}}"):
test_util.assert_equal_graph_def(def_57, def_empty)
def test_assert_equal_graph_def_hash_table(self):
def get_graph_def():
with ops.Graph().as_default() as g:
x = constant_op.constant([2, 9], name="x")
keys = constant_op.constant([1, 2], name="keys")
values = constant_op.constant([3, 4], name="values")
default = constant_op.constant(-1, name="default")
table = lookup_ops.StaticHashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default)
_ = table.lookup(x)
return g.as_graph_def()
def_1 = get_graph_def()
def_2 = get_graph_def()
# The unique shared_name of each table makes the graph unequal.
with self.assertRaisesRegex(AssertionError, "hash_table_"):
test_util.assert_equal_graph_def(def_1, def_2,
hash_table_shared_name=False)
# That can be ignored. (NOTE: modifies GraphDefs in-place.)
test_util.assert_equal_graph_def(def_1, def_2,
hash_table_shared_name=True)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testIsMklEnabled(self):
# This test doesn't assert anything.
# It ensures the py wrapper function is generated correctly.
if test_util.IsMklEnabled():
print("MKL is enabled")
else:
print("MKL is disabled")
@test_util.disable_asan("Skip test if ASAN is enabled.")
def testDisableAsan(self):
self.assertFalse(pywrap_sanitizers.is_asan_enabled())
@test_util.disable_msan("Skip test if MSAN is enabled.")
def testDisableMsan(self):
self.assertFalse(pywrap_sanitizers.is_msan_enabled())
@test_util.disable_tsan("Skip test if TSAN is enabled.")
def testDisableTsan(self):
self.assertFalse(pywrap_sanitizers.is_tsan_enabled())
@test_util.disable_ubsan("Skip test if UBSAN is enabled.")
def testDisableUbsan(self):
self.assertFalse(pywrap_sanitizers.is_ubsan_enabled())
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegex(AssertionError, r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
@test_util.run_in_graph_and_eager_modes
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [
self.checkedThread(
target=err_func, args=(i,)) for i in range(10)
]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
with ops.Graph().as_default():
node_def = ops._NodeDef("IntOutput", "name")
node_def_orig = ops._NodeDef("IntOutput", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(),
original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
@test_util.run_in_graph_and_eager_modes
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
@parameterized.named_parameters(
dict(testcase_name="tensors", ragged_tensors=False),
dict(testcase_name="ragged_tensors", ragged_tensors=True))
@test_util.run_in_graph_and_eager_modes
def testAllCloseTensors(self, ragged_tensors: bool):
a_raw_data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a = constant_op.constant(a_raw_data)
b = math_ops.add(1, constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
if ragged_tensors:
a = ragged_tensor.RaggedTensor.from_tensor(a)
b = ragged_tensor.RaggedTensor.from_tensor(b)
self.assertAllClose(a, b)
self.assertAllClose(a, a_raw_data)
a_dict = {"key": a}
b_dict = {"key": b}
self.assertAllClose(a_dict, b_dict)
x_list = [a, b]
y_list = [a_raw_data, b]
self.assertAllClose(x_list, y_list)
@test_util.run_in_graph_and_eager_modes
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 7 + 1e-5)
@test_util.run_in_graph_and_eager_modes
def testAllCloseList(self):
with self.assertRaisesRegex(AssertionError, r"not close dif"):
self.assertAllClose([0], [1])
@test_util.run_in_graph_and_eager_modes
def testAllCloseDictToNonDict(self):
with self.assertRaisesRegex(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose(1, {"a": 1})
with self.assertRaisesRegex(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose({"a": 1}, 1)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNamedtuples(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
my_named_tuple = collections.namedtuple("MyNamedTuple", ["a", "b", "c"])
# Identity.
self.assertAllClose(expected, my_named_tuple(a=a, b=b, c=c))
self.assertAllClose(
my_named_tuple(a=a, b=b, c=c), my_named_tuple(a=a, b=b, c=c))
@test_util.run_in_graph_and_eager_modes
def testAllCloseDicts(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
# Identity.
self.assertAllClose(expected, expected)
self.assertAllClose(expected, dict(expected))
# With each item removed.
for k in expected:
actual = dict(expected)
del actual[k]
with self.assertRaisesRegex(AssertionError, r"mismatched keys"):
self.assertAllClose(expected, actual)
# With each item changed.
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
with self.assertRaisesRegex(AssertionError, r"Shape mismatch"):
self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
c_copy = np.array(c)
c_copy[1, 1, 1] += 1e-5
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
@test_util.run_in_graph_and_eager_modes
def testAllCloseListOfNamedtuples(self):
my_named_tuple = collections.namedtuple("MyNamedTuple", ["x", "y"])
l1 = [
my_named_tuple(x=np.array([[2.3, 2.5]]), y=np.array([[0.97, 0.96]])),
my_named_tuple(x=np.array([[3.3, 3.5]]), y=np.array([[0.98, 0.99]]))
]
l2 = [
([[2.3, 2.5]], [[0.97, 0.96]]),
([[3.3, 3.5]], [[0.98, 0.99]]),
]
self.assertAllClose(l1, l2)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNestedStructure(self):
a = {"x": np.ones((3, 2, 4)) * 7, "y": (2, [{"nested": {"m": 3, "n": 4}}])}
self.assertAllClose(a, a)
b = copy.deepcopy(a)
self.assertAllClose(a, b)
# Test mismatched values
b["y"][1][0]["nested"]["n"] = 4.2
with self.assertRaisesRegex(AssertionError,
r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
self.assertAllClose(a, b)
@test_util.run_in_graph_and_eager_modes
def testAssertDictEqual(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
d = "testing123"
expected = {"a": a, "b": b, "c": c, "d": d}
actual = {"a": a, "b": b, "c": constant_op.constant(c), "d": d}
self.assertDictEqual(expected, expected)
self.assertDictEqual(expected, actual)
@test_util.run_in_graph_and_eager_modes
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
@test_util.skip_if(True) # b/117665998
def testForceGPU(self):
with self.assertRaises(errors.InvalidArgumentError):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = constant_op.constant(True)
y = [15]
control_flow_ops.Assert(x, y).run()
@test_util.run_in_graph_and_eager_modes
def testAssertAllCloseAccordingToType(self):
# test plain int
self.assertAllCloseAccordingToType(1, 1, rtol=1e-8, atol=1e-8)
# test float64
self.assertAllCloseAccordingToType(
np.asarray([1e-8], dtype=np.float64),
np.asarray([2e-8], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-8], dtype=dtypes.float64),
constant_op.constant([2e-8], dtype=dtypes.float64),
rtol=1e-8,
atol=1e-8)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float64),
np.asarray([2e-7], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
# test float32
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float32),
np.asarray([2e-7], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-7], dtype=dtypes.float32),
constant_op.constant([2e-7], dtype=dtypes.float32),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-6], dtype=np.float32),
np.asarray([2e-6], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
# test float16
self.assertAllCloseAccordingToType(
np.asarray([1e-4], dtype=np.float16),
np.asarray([2e-4], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-4], dtype=dtypes.float16),
constant_op.constant([2e-4], dtype=dtypes.float16),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7,
half_rtol=1e-4,
half_atol=1e-4)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-3], dtype=np.float16),
np.asarray([2e-3], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
@test_util.run_in_graph_and_eager_modes
def testAssertAllEqual(self):
i = variables.Variable([100] * 3, dtype=dtypes.int32, name="i")
j = constant_op.constant([20] * 3, dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([100] * 3, i)
self.assertAllEqual([120] * 3, k)
self.assertAllEqual([20] * 3, j)
with self.assertRaisesRegex(AssertionError, r"not equal lhs"):
self.assertAllEqual([0] * 3, k)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllEqual(self):
i = variables.Variable([100], dtype=dtypes.int32, name="i")
j = constant_op.constant([20], dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertNotAllEqual([100] * 3, i)
self.assertNotAllEqual([120] * 3, k)
self.assertNotAllEqual([20] * 3, j)
with self.assertRaisesRegex(
AssertionError, r"two values are equal at all elements.*extra message"):
self.assertNotAllEqual([120], k, msg="extra message")
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllClose(self):
# Test with arrays
self.assertNotAllClose([0.1], [0.2])
with self.assertRaises(AssertionError):
self.assertNotAllClose([-1.0, 2.0], [-1.0, 2.0])
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
self.assertNotAllClose([0.9, 1.0], x)
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.0, 1.0], x)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseRTol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], rtol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, rtol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseATol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], atol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, atol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLess(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllClose([110.0, 120.0, 130.0], z)
self.assertAllGreater(x, 95.0)
self.assertAllLess(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 95.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLessEqual(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllEqual([110.0, 120.0, 130.0], z)
self.assertAllGreaterEqual(x, 95.0)
self.assertAllLessEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 95.0)
def testAssertAllInRangeWithNonNumericValuesFails(self):
s1 = constant_op.constant("Hello, ", name="s1")
c = constant_op.constant([1 + 2j, -3 + 5j], name="c")
b = constant_op.constant([False, True], name="b")
with self.assertRaises(AssertionError):
self.assertAllInRange(s1, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(c, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(b, 0, 1)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRange(self):
x = constant_op.constant([10.0, 15.0], name="x")
self.assertAllInRange(x, 10, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_lower_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_upper_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(
x, 10, 15, open_lower_bound=True, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeScalar(self):
x = constant_op.constant(10.0, name="x")
nan = constant_op.constant(np.nan, name="nan")
self.assertAllInRange(x, 5, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(nan, 5, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_lower_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 1, 2)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeErrorMessageEllipses(self):
x_init = np.array([[10.0, 15.0]] * 12)
x = constant_op.constant(x_init, name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 5, 10)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeDetectsNaNs(self):
x = constant_op.constant(
[[np.nan, 0.0], [np.nan, np.inf], [np.inf, np.nan]], name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 0.0, 2.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeWithInfinities(self):
x = constant_op.constant([10.0, np.inf], name="x")
self.assertAllInRange(x, 10, np.inf)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, np.inf, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInSet(self):
b = constant_op.constant([True, False], name="b")
x = constant_op.constant([13, 37], name="x")
self.assertAllInSet(b, [False, True])
self.assertAllInSet(b, (False, True))
self.assertAllInSet(b, {False, True})
self.assertAllInSet(x, [0, 13, 37, 42])
self.assertAllInSet(x, (0, 13, 37, 42))
self.assertAllInSet(x, {0, 13, 37, 42})
with self.assertRaises(AssertionError):
self.assertAllInSet(b, [False])
with self.assertRaises(AssertionError):
self.assertAllInSet(x, (42,))
def testRandomSeed(self):
# Call setUp again for WithCApi case (since it makes a new default graph
# after setup).
# TODO(skyewm): remove this when C API is permanently enabled.
with context.eager_mode():
self.setUp()
a = random.randint(1, 1000)
a_np_rand = np.random.rand(1)
a_rand = random_ops.random_normal([1])
# ensure that randomness in multiple testCases is deterministic.
self.setUp()
b = random.randint(1, 1000)
b_np_rand = np.random.rand(1)
b_rand = random_ops.random_normal([1])
self.assertEqual(a, b)
self.assertEqual(a_np_rand, b_np_rand)
self.assertAllEqual(a_rand, b_rand)
@test_util.run_in_graph_and_eager_modes
def test_callable_evaluate(self):
def model():
return resource_variable_ops.ResourceVariable(
name="same_name",
initial_value=1) + 1
with context.eager_mode():
self.assertEqual(2, self.evaluate(model))
@test_util.run_in_graph_and_eager_modes
def test_nested_tensors_evaluate(self):
expected = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
nested = {"a": constant_op.constant(1),
"b": constant_op.constant(2),
"nested": {"d": constant_op.constant(3),
"e": constant_op.constant(4)}}
self.assertEqual(expected, self.evaluate(nested))
def test_run_in_graph_and_eager_modes(self):
l = []
def inc(self, with_brackets):
del self # self argument is required by run_in_graph_and_eager_modes.
mode = "eager" if context.executing_eagerly() else "graph"
with_brackets = "with_brackets" if with_brackets else "without_brackets"
l.append((with_brackets, mode))
f = test_util.run_in_graph_and_eager_modes(inc)
f(self, with_brackets=False)
f = test_util.run_in_graph_and_eager_modes()(inc) # pylint: disable=assignment-from-no-return
f(self, with_brackets=True)
self.assertEqual(len(l), 4)
self.assertEqual(set(l), {
("with_brackets", "graph"),
("with_brackets", "eager"),
("without_brackets", "graph"),
("without_brackets", "eager"),
})
def test_get_node_def_from_graph(self):
graph_def = graph_pb2.GraphDef()
node_foo = graph_def.node.add()
node_foo.name = "foo"
self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
def test_run_in_eager_and_graph_modes_test_class(self):
msg = "`run_in_graph_and_eager_modes` only supports test methods.*"
with self.assertRaisesRegex(ValueError, msg):
@test_util.run_in_graph_and_eager_modes()
class Foo(object):
pass
del Foo # Make pylint unused happy.
def test_run_in_eager_and_graph_modes_skip_graph_runs_eager(self):
modes = []
def _test(self):
if not context.executing_eagerly():
self.skipTest("Skipping in graph mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["eager"])
def test_run_in_eager_and_graph_modes_skip_eager_runs_graph(self):
modes = []
def _test(self):
if context.executing_eagerly():
self.skipTest("Skipping in eager mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["graph"])
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
modes = []
mode_name = lambda: "eager" if context.executing_eagerly() else "graph"
class ExampleTest(test_util.TensorFlowTestCase):
def runTest(self):
pass
def setUp(self):
modes.append("setup_" + mode_name())
@test_util.run_in_graph_and_eager_modes
def testBody(self):
modes.append("run_" + mode_name())
e = ExampleTest()
e.setUp()
e.testBody()
self.assertEqual(modes[1:2], ["run_graph"])
self.assertEqual(modes[2:], ["setup_eager", "run_eager"])
@parameterized.named_parameters(dict(testcase_name="argument",
arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_parameterized_keyword(self, arg):
self.assertEqual(arg, True)
@combinations.generate(combinations.combine(arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_combinations(self, arg):
self.assertEqual(arg, True)
def test_build_as_function_and_v1_graph(self):
class GraphModeAndFunctionTest(parameterized.TestCase):
def __init__(inner_self): # pylint: disable=no-self-argument
super(GraphModeAndFunctionTest, inner_self).__init__()
inner_self.graph_mode_tested = False
inner_self.inside_function_tested = False
def runTest(self):
del self
@test_util.build_as_function_and_v1_graph
def test_modes(inner_self): # pylint: disable=no-self-argument
if ops.inside_function():
self.assertFalse(inner_self.inside_function_tested)
inner_self.inside_function_tested = True
else:
self.assertFalse(inner_self.graph_mode_tested)
inner_self.graph_mode_tested = True
test_object = GraphModeAndFunctionTest()
test_object.test_modes_v1_graph()
test_object.test_modes_function()
self.assertTrue(test_object.graph_mode_tested)
self.assertTrue(test_object.inside_function_tested)
@test_util.run_in_graph_and_eager_modes
def test_consistent_random_seed_in_assert_all_equal(self):
random_seed.set_seed(1066)
index = random_ops.random_shuffle([0, 1, 2, 3, 4], seed=2021)
# This failed when `a` and `b` were evaluated in separate sessions.
self.assertAllEqual(index, index)
def test_with_forward_compatibility_horizons(self):
tested_codepaths = set()
def some_function_with_forward_compat_behavior():
if compat.forward_compatible(2050, 1, 1):
tested_codepaths.add("future")
else:
tested_codepaths.add("present")
@test_util.with_forward_compatibility_horizons(None, [2051, 1, 1])
def some_test(self):
del self # unused
some_function_with_forward_compat_behavior()
some_test(None)
self.assertEqual(tested_codepaths, set(["present", "future"]))
class SkipTestTest(test_util.TensorFlowTestCase):
def _verify_test_in_set_up_or_tear_down(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError,
["foo bar", "test message"]):
raise ValueError("test message")
try:
with self.assertRaisesRegex(ValueError, "foo bar"):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError("foo bar")
except unittest.SkipTest:
raise RuntimeError("Test is not supposed to skip.")
def setUp(self):
super(SkipTestTest, self).setUp()
self._verify_test_in_set_up_or_tear_down()
def tearDown(self):
super(SkipTestTest, self).tearDown()
self._verify_test_in_set_up_or_tear_down()
def test_skip_if_error_should_skip(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError("test message")
def test_skip_if_error_should_skip_with_list(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError,
["foo bar", "test message"]):
raise ValueError("test message")
def test_skip_if_error_should_skip_without_expected_message(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError):
raise ValueError("test message")
def test_skip_if_error_should_skip_without_error_message(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError):
raise ValueError()
def test_skip_if_error_should_raise_message_mismatch(self):
try:
with self.assertRaisesRegex(ValueError, "foo bar"):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError("foo bar")
except unittest.SkipTest:
raise RuntimeError("Test is not supposed to skip.")
def test_skip_if_error_should_raise_no_message(self):
try:
with self.assertRaisesRegex(ValueError, ""):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError()
except unittest.SkipTest:
raise RuntimeError("Test is not supposed to skip.")
# Its own test case to reproduce variable sharing issues which only pop up when
# setUp() is overridden and super() is not called.
class GraphAndEagerNoVariableSharing(test_util.TensorFlowTestCase):
def setUp(self):
pass # Intentionally does not call TensorFlowTestCase's super()
@test_util.run_in_graph_and_eager_modes
def test_no_variable_sharing(self):
variable_scope.get_variable(
name="step_size",
initializer=np.array(1e-5, np.float32),
use_resource=True,
trainable=False)
class GarbageCollectionTest(test_util.TensorFlowTestCase):
def test_no_reference_cycle_decorator(self):
class ReferenceCycleTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_garbage_created
def test_has_cycle(self):
a = []
a.append(a)
@test_util.assert_no_garbage_created
def test_has_no_cycle(self):
pass
with self.assertRaises(AssertionError):
ReferenceCycleTest().test_has_cycle()
ReferenceCycleTest().test_has_no_cycle()
@test_util.run_in_graph_and_eager_modes
def test_no_leaked_tensor_decorator(self):
class LeakedTensorTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_new_tensors
def test_has_leak(self):
self.a = constant_op.constant([3.], name="leak")
@test_util.assert_no_new_tensors
def test_has_no_leak(self):
constant_op.constant([3.], name="no-leak")
with self.assertRaisesRegex(AssertionError, "Tensors not deallocated"):
LeakedTensorTest().test_has_leak()
LeakedTensorTest().test_has_no_leak()
def test_no_new_objects_decorator(self):
class LeakedObjectTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(LeakedObjectTest, self).__init__(*args, **kwargs)
self.accumulation = []
@unittest.expectedFailure
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_leak(self):
self.accumulation.append([1.])
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_no_leak(self):
self.not_accumulating = [1.]
self.assertTrue(LeakedObjectTest("test_has_leak").run().wasSuccessful())
self.assertTrue(LeakedObjectTest("test_has_no_leak").run().wasSuccessful())
class RunFunctionsEagerlyInV2Test(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.named_parameters(
[("_RunEagerly", True), ("_RunGraph", False)])
def test_run_functions_eagerly(self, run_eagerly): # pylint: disable=g-wrong-blank-lines
results = []
@def_function.function
def add_two(x):
for _ in range(5):
x += 2
results.append(x)
return x
with test_util.run_functions_eagerly(run_eagerly):
add_two(constant_op.constant(2.))
if context.executing_eagerly():
if run_eagerly:
self.assertTrue(all(isinstance(t, ops.EagerTensor) for t in results))
else:
self.assertTrue(all(isinstance(t, ops.Tensor) for t in results))
else:
self.assertTrue(all(isinstance(t, ops.Tensor) for t in results))
if __name__ == "__main__":
googletest.main()
|
py
|
1a5a61b1ff24c8a023641a5b3bbd624f9c0e1679
|
"""Fast thresholded subspace-constrained mean shift for geospatial data.
Introduction:
-------------
DREDGE, short for 'density ridge estimation describing geospatial evidence',
arguably an unnecessarily forced acronym, is a tool to find density ridges.
Based on the subspace-constrained mean shift algorithm [1], it approximates
principal curves for a given set of latitude-longitude coordinates. Various
improvements over the initial algorithm, and alterations to facilitate the
application to geospatial data, are implemented: Thresholding, as described
in cosmological research [2, 3], avoids dominant density ridges in sparsely
populated areas of the dataset. In addition, the haversine formula is used
as a distance metric to calculate the great circle distance, which makes the
tool applicable not only to city-scale data, but also to datasets spanning
multiple countries by taking the Earth's curvature into consideration.
Since DREDGE was initially developed to be applied to crime incident data,
the default bandwidth calculation follows a best-practice approach that is
well-accepted within quantitative criminology, using the mean distance to a
given number of nearest neighbors [4]. Since practitioners in that area of
study are often interested in the highest-density regions of the dataset, the
tool also features the possibility to specify a top-percentage level for a
kernel density estimate that the ridge points should fall within.
Quickstart:
-----------
DREDGE is designed to be easy to use and needs only one input, namely the
array of latitude-longitude values for coordinates. This data has to be
provided in the form of a NumPy array with two columns, with the latitudes
in the first and the longitudes in the second column. Additionally, four
optional parameters can be manually set by the user:
(1) The parameter 'neighbors' specifies the number of nearest neighbors
that should be used to calculate the optimal bandwidth if the latter
is not provided by the user. The default number of neighbors is 10.
(2) The parameter 'bandwidth' provides the bandwidth that is used for the
kernel density estimator and Gaussian kernel evaluations. By default,
an optimal bandwidth using the average distance to a number of neighbors
across all points in the provided dataset is calculated, with the number
of neighbors given by the parameter 'neighbors' explained above.
(3) The parameter 'convergence' specifies the convergence threshold to
determine when to stop iterations and return the density ridge points.
If the resulting density ridges don't follow clearly visible lines,
this parameter can be set to a lower value. The default is 0.005.
(4) The parameter 'percentage' should be set if only density ridge points
from high-density regions, as per a kernel density estimate of the
provided set of coordinates, are to be returned. If, for example, the
parameter is set to '5', the density ridge points are evaluated via
the kernel density estimator, and only those above the 95th percentile,
as opposed to all of them as the default, are returned to the user.
A simple example for using DREDGE looks like this:
------------------------------------------------------------
| from dredge import filaments |
| |
| filaments(coordinates = your_latitudes_and_longitudes, |
| percentage = your_top_density_percentage) |
| |
------------------------------------------------------------
Here, the optional parameter 'percentage', which is explained above, is used.
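A slightly fuller sketch of a typical call is given below; the file name and the
parameter values used here are illustrative assumptions rather than requirements:
------------------------------------------------------------
| import numpy as np |
| from dredge import filaments |
| |
| coordinates = np.load('crime_incidents.npy') |
| ridges = filaments(coordinates = coordinates, |
| neighbors = 10, |
| convergence = 0.005, |
| percentage = 5) |
------------------------------------------------------------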
Author:
--------
Ben Moews
Institute for Astronomy (IfA)
School of Physics & Astronomy
The University of Edinburgh
References:
-----------
[1] Ozertem, U. and Erdogmus, D. (2011): "Locally defined principal curves
and surfaces", JMLR, Vol. 12, pp. 1249-1286
[2] Chen, Y. C. et al. (2015), "Cosmic web reconstruction through density
ridges: Method and algorithm", MNRAS, Vol. 454, pp. 1140-1156
[3] Chen, Y. C. et al. (2016), "Cosmic web reconstruction through density
ridges: Catalogue", MNRAS, Vol. 461, pp. 3896-3909
[4] Williamson, D. et al. (1999), "A better method to smooth crime incident
data", ESRI ArcUser Magazine, January-March 1999, pp. 1-5
Packages and versions:
----------------------
The versions listed below were used in the development of DREDGE, but the exact
version numbers aren't specifically required. The installation process via
PyPI will take care of installing or updating every library to at least the
level that fulfills the requirement of providing the necessary functionality.
Python 3.4.5
NumPy 1.11.3
SciPy 0.18.1
Scikit-learn 0.19.1
"""
# Load the necessary libraries
import sys
import numpy as np
import scipy as sp
from sklearn.neighbors import KernelDensity as KDE
from sklearn.neighbors import NearestNeighbors as KNN
def filaments(coordinates,
neighbors = 10,
bandwidth = None,
convergence = 0.005,
percentage = None):
"""Estimate density rigdges for a user-provided dataset of coordinates.
This function uses an augmented version of the subspace-constrained mean
shift algorithm to return density ridges for a set of latitude-longitude
coordinates. Apart from using the haversine distance to compute a more accurate
version of an optimal kernel bandwidth calculation that is common in criminology,
the code also features thresholding to avoid ridges in sparsely populated
areas. While only the coordinate set is a required input, the user can
override the number of nearest neighbors used to calculate the bandwidth
and the bandwidth itself, as well as the convergence threshold used to
assess when to terminate and the percentage indicating which top-level of
filament points in high-density regions should be returned. If the latter
is not chosen, all filament points are returned in the output instead.
Parameters:
-----------
coordinates : array-like
The set of latitudes and longitudes as a two-column array of floats.
neighbors : int, defaults to 10
The number of neighbors used for the optimal bandwidth calculation.
bandwidth : float, defaults to None
The bandwidth used for kernel density estimates of data points.
convergence : float, defaults to 0.005
The convergence threshold for the inter-iteration update difference.
percentage : float, defaults to None
The percentage of highest-density filament points that are returned.
Returns:
--------
ridges : array-like
The coordinates for the estimated density ridges of the data.
Attributes:
-----------
None
"""
# Check if the inputs are valid
parameter_check(coordinates = coordinates,
neighbors = neighbors,
bandwidth = bandwidth,
convergence = convergence,
percentage = percentage)
print("Input parameters valid!\n")
print("Preparing for iterations ...\n")
# Check whether no bandwidth is provided
if bandwidth is None:
# Compute the average distance to the given number of neighbors
nearest_neighbors = KNN(n_neighbors = neighbors,
algorithm = 'ball_tree',
metric = 'haversine').fit(coordinates)
distances, _ = nearest_neighbors.kneighbors(X = coordinates)
bandwidth = np.mean(distances[:, 1:distances.shape[1]])
print("Automatically computed bandwidth: %f\n" % bandwidth)
# Compute a Gaussian KDE with the haversine formula
density_estimate = KDE(bandwidth = bandwidth,
metric = 'haversine',
kernel = 'gaussian',
algorithm = 'ball_tree').fit(coordinates)
# Create a uniform-random mesh covering the provided coordinates
mesh = mesh_generation(coordinates)
# Compute the threshold to omit mesh points in low-density areas
threshold, densities = threshold_function(mesh, density_estimate)
# Cut low-density mesh points from the set
ridges = mesh[densities > threshold, :]
# Initialize the update change as larger than the convergence threshold
update_change = np.multiply(2, convergence)
# Initialize the previous update change as zero
previous_update = 0
# Iterate until the update change drops below the convergence threshold
iteration_number = 0
while not update_change < convergence:
# Update the current iteration number
iteration_number = iteration_number + 1
# Print the current iteration number
print("Iteration %d ..." % iteration_number)
# Create a list to store all update values
updates = []
# Loop over the number of points in the mesh
for i in range(ridges.shape[0]):
# Compute the update movements for each point
point_updates = update_function(ridges[i], coordinates, bandwidth)
# Add the update movement to the respective point
ridges[i] = ridges[i] + point_updates
# Store the change between updates to check convergence
updates.append(np.abs(np.mean(np.sum(point_updates))))
# Get the update change to check convergence
update_average = np.mean(np.sum(updates))
update_change = np.abs(previous_update - update_average)
previous_update = update_average
# Check whether a top-percentage of points should be returned
if percentage is not None:
# Evaluate all mesh points on the kernel density estimate
evaluations = density_estimate.score_samples(ridges)
# Calculate the threshold value for a given percentage
valid_percentile = np.percentile(evaluations, [100 - percentage])
# Retain only the mesh points that are above the threshold
ridges = ridges[np.where(evaluations > valid_percentile)]
# Return the iteratively updated mesh as the density ridges
print("\nDone!")
return ridges
def haversine(point_1,
point_2):
"""Calculate the haversine distance between two coordinates.
This function calculates the haversine formula for two latitude-longitude
tuples, a formula used for the great-circle distance on a sphere. While
the effect of using this more accurate distance, as opposed to the more
common Euclidean distance, is negligible for smaller scales, this choice
allows the code to also be used on larger scales by taking the curvature
of the Earth into account.
Parameters:
-----------
point_1 : array-like
The coordinates for a point as a tuple of type [float, float].
point_2 : array-like
The coordinates for a point as a tuple of type [float, float].
Returns:
--------
haversine_distance : float
The haversine distance between the two provided points.
Attributes:
-----------
None
"""
# Specify the radius of the Earth in kilometers
earth_radius = 6372.8
# Extract latitudes and longitudes from the provided points
latitude_1 = point_1[0]
latitude_2 = point_2[0]
longitude_1 = point_1[1]
longitude_2 = point_2[1]
# Convert the latitudes and longitudes to radians
latitude_1, longitude_1 = np.radians((latitude_1, longitude_1))
latitude_2, longitude_2 = np.radians((latitude_2, longitude_2))
# Calculate the differences between latitudes in radians
latitude_difference = latitude_2 - latitude_1
# Calculate the differences between longitudes in radians
longitude_difference = longitude_2 - longitude_1
# Calculate the haversine distance between the coordinates
step_1 = np.square(np.sin(np.multiply(latitude_difference, 0.5)))
step_2 = np.square(np.sin(np.multiply(longitude_difference, 0.5)))
step_3 = np.multiply(np.cos(latitude_1), np.cos(latitude_2))
step_4 = np.arcsin(np.sqrt(step_1 + np.multiply(step_2, step_3)))
haversine_distance = np.multiply(np.multiply(2, earth_radius), step_4)
# Return the computed haversine distance for the coordinates
return haversine_distance
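# A quick illustrative check (approximate coordinates for Edinburgh and
# London; the expected value is a rough figure added here, not part of the
# original code):
# haversine([55.9533, -3.1883], [51.5074, -0.1278]) returns roughly 534 km.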
def mesh_generation(coordinates):
"""Generate a set of uniformly-random distributed points as a mesh.
The subspace-constrained mean shift algorithm operates on either a grid
or a uniform-random set of coordinates to iteratively shift them towards
the estimated density ridges. Due to the functionality of the code, the
second approach is chosen, with a uniformly-random set of coordinates
in the intervals covered by the provided dataset as a mesh. In order to
not operate on a too-small or too-large number of mesh points, the size
of the mesh is constrained to a lower limit of 50,000 and an upper limit
of 100,000, with the size of the provided dataset being used if it falls
within these limits. This is done to avoid overly long running times.
Parameters:
-----------
coordinates : array-like
The set of latitudes and longitudes as a two-column array of floats.
Returns:
--------
mesh : array-like
The set of uniform-random coordinates in the dataset's intervals.
Attributes:
-----------
None
"""
# Get the minimum and maximum for the latitudes
min_latitude = np.min(coordinates[:, 0])
max_latitude = np.max(coordinates[:, 0])
# Get the minimum and maximum for the longitudes
min_longitude = np.min(coordinates[:, 1])
max_longitude = np.max(coordinates[:, 1])
# Get the number of provided coordinates
size = int(np.min([1e5, np.max([5e4, len(coordinates)])]))
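# As a worked example of the clamping above: a dataset of 10,000 points
# yields a mesh of 50,000 points, one of 75,000 points keeps its own size,
# and one of 500,000 points is capped at 100,000 mesh points.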
# Create an array of uniform-random points as a mesh
mesh_1 = np.random.uniform(min_latitude, max_latitude, size)
mesh_2 = np.random.uniform(min_longitude, max_longitude, size)
mesh = np.vstack((mesh_1.flatten(), mesh_2.flatten())).T
# Return the uniform-random mesh for the coordinates
return mesh
def threshold_function(mesh,
density_estimate):
"""Calculate the cut-off threshold for mesh point deletions.
This function calculates the threshold that is used to delete mesh
points from the initial uniformly-random set of mesh points. The
rationale behind this approach is to avoid filaments in sparsely
populated regions of the provided dataset, leading to a final result
that only covers filaments in regions of a suitably high density.
Parameters:
-----------
mesh : array-like
The set of uniform-random coordinates in the dataset's intervals.
density_estimate : scikit-learn object
The kernel density estimator fitted on the provided dataset.
Returns:
--------
threshold : float
The cut-off threshold for the omission of points in the mesh.
density_array : array-like
The density estimates for all points in the given mesh.
Attributes:
-----------
None
"""
# Calculate the average of density estimates for the data
density_array = np.exp(density_estimate.score_samples(mesh))
density_sum = np.sum(density_array)
density_average = np.divide(density_sum, len(mesh))
# Compute the threshold via the RMS in the density fluctuation
density_difference = np.subtract(density_array, density_average)
square_sum = np.sum(np.square(density_difference))
threshold = np.sqrt(np.divide(square_sum, len(density_difference)))
# Return the threshold for the provided mesh and density estimate
return threshold, density_array
def update_function(point,
coordinates,
bandwidth):
"""Calculate the mean shift update for a provided mesh point.
This function calculates the mean shift update for a given point of
the mesh at the current iteration. This is done through a spectral
decomposition of the local inverse covariance matrix, shifting the
respective point closer towards the nearest estimated ridge. The
updates are provided as a tuple in the latitude-longitude space to
be added to the point's coordinate values.
Parameters:
-----------
point : array-like
The latitude-longitude coordinate tuple for a single mesh point.
coordinates : array-like
The set of latitudes and longitudes as a two-column array of floats.
Returns:
--------
point_updates : float
The tuple of latitude and longitude updates for the mesh point.
Attributes:
-----------
None
"""
# Calculate the squared distances between the point and the data
squared_distance = np.sum(np.square(coordinates - point), axis=1)
# Evaluate the Gaussian kernel at each squared distance
weights = gaussian_kernel(squared_distance, bandwidth)
# Compute the kernel-weighted mean of the data points
shift = np.divide(coordinates.T.dot(weights), np.sum(weights))
# Calculate the plain mean shift update for the point
update = shift - point
# Calculate the local inverse covariance for the decomposition
inverse_covariance = local_inv_cov(point, coordinates, bandwidth)
# Compute the eigendecomposition of the local inverse covariance
eigen_values, eigen_vectors = np.linalg.eig(inverse_covariance)
# Align the eigenvectors with the sorted eigenvalues
sorted_eigen_values = np.argsort(eigen_values)
eigen_vectors = eigen_vectors[:, sorted_eigen_values]
# Cut the eigenvectors according to the sorted eigenvalues
cut_eigen_vectors = eigen_vectors[:, 1:]
# Project the update to the eigenvector-spanned orthogonal subspace
point_updates = cut_eigen_vectors.dot(cut_eigen_vectors.T).dot(update)
# Return the projections as the point updates
return point_updates
def gaussian_kernel(values,
bandwidth):
"""Calculate the Gaussian kernel evaluation of distance values.
This function evaluates a Gaussian kernel for the squared distances
between a mesh point and the dataset, and for a given bandwidth.
Parameters:
-----------
values : array-like
The distances between a mesh point and provided coordinates.
bandwidth : float
The bandwidth used for kernel density estimates of data points.
Returns:
--------
kernel_value : float
The Gaussian kernel evaluations for the given distances.
Attributes:
-----------
None
"""
# Compute the kernel value for the given values
temp_1 = np.multiply(np.pi, np.square(bandwidth))
temp_2 = np.divide(1, np.sqrt(temp_1))
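# Note: temp_1 and temp_2 form the Gaussian normalization prefactor, which
# is not applied to the returned value below; this appears harmless here,
# as the constant cancels in the weighted averages built from these values.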
temp_3 = np.divide(values, np.square(bandwidth))
kernel_value = np.exp(np.multiply(np.negative(0.5), temp_3))
# Return the computed kernel value
return kernel_value
def local_inv_cov(point,
coordinates,
bandwidth):
"""Compute the local inverse covariance from the gradient and Hessian.
This function computes the local inverse covariance matrix for a given
mesh point and the provided dataset, using a given bandwidth. In order
to reach this result, the covariance matrix for the distances between
a mesh point and the dataset is calculated. After that, the Hessian
matrix is used to calculate the gradient at the given point's location.
Finally, the latter is used to arrive at the local inverse covariance.
Parameters:
-----------
point : array-like
The latitude-longitude coordinate tuple for a single mesh point.
coordinates : array-like
The set of latitudes and longitudes as a two-column array of floats.
bandwidth : float
The bandwidth used for kernel density estimates of data points.
Returns:
--------
inverse_covariance : array-like
The local inverse covariance for the given point and coordinates.
Attributes:
-----------
None
"""
# Calculate the squared distance between points
squared_distance = np.sum(np.square(coordinates - point), axis=1)
# Compute the average of the weights as the estimate
weights = gaussian_kernel(squared_distance, bandwidth)
weight_average = np.mean(weights)
# Get the number of points and the dimensionality
number_points, number_columns = coordinates.shape
# Calculate one over the given bandwidth
fraction_1 = np.divide(1, np.square(bandwidth))
# Calculate one over the given number of points
fraction_2 = np.divide(1, number_points)
# Compute the mean for the provided points
mu = np.multiply(fraction_1, (coordinates - point))
# Compute the covariance matrix for the provided points
covariance = gaussian_kernel(squared_distance, bandwidth)
# Compute the Hessian matrix for the provided points
temp_1 = np.multiply(fraction_1, np.eye(number_columns))
temp_2 = (np.multiply(covariance, mu.T)).dot(mu)
temp_3 = np.multiply(fraction_2, temp_2)
temp_4 = np.multiply(temp_1, np.sum(covariance))
hessian = temp_3 - np.multiply(fraction_2, temp_4)
# Get the number of data points and the dimensionality
number_rows, number_columns = coordinates.shape
# Compute the gradient at the given point for the data
temp_5 = np.mean(np.multiply(covariance, mu.T), axis=1)
gradient = np.negative(temp_5)
# Compute the local inverse covariance for the inputs
temp_6 = np.divide(np.negative(1), weight_average)
temp_7 = np.divide(1, np.square(weight_average))
temp_8 = np.multiply(temp_7, gradient.dot(gradient.T))
inverse_covariance = np.multiply(temp_6, hessian) + temp_8
# Return the local inverse covariance
return inverse_covariance
def parameter_check(coordinates,
neighbors,
bandwidth,
convergence,
percentage):
"""Check the main function inputs for unsuitable formats or values.
This function checks all of the user-provided main function inputs for
their suitability to be used by the code. This is done right at the
top of the main function to catch input errors early and before any
time is spent on time-consuming computations. Each faulty input is
identified, and a customized error message is printed for the user
to inform about the correct inputs before the code is terminated.
Parameters:
-----------
coordinates : array-like
The set of latitudes and longitudes as a two-column array of floats.
neighbors : int
The number of neighbors used for the optimal bandwidth calculation.
bandwidth : float
The bandwidth used for kernel density estimates of data points.
convergence : float
The convergence threshold for the inter-iteration update difference.
percentage : float
The percentage of highest-density filament points that are returned.
Returns:
--------
None
Attributes:
-----------
None
"""
# Create a boolean vector to keep track of incorrect inputs
incorrect_inputs = np.zeros(5, dtype = bool)
# Check whether two-dimensional coordinates are provided
if not type(coordinates) == np.ndarray:
incorrect_inputs[0] = True
elif not coordinates.shape[1] == 2:
incorrect_inputs[0] = True
# Check whether neighbors is a positive integer or float
if not ((type(neighbors) == int and neighbors > 0)
or ((type(neighbors) == float)
and (neighbors > 0)
and (neighbors.is_integer() == True))):
incorrect_inputs[1] = True
# Check whether bandwidth is a positive integer or float
if not bandwidth == None:
if not ((type(bandwidth) == int and bandwidth > 0)
or (type(bandwidth) == float) and bandwidth > 0):
incorrect_inputs[2] = True
# Check whether convergence is a positive integer or float
if not convergence == None:
if not ((type(convergence) == int and convergence > 0)
or (type(convergence) == float) and convergence > 0):
incorrect_inputs[3] = True
# Check whether percentage is a valid percentage value
if not percentage == None:
if not ((type(percentage) == int and percentage >= 0
and percentage <= 100)
or ((type(percentage) == float) and percentage >= 0
and percentage <= 100)):
incorrect_inputs[4] = True
# Define error messages for each parameter failing the tests
errors = ['ERROR: coordinates: Must be a 2-column numpy.ndarray',
'ERROR: neighbors: Must be a whole-number int or float > 0',
'ERROR: bandwidth: Must be an int or float > 0, or None',
'ERROR: convergence: Must be an int or float > 0, or None',
'ERROR: percentage: Must be an int or float in [0, 100], or None']
# Print eventual error messages and terminate the code
if any(value == True for value in incorrect_inputs):
for i in range(0, len(errors)):
if incorrect_inputs[i] == True:
print(errors[i])
sys.exit()
|
py
|
1a5a61feccffe83f3589041973285eb9df9cbb07
|
#!/usr/bin/env python2
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: op-test-framework/common/../MaintenancePage.py $
#
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2015
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
from Page import Page
from seleniumimports import *
from BmcPageConstants import BmcPageConstants
from selenium.webdriver.support.ui import Select
from OpTestConstants import OpTestConstants as BMC_CONST
from OpTestError import OpTestError
import time
##
# @file: MaintenancePage.py
# @brief: This file contains functions to browse through Maintenance options
#
##
# Maintenance_Page
# @brief: This class provides an interface to the Maintenance menu and other page interactions
#
class MaintenancePage():
OptionDict = {
'IPMI':'_chkPrsrvStatus3',
'NETWORK':'_chkPrsrvStatus4'
}
##
# @brief Constructor - Takes a pointer to BMC WebDriver
# @param page instance
# @return none
#
def __init__(self, page):
self.Page = page
##
# @brief Function to traverse to BMC Maintenance Page
#
# @param None
#
# @return BMC_CONST.FW_SUCCESS upon success
# raise OpTestError when fails
#
def getMaintenancePage(self):
try:
Maintenance = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_LN_MAINTENANCE)))
Maintenance.click()
Maintenance_menu = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_LN_MAINTENANCE_MENU)))
Maintenance_submenu = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.CSS_SELECTOR,
BmcPageConstants.BMC_LN_PRESERVE_CONFIG_HREF)))
Maintenance_submenu.click()
except:
l_msg = "Failed to get Maintenance Page"
print(l_msg)
raise OpTestError(l_msg)
return BMC_CONST.FW_SUCCESS
##
# @brief This function selects various options to be preserved
#
# @param optionname - Name of the option to be preserved. Has to be from
# OptionDict
# @param iselect - Set to true if option needs to be preserved
#
# @return BMC_CONST.FW_SUCCESS upon success
# raise OpTestError when fails
#
def selectOption(self, optionname, iselect):
try:
#Switch to top-level page/frame container
self.Page.driver.switch_to.default_content()
self.Page.driver.switch_to.frame(
self.Page.driver.find_element_by_id(
BmcPageConstants.BMC_MAINFRAME))
self.Page.driver.switch_to.frame(
self.Page.driver.find_element_by_id(
BmcPageConstants.BMC_PAGEFRAME))
Maintenance = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
optionname)))
if iselect is True:
if Maintenance.is_selected() is False:
Maintenance.click()
else:
if Maintenance.is_selected() is True:
Maintenance.click()
except:
l_msg = "Failed to select Options for preserving settings"
print(l_msg)
raise OpTestError(l_msg)
return BMC_CONST.FW_SUCCESS
##
# @brief This function preserves IPMI option
# @param none
# @return BMC_CONST.FW_SUCCESS upon success
#
def preserveIPMI(self):
return self.selectOption(self.OptionDict['IPMI'], True)
##
# @brief This function preserves NETWORK option
# @param none
# @return BMC_CONST.FW_SUCCESS upon success
#
def preserveNetwork(self):
return self.selectOption(self.OptionDict['NETWORK'], True)
##
# @brief This function hits 'Save' button on the Maintenance page
# @param none
# @return BMC_CONST.FW_SUCCESS upon success
# raise OpTestError when fails
#
def savePage(self):
try:
#Switch to top-level page/frame container
self.Page.driver.switch_to.default_content()
self.Page.driver.switch_to.frame(
self.Page.driver.find_element_by_id(
BmcPageConstants.BMC_MAINFRAME))
self.Page.driver.switch_to.frame(
self.Page.driver.find_element_by_id(
BmcPageConstants.BMC_PAGEFRAME))
Maintenance = WebDriverWait(self.Page.driver,BMC_CONST.WEB_DRIVER_WAIT).until(
EC.presence_of_element_located((By.ID,
BmcPageConstants.BMC_SAVE_BTN)))
Maintenance.click()
except:
l_msg = "Failed to savePage"
print(l_msg)
raise OpTestError(l_msg)
return BMC_CONST.FW_SUCCESS
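##
# A minimal usage sketch (assumes an already authenticated BMC web session
# wrapped in a Page instance; names follow this file's conventions):
#
# maintenance = MaintenancePage(page)
# maintenance.getMaintenancePage()
# maintenance.preserveIPMI()
# maintenance.preserveNetwork()
# maintenance.savePage()
#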
|
py
|
1a5a622bd28a8c07a1b4f26ff89f1d5fc966a5b9
|
import os
from io import BytesIO
import mimetypes
from django.db.models.fields.files import ImageField
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import signals
from PIL import Image
# todo: Add 'delete_with_model' option that will delete thumbnail and image when model is deleted.
def _get_thumbnail_filename(filename, append_text="-thumbnail"):
"""
Returns a thumbnail version of the file name.
"""
name, ext = os.path.splitext(filename)
return ''.join([name, append_text, ext])
class ThumbnailField(object):
"""
Instances of this class will be used to access data of the
generated thumbnails. A thumbnail is created when the image is saved
initially, but there's nothing persisted that references the thumbnail.
When the `SizedImageField` is instantiated, it gets this thumbnail
field attached to it where the thumbnail becomes accessible.
for example: `image.thumbnail.url`
"""
def __init__(self, name, storage):
"""
Uses same storage as the parent field
"""
self.name = name
self.storage = storage
@property
def path(self):
return self.storage.path(self.name)
@property
def url(self):
return self.storage.url(self.name)
@property
def size(self):
return self.storage.size(self.name)
class SizedImageField(ImageField):
"""
An Image field that allows auto-resizing and auto-creation of thumbnails.
"""
def __init__(self,
verbose_name=None,
name=None,
width_field=None,
height_field=None,
size=None,
thumbnail_size=None,
**kwargs):
"""
Added fields:
- size: a tuple containing the width and height to resize the image to,
plus an optional boolean indicating whether that size should be
forced (None for no resizing).
- thumbnail_size: a tuple with the same values as `size' (None for
not creating a thumbnail).
Example: (640, 480, True) -> Will resize the image to a width of 640px
and a height of 480px. The file will be cut if necessary to force
the image to the desired size.
"""
self.size = self._get_resize_options(size)
self.thumbnail_size = self._get_resize_options(thumbnail_size)
super(SizedImageField, self).__init__(verbose_name, name, width_field,
height_field, **kwargs)
def _get_resize_options(self, dimensions):
"""
:param dimensions:
A tuple of (width, height, force_size).
'force_size' can be left off and will default to False.
"""
if dimensions and isinstance(dimensions, (tuple, list)):
if len(dimensions) < 3:
dimensions = tuple(dimensions) + (False, )
return dimensions
def contribute_to_class(self, cls, name):
"""
Makes sure thumbnail gets set when image field initialized.
"""
super(SizedImageField, self).contribute_to_class(cls, name)
signals.post_init.connect(self._set_thumbnail, sender=cls)
def pre_save(self, model_instance, add):
"""
Resizes, commits image to storage, and returns field's value just before saving.
"""
file = getattr(model_instance, self.attname)
if file and not file._committed:
file.name = self._clean_file_name(model_instance, file.name)
file.file = self._resize_image(model_instance, file)
file.save(file.name, file, save=False)
return file
def _clean_file_name(self, model_instance, filename):
"""
We need to make sure we know the full file name before we save the thumbnail so
we can be sure the name doesn't change on save.
This method gets the available filename and returns just the file part.
"""
available_name = self.storage.get_available_name(
self.generate_filename(model_instance, filename))
return os.path.basename(available_name)
def _create_thumbnail(self, model_instance, thumbnail, image_name):
"""
Resizes and saves the thumbnail image
"""
thumbnail = self._do_resize(thumbnail, self.thumbnail_size)
full_image_name = self.generate_filename(model_instance, image_name)
thumbnail_filename = _get_thumbnail_filename(full_image_name)
thumb = self._get_simple_uploaded_file(thumbnail, thumbnail_filename)
self.storage.save(thumbnail_filename, thumb)
def _resize_image(self, model_instance, image_field):
""""""
image_name = image_field.name
image = Image.open(image_field.file)
if image.mode not in ('L', 'RGB'):
image = image.convert('RGB')
if self.size:
image = self._do_resize(image, self.size)
if self.thumbnail_size:
self._create_thumbnail(model_instance, image.copy(), image_name)
return self._get_simple_uploaded_file(image, image_name)
def _do_resize(self, img, dimensions):
width, height, force_size = dimensions
if force_size:
img = img.resize((width, height), Image.ANTIALIAS)
else:
img.thumbnail((width, height), Image.ANTIALIAS)
return img
def _set_thumbnail(self, instance=None, **kwargs):
"""
Sets a `thumbnail` attribute on the image field class.
On thumbnail you can access name, url, path attributes
"""
image_field = getattr(instance, self.name)
if image_field:
thumbnail_filename = _get_thumbnail_filename(image_field.name)
thumbnail_field = ThumbnailField(thumbnail_filename, self.storage)
setattr(image_field, 'thumbnail', thumbnail_field)
def _get_simple_uploaded_file(self, image, file_name):
"""
:param image:
a python PIL ``Image`` instance.
:param file_name:
The file name of the image.
:returns:
A django ``SimpleUploadedFile`` instance ready to be saved.
"""
extension = os.path.splitext(file_name)[1]
mimetype, encoding = mimetypes.guess_type(file_name)
content_type = mimetype or 'image/png'
temp_handle = BytesIO()
image.save(temp_handle, self._get_pil_format(extension))
temp_handle.seek(0) # rewind the file
suf = SimpleUploadedFile(
file_name,
temp_handle.read(),
content_type=content_type,
)
return suf
def _get_pil_format(self, extension):
"""
:param extension:
The file name extension (.png, .jpg, etc...)
:returns:
The file format PIL needs from the file extension.
Eg. PNG or JPEG
"""
return Image.EXTENSION[extension.lower()]
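# A minimal usage sketch (hypothetical model and field names, assuming the
# usual Django model setup):
#
# class Profile(models.Model):
#     avatar = SizedImageField(upload_to='avatars',
#                              size=(640, 480),
#                              thumbnail_size=(100, 100, True))
#
# profile = Profile.objects.get(pk=1)
# profile.avatar.url            # the resized image
# profile.avatar.thumbnail.url  # the generated thumbnail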
|
py
|
1a5a63af0df9b5e74208735a4af4223825a0bb03
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bookstore.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py
|
1a5a6400f0a6a1ec6d62020a014fd30465c556f1
|
class ContentFilteringRules(object):
def __init__(self, session):
super(ContentFilteringRules, self).__init__()
self._session = session
def getNetworkContentFiltering(self, networkId: str):
"""
**Return the content filtering settings for an MX network**
https://api.meraki.com/api_docs#return-the-content-filtering-settings-for-an-mx-network
- networkId (string)
"""
metadata = {
'tags': ['Content filtering rules'],
'operation': 'getNetworkContentFiltering',
}
resource = f'/networks/{networkId}/contentFiltering'
return self._session.get(metadata, resource)
def updateNetworkContentFiltering(self, networkId: str, **kwargs):
"""
**Update the content filtering settings for an MX network**
https://api.meraki.com/api_docs#update-the-content-filtering-settings-for-an-mx-network
- networkId (string)
- allowedUrlPatterns (array): A whitelist of URL patterns to allow
- blockedUrlPatterns (array): A blacklist of URL patterns to block
- blockedUrlCategories (array): A list of URL categories to block
- urlCategoryListSize (string): URL category list size which is either 'topSites' or 'fullList'
"""
kwargs.update(locals())
if 'urlCategoryListSize' in kwargs:
options = ['topSites', 'fullList']
assert kwargs['urlCategoryListSize'] in options, f'''"urlCategoryListSize" cannot be "{kwargs['urlCategoryListSize']}", & must be set to one of: {options}'''
metadata = {
'tags': ['Content filtering rules'],
'operation': 'updateNetworkContentFiltering',
}
resource = f'/networks/{networkId}/contentFiltering'
body_params = ['allowedUrlPatterns', 'blockedUrlPatterns', 'blockedUrlCategories', 'urlCategoryListSize']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return self._session.put(metadata, resource, payload)
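# A minimal usage sketch (hypothetical network ID and URL pattern; 'session'
# is the API session object this class is constructed with):
#
# rules = ContentFilteringRules(session)
# current = rules.getNetworkContentFiltering('N_1234')
# rules.updateNetworkContentFiltering(
#     'N_1234',
#     blockedUrlPatterns=['example.org'],
#     urlCategoryListSize='topSites')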
|
py
|
1a5a64bd5a32d0d22121111b40af71a3a007d7be
|
import os
from pikka_bird_collector.collectors.mysql import Mysql
from pikka_bird_collector.collectors.base_port_command import BasePortCommand
class TestMysql:
@staticmethod
def fixture_path(filename):
return os.path.join(os.path.dirname(__file__), '../fixtures', filename)
@staticmethod
def read_fixture(filename):
with open(filename, 'r') as f_h:
d = f_h.read()
return d
def mock_cmd_show_status(self):
f = TestMysql.fixture_path('mysql/show_status.txt')
return TestMysql.read_fixture(f)
def mock_cmd_show_master_status(self):
f = TestMysql.fixture_path('mysql/show_master_status.txt')
return TestMysql.read_fixture(f)
def mock_cmd_show_slave_status(self):
f = TestMysql.fixture_path('mysql/show_slave_status.txt')
return TestMysql.read_fixture(f)
def mock_cmd_show_slave_hosts(self):
f = TestMysql.fixture_path('mysql/show_slave_hosts.txt')
return TestMysql.read_fixture(f)
def mock_cmd_show_variables(self):
f = TestMysql.fixture_path('mysql/show_variables.txt')
return TestMysql.read_fixture(f)
def mock_exec_command(self, command_f):
if Mysql.CMD_SHOW_STATUS in command_f:
return self.mock_cmd_show_status()
elif Mysql.CMD_SHOW_MASTER_STATUS in command_f:
return self.mock_cmd_show_master_status()
elif Mysql.CMD_SHOW_SLAVE_STATUS in command_f:
return self.mock_cmd_show_slave_status()
elif Mysql.CMD_SHOW_SLAVE_HOSTS in command_f:
return self.mock_cmd_show_slave_hosts()
elif Mysql.CMD_SHOW_VARIABLES in command_f:
return self.mock_cmd_show_variables()
def mock_collect_status(self):
return {
'aborted_clients': 0,
'aborted_connects': 13,
'binlog_cache_disk_use': 0,
'binlog_cache_use': 0,
'binlog_stmt_cache_disk_use': 0,
'binlog_stmt_cache_use': 0,
'bytes_received': 224,
'bytes_sent': 168,
'com_admin_commands': 0,
'com_alter_db': 0,
'com_alter_db_upgrade': 0,
'com_alter_event': 0,
'com_alter_function': 0,
'com_alter_procedure': 0,
'com_alter_server': 0,
'com_alter_table': 0,
'com_alter_tablespace': 0,
'com_alter_user': 0,
'com_analyze': 0,
'com_assign_to_keycache': 0,
'com_begin': 0,
'com_binlog': 0,
'com_call_procedure': 0,
'com_change_db': 0,
'com_change_master': 0,
'com_check': 0,
'com_checksum': 0,
'com_commit': 0,
'com_create_db': 0,
'com_create_event': 0,
'com_create_function': 0,
'com_create_index': 0,
'com_create_procedure': 0,
'com_create_server': 0,
'com_create_table': 0,
'com_create_trigger': 0,
'com_create_udf': 0,
'com_create_user': 0,
'com_create_view': 0,
'com_dealloc_sql': 0,
'com_delete': 0,
'com_delete_multi': 0,
'com_do': 0,
'com_drop_db': 0,
'com_drop_event': 0,
'com_drop_function': 0,
'com_drop_index': 0,
'com_drop_procedure': 0,
'com_drop_server': 0,
'com_drop_table': 0,
'com_drop_trigger': 0,
'com_drop_user': 0,
'com_drop_view': 0,
'com_empty_query': 0,
'com_execute_sql': 0,
'com_flush': 0,
'com_get_diagnostics': 0,
'com_grant': 0,
'com_ha_close': 0,
'com_ha_open': 0,
'com_ha_read': 0,
'com_help': 0,
'com_insert': 0,
'com_insert_select': 0,
'com_install_plugin': 0,
'com_kill': 0,
'com_load': 0,
'com_lock_tables': 0,
'com_optimize': 0,
'com_preload_keys': 0,
'com_prepare_sql': 0,
'com_purge': 0,
'com_purge_before_date': 0,
'com_release_savepoint': 0,
'com_rename_table': 0,
'com_rename_user': 0,
'com_repair': 0,
'com_replace': 0,
'com_replace_select': 0,
'com_reset': 0,
'com_resignal': 0,
'com_revoke': 0,
'com_revoke_all': 0,
'com_rollback': 0,
'com_rollback_to_savepoint': 0,
'com_savepoint': 0,
'com_select': 1,
'com_set_option': 0,
'com_show_binlog_events': 0,
'com_show_binlogs': 0,
'com_show_charsets': 0,
'com_show_collations': 0,
'com_show_create_db': 0,
'com_show_create_event': 0,
'com_show_create_func': 0,
'com_show_create_proc': 0,
'com_show_create_table': 0,
'com_show_create_trigger': 0,
'com_show_databases': 0,
'com_show_engine_logs': 0,
'com_show_engine_mutex': 0,
'com_show_engine_status': 0,
'com_show_errors': 0,
'com_show_events': 0,
'com_show_fields': 0,
'com_show_function_code': 0,
'com_show_function_status': 0,
'com_show_grants': 0,
'com_show_keys': 0,
'com_show_master_status': 0,
'com_show_open_tables': 0,
'com_show_plugins': 0,
'com_show_privileges': 0,
'com_show_procedure_code': 0,
'com_show_procedure_status': 0,
'com_show_processlist': 0,
'com_show_profile': 0,
'com_show_profiles': 0,
'com_show_relaylog_events': 0,
'com_show_slave_hosts': 0,
'com_show_slave_status': 0,
'com_show_status': 1,
'com_show_storage_engines': 0,
'com_show_table_status': 0,
'com_show_tables': 0,
'com_show_triggers': 0,
'com_show_variables': 0,
'com_show_warnings': 0,
'com_signal': 0,
'com_slave_start': 0,
'com_slave_stop': 0,
'com_stmt_close': 0,
'com_stmt_execute': 0,
'com_stmt_fetch': 0,
'com_stmt_prepare': 0,
'com_stmt_reprepare': 0,
'com_stmt_reset': 0,
'com_stmt_send_long_data': 0,
'com_truncate': 0,
'com_uninstall_plugin': 0,
'com_unlock_tables': 0,
'com_update': 0,
'com_update_multi': 0,
'com_xa_commit': 0,
'com_xa_end': 0,
'com_xa_prepare': 0,
'com_xa_recover': 0,
'com_xa_rollback': 0,
'com_xa_start': 0,
'compression': False,
'connection_errors_accept': 0,
'connection_errors_internal': 0,
'connection_errors_max_connections': 0,
'connection_errors_peer_address': 0,
'connection_errors_select': 0,
'connection_errors_tcpwrap': 0,
'connections': 148,
'created_tmp_disk_tables': 0,
'created_tmp_files': 5,
'created_tmp_tables': 0,
'delayed_errors': 0,
'delayed_insert_threads': 0,
'delayed_writes': 0,
'flush_commands': 1,
'handler_commit': 0,
'handler_delete': 0,
'handler_discover': 0,
'handler_external_lock': 0,
'handler_mrr_init': 0,
'handler_prepare': 0,
'handler_read_first': 0,
'handler_read_key': 0,
'handler_read_last': 0,
'handler_read_next': 0,
'handler_read_prev': 0,
'handler_read_rnd': 0,
'handler_read_rnd_next': 0,
'handler_rollback': 0,
'handler_savepoint': 0,
'handler_savepoint_rollback': 0,
'handler_update': 0,
'handler_write': 0,
'innodb_available_undo_logs': 128,
'innodb_buffer_pool_bytes_data': 7454720,
'innodb_buffer_pool_bytes_dirty': 0,
'innodb_buffer_pool_dump_status': 'not started',
'innodb_buffer_pool_load_status': 'not started',
'innodb_buffer_pool_pages_data': 455,
'innodb_buffer_pool_pages_dirty': 0,
'innodb_buffer_pool_pages_flushed': 1,
'innodb_buffer_pool_pages_free': 7736,
'innodb_buffer_pool_pages_misc': 0,
'innodb_buffer_pool_pages_total': 8191,
'innodb_buffer_pool_read_ahead': 0,
'innodb_buffer_pool_read_ahead_evicted': 0,
'innodb_buffer_pool_read_ahead_rnd': 0,
'innodb_buffer_pool_read_requests': 8252,
'innodb_buffer_pool_reads': 456,
'innodb_buffer_pool_wait_free': 0,
'innodb_buffer_pool_write_requests': 1,
'innodb_data_fsyncs': 5,
'innodb_data_pending_fsyncs': 0,
'innodb_data_pending_reads': 0,
'innodb_data_pending_writes': 0,
'innodb_data_read': 7540736,
'innodb_data_reads': 477,
'innodb_data_writes': 5,
'innodb_data_written': 34304,
'innodb_dblwr_pages_written': 1,
'innodb_dblwr_writes': 1,
'innodb_have_atomic_builtins': True,
'innodb_log_waits': 0,
'innodb_log_write_requests': 0,
'innodb_log_writes': 1,
'innodb_num_open_files': 14,
'innodb_os_log_fsyncs': 3,
'innodb_os_log_pending_fsyncs': 0,
'innodb_os_log_pending_writes': 0,
'innodb_os_log_written': 512,
'innodb_page_size': 16384,
'innodb_pages_created': 0,
'innodb_pages_read': 455,
'innodb_pages_written': 1,
'innodb_row_lock_current_waits': 0,
'innodb_row_lock_time': 0,
'innodb_row_lock_time_avg': 0,
'innodb_row_lock_time_max': 0,
'innodb_row_lock_waits': 0,
'innodb_rows_deleted': 0,
'innodb_rows_inserted': 0,
'innodb_rows_read': 0,
'innodb_rows_updated': 0,
'innodb_truncated_status_writes': 0,
'key_blocks_not_flushed': 0,
'key_blocks_unused': 6698,
'key_blocks_used': 0,
'key_read_requests': 0,
'key_reads': 0,
'key_write_requests': 0,
'key_writes': 0,
'last_query_cost': 0.0,
'last_query_partial_plans': 0,
'max_used_connections': 2,
'not_flushed_delayed_rows': 0,
'open_files': 18,
'open_streams': 0,
'open_table_definitions': 68,
'open_tables': 61,
'opened_files': 118,
'opened_table_definitions': 0,
'opened_tables': 0,
'performance_schema_accounts_lost': 0,
'performance_schema_cond_classes_lost': 0,
'performance_schema_cond_instances_lost': 0,
'performance_schema_digest_lost': 0,
'performance_schema_file_classes_lost': 0,
'performance_schema_file_handles_lost': 0,
'performance_schema_file_instances_lost': 0,
'performance_schema_hosts_lost': 0,
'performance_schema_locker_lost': 0,
'performance_schema_mutex_classes_lost': 0,
'performance_schema_mutex_instances_lost': 0,
'performance_schema_rwlock_classes_lost': 0,
'performance_schema_rwlock_instances_lost': 0,
'performance_schema_session_connect_attrs_lost': 0,
'performance_schema_socket_classes_lost': 0,
'performance_schema_socket_instances_lost': 0,
'performance_schema_stage_classes_lost': 0,
'performance_schema_statement_classes_lost': 0,
'performance_schema_table_handles_lost': 0,
'performance_schema_table_instances_lost': 0,
'performance_schema_thread_classes_lost': 0,
'performance_schema_thread_instances_lost': 0,
'performance_schema_users_lost': 0,
'prepared_stmt_count': 0,
'qcache_free_blocks': 1,
'qcache_free_memory': 1031336,
'qcache_hits': 0,
'qcache_inserts': 0,
'qcache_lowmem_prunes': 0,
'qcache_not_cached': 136,
'qcache_queries_in_cache': 0,
'qcache_total_blocks': 1,
'queries': 410,
'questions': 2,
'rsa_public_key': None,
'select_full_join': 0,
'select_full_range_join': 0,
'select_range': 0,
'select_range_check': 0,
'select_scan': 0,
'slave_heartbeat_period': None,
'slave_last_heartbeat': None,
'slave_open_temp_tables': 0,
'slave_received_heartbeats': None,
'slave_retried_transactions': None,
'slave_running': False,
'slow_launch_threads': 0,
'slow_queries': 0,
'sort_merge_passes': 0,
'sort_range': 0,
'sort_rows': 0,
'sort_scan': 0,
'ssl_accept_renegotiates': 0,
'ssl_accepts': 0,
'ssl_callback_cache_hits': 0,
'ssl_cipher': None,
'ssl_cipher_list': None,
'ssl_client_connects': 0,
'ssl_connect_renegotiates': 0,
'ssl_ctx_verify_depth': 0,
'ssl_ctx_verify_mode': 0,
'ssl_default_timeout': 0,
'ssl_finished_accepts': 0,
'ssl_finished_connects': 0,
'ssl_server_not_after': None,
'ssl_server_not_before': None,
'ssl_session_cache_hits': 0,
'ssl_session_cache_misses': 0,
'ssl_session_cache_mode': 'NONE',
'ssl_session_cache_overflows': 0,
'ssl_session_cache_size': 0,
'ssl_session_cache_timeouts': 0,
'ssl_sessions_reused': 0,
'ssl_used_session_cache_entries': 0,
'ssl_verify_depth': 0,
'ssl_verify_mode': 0,
'ssl_version': None,
'table_locks_immediate': 74,
'table_locks_waited': 0,
'table_open_cache_hits': 0,
'table_open_cache_misses': 0,
'table_open_cache_overflows': 0,
'tc_log_max_pages_used': 0,
'tc_log_page_size': 0,
'tc_log_page_waits': 0,
'threads_cached': 0,
'threads_connected': 2,
'threads_created': 2,
'threads_running': 1,
'uptime': 2616535,
'uptime_since_flush_status': 2616535}
def mock_collect_master_status(self):
return {
'mysql-bin.000024': {
'binlog_do_db': None,
'binlog_ignore_db': None,
'file': 'mysql-bin.000024',
'position': 64795006}}
def mock_collect_slave_status(self):
return {
'connect_retry': 60,
'exec_master_log_pos': 64707836,
'last_errno': 0,
'last_error': None,
'last_io_errno': 0,
'last_io_error': None,
'last_sql_errno': 0,
'last_sql_error': None,
'master_host': 'i-00000000.example.com',
'master_log_file': 'mysql-bin.000024',
'master_port': 3306,
'master_server_id': 1,
'master_ssl_allowed': False,
'master_ssl_ca_file': None,
'master_ssl_ca_path': None,
'master_ssl_cert': None,
'master_ssl_cipher': None,
'master_ssl_key': None,
'master_ssl_verify_server_cert': False,
'master_user': 'repl',
'read_master_log_pos': 64707836,
'relay_log_file': 'mysqld-relay-bin.000064',
'relay_log_pos': 64659963,
'relay_log_space': 64660762,
'relay_master_log_file': 'mysql-bin.000024',
'replicate_do_db': None,
'replicate_do_table': None,
'replicate_ignore_db': None,
'replicate_ignore_server_ids': None,
'replicate_ignore_table': None,
'replicate_wild_do_table': None,
'replicate_wild_ignore_table': None,
'seconds_behind_master': 0,
'skip_counter': 0,
'slave_io_running': True,
'slave_io_state': 'Waiting for master to send event',
'slave_sql_running': True,
'until_condition': 'None',
'until_log_file': None,
'until_log_pos': 0}
def mock_collect_slave_hosts(self):
return {
'2': {
'host': 'i-00000000',
'master_id': 1,
'port': 3306,
'server_id': 2}}
def mock_collect_variables(self):
return {
'auto_increment_increment': 1,
'auto_increment_offset': 1,
'autocommit': True,
'automatic_sp_privileges': True,
'back_log': 80,
'basedir': '/usr/local/Cellar/mysql/5.6.23',
'big_tables': False,
'bind_address': '127.0.0.1',
'binlog_cache_size': 32768,
'binlog_checksum': 'CRC32',
'binlog_direct_non_transactional_updates': False,
'binlog_error_action': 'IGNORE_ERROR',
'binlog_format': 'STATEMENT',
'binlog_gtid_simple_recovery': False,
'binlog_max_flush_queue_time': 0,
'binlog_order_commits': True,
'binlog_row_image': 'FULL',
'binlog_rows_query_log_events': False,
'binlog_stmt_cache_size': 32768,
'binlogging_impossible_mode': 'IGNORE_ERROR',
'block_encryption_mode': 'aes-128-ecb',
'bulk_insert_buffer_size': 8388608,
'character_set_client': 'utf8',
'character_set_connection': 'utf8',
'character_set_database': 'utf8',
'character_set_filesystem': 'binary',
'character_set_results': 'utf8',
'character_set_server': 'utf8',
'character_set_system': 'utf8',
'character_sets_dir': '/usr/local/Cellar/mysql/5.6.23/share/mysql/charsets/',
'collation_connection': 'utf8_general_ci',
'collation_database': 'utf8_general_ci',
'collation_server': 'utf8_general_ci',
'completion_type': 'NO_CHAIN',
'concurrent_insert': 'AUTO',
'connect_timeout': 10,
'core_file': False,
'datadir': '/usr/local/var/mysql/',
'date_format': '%Y-%m-%d',
'datetime_format': '%Y-%m-%d %H:%i:%s',
'default_storage_engine': 'InnoDB',
'default_tmp_storage_engine': 'InnoDB',
'default_week_format': 0,
'delay_key_write': True,
'delayed_insert_limit': 100,
'delayed_insert_timeout': 300,
'delayed_queue_size': 1000,
'disconnect_on_expired_password': True,
'div_precision_increment': 4,
'end_markers_in_json': False,
'enforce_gtid_consistency': False,
'eq_range_index_dive_limit': 10,
'error_count': 0,
'event_scheduler': False,
'expire_logs_days': 0,
'explicit_defaults_for_timestamp': False,
'external_user': None,
'flush': False,
'flush_time': 0,
'foreign_key_checks': True,
'ft_boolean_syntax': '+ -><()~*:""&|',
'ft_max_word_len': 84,
'ft_min_word_len': 4,
'ft_query_expansion_limit': 20,
'ft_stopword_file': '(built-in)',
'general_log': False,
'general_log_file': '/usr/local/var/mysql/tiredpixel.log',
'group_concat_max_len': 1024,
'gtid_executed': None,
'gtid_mode': False,
'gtid_next': 'AUTOMATIC',
'gtid_owned': None,
'gtid_purged': None,
'have_compress': True,
'have_crypt': True,
'have_dynamic_loading': True,
'have_geometry': True,
'have_openssl': 'DISABLED',
'have_profiling': True,
'have_query_cache': True,
'have_rtree_keys': True,
'have_ssl': 'DISABLED',
'have_symlink': True,
'host_cache_size': 279,
'hostname': 'tiredpixel.home',
'identity': 0,
'ignore_builtin_innodb': False,
'ignore_db_dirs': None,
'init_connect': None,
'init_file': None,
'init_slave': None,
'innodb_adaptive_flushing': True,
'innodb_adaptive_flushing_lwm': 10,
'innodb_adaptive_hash_index': True,
'innodb_adaptive_max_sleep_delay': 150000,
'innodb_additional_mem_pool_size': 8388608,
'innodb_api_bk_commit_interval': 5,
'innodb_api_disable_rowlock': False,
'innodb_api_enable_binlog': False,
'innodb_api_enable_mdl': False,
'innodb_api_trx_level': 0,
'innodb_autoextend_increment': 64,
'innodb_autoinc_lock_mode': 1,
'innodb_buffer_pool_dump_at_shutdown': False,
'innodb_buffer_pool_dump_now': False,
'innodb_buffer_pool_filename': 'ib_buffer_pool',
'innodb_buffer_pool_instances': 8,
'innodb_buffer_pool_load_abort': False,
'innodb_buffer_pool_load_at_startup': False,
'innodb_buffer_pool_load_now': False,
'innodb_buffer_pool_size': 134217728,
'innodb_change_buffer_max_size': 25,
'innodb_change_buffering': 'all',
'innodb_checksum_algorithm': 'innodb',
'innodb_checksums': True,
'innodb_cmp_per_index_enabled': False,
'innodb_commit_concurrency': 0,
'innodb_compression_failure_threshold_pct': 5,
'innodb_compression_level': 6,
'innodb_compression_pad_pct_max': 50,
'innodb_concurrency_tickets': 5000,
'innodb_data_file_path': 'ibdata1:12M:autoextend',
'innodb_data_home_dir': None,
'innodb_disable_sort_file_cache': False,
'innodb_doublewrite': True,
'innodb_fast_shutdown': 1,
'innodb_file_format': 'Antelope',
'innodb_file_format_check': True,
'innodb_file_format_max': 'Antelope',
'innodb_file_per_table': True,
'innodb_flush_log_at_timeout': 1,
'innodb_flush_log_at_trx_commit': 1,
'innodb_flush_method': None,
'innodb_flush_neighbors': 1,
'innodb_flushing_avg_loops': 30,
'innodb_force_load_corrupted': False,
'innodb_force_recovery': 0,
'innodb_ft_aux_table': None,
'innodb_ft_cache_size': 8000000,
'innodb_ft_enable_diag_print': False,
'innodb_ft_enable_stopword': True,
'innodb_ft_max_token_size': 84,
'innodb_ft_min_token_size': 3,
'innodb_ft_num_word_optimize': 2000,
'innodb_ft_result_cache_limit': 2000000000,
'innodb_ft_server_stopword_table': None,
'innodb_ft_sort_pll_degree': 2,
'innodb_ft_total_cache_size': 640000000,
'innodb_ft_user_stopword_table': None,
'innodb_io_capacity': 200,
'innodb_io_capacity_max': 2000,
'innodb_large_prefix': False,
'innodb_lock_wait_timeout': 50,
'innodb_locks_unsafe_for_binlog': False,
'innodb_log_buffer_size': 8388608,
'innodb_log_compressed_pages': True,
'innodb_log_file_size': 50331648,
'innodb_log_files_in_group': 2,
'innodb_log_group_home_dir': './',
'innodb_lru_scan_depth': 1024,
'innodb_max_dirty_pages_pct': 75,
'innodb_max_dirty_pages_pct_lwm': 0,
'innodb_max_purge_lag': 0,
'innodb_max_purge_lag_delay': 0,
'innodb_mirrored_log_groups': 1,
'innodb_monitor_disable': None,
'innodb_monitor_enable': None,
'innodb_monitor_reset': None,
'innodb_monitor_reset_all': None,
'innodb_old_blocks_pct': 37,
'innodb_old_blocks_time': 1000,
'innodb_online_alter_log_max_size': 134217728,
'innodb_open_files': 2000,
'innodb_optimize_fulltext_only': False,
'innodb_page_size': 16384,
'innodb_print_all_deadlocks': False,
'innodb_purge_batch_size': 300,
'innodb_purge_threads': 1,
'innodb_random_read_ahead': False,
'innodb_read_ahead_threshold': 56,
'innodb_read_io_threads': 4,
'innodb_read_only': False,
'innodb_replication_delay': 0,
'innodb_rollback_on_timeout': False,
'innodb_rollback_segments': 128,
'innodb_sort_buffer_size': 1048576,
'innodb_spin_wait_delay': 6,
'innodb_stats_auto_recalc': True,
'innodb_stats_method': 'nulls_equal',
'innodb_stats_on_metadata': False,
'innodb_stats_persistent': True,
'innodb_stats_persistent_sample_pages': 20,
'innodb_stats_sample_pages': 8,
'innodb_stats_transient_sample_pages': 8,
'innodb_status_output': False,
'innodb_status_output_locks': False,
'innodb_strict_mode': False,
'innodb_support_xa': True,
'innodb_sync_array_size': 1,
'innodb_sync_spin_loops': 30,
'innodb_table_locks': True,
'innodb_thread_concurrency': 0,
'innodb_thread_sleep_delay': 10000,
'innodb_undo_directory': '.',
'innodb_undo_logs': 128,
'innodb_undo_tablespaces': 0,
'innodb_use_native_aio': False,
'innodb_use_sys_malloc': True,
'innodb_version': '5.6.23',
'innodb_write_io_threads': 4,
'insert_id': 0,
'interactive_timeout': 28800,
'join_buffer_size': 262144,
'keep_files_on_create': False,
'key_buffer_size': 8388608,
'key_cache_age_threshold': 300,
'key_cache_block_size': 1024,
'key_cache_division_limit': 100,
'large_files_support': True,
'large_page_size': 0,
'large_pages': False,
'last_insert_id': 0,
'lc_messages': 'en_US',
'lc_messages_dir': '/usr/local/Cellar/mysql/5.6.23/share/mysql/',
'lc_time_names': 'en_US',
'license': 'GPL',
'local_infile': True,
'lock_wait_timeout': 31536000,
'locked_in_memory': False,
'log_bin': False,
'log_bin_basename': None,
'log_bin_index': None,
'log_bin_trust_function_creators': False,
'log_bin_use_v1_row_events': False,
'log_error': '/usr/local/var/mysql/tiredpixel.home.err',
'log_output': 'FILE',
'log_queries_not_using_indexes': False,
'log_slave_updates': False,
'log_slow_admin_statements': False,
'log_slow_slave_statements': False,
'log_throttle_queries_not_using_indexes': 0,
'log_warnings': 1,
'long_query_time': 10.000000,
'low_priority_updates': False,
'lower_case_file_system': True,
'lower_case_table_names': 2,
'master_info_repository': 'FILE',
'master_verify_checksum': False,
'max_allowed_packet': 4194304,
'max_binlog_cache_size': 18446744073709547520,
'max_binlog_size': 1073741824,
'max_binlog_stmt_cache_size': 18446744073709547520,
'max_connect_errors': 100,
'max_connections': 151,
'max_delayed_threads': 20,
'max_error_count': 64,
'max_heap_table_size': 16777216,
'max_insert_delayed_threads': 20,
'max_join_size': 18446744073709551615,
'max_length_for_sort_data': 1024,
'max_prepared_stmt_count': 16382,
'max_relay_log_size': 0,
'max_seeks_for_key': 18446744073709551615,
'max_sort_length': 1024,
'max_sp_recursion_depth': 0,
'max_tmp_tables': 32,
'max_user_connections': 0,
'max_write_lock_count': 18446744073709551615,
'metadata_locks_cache_size': 1024,
'metadata_locks_hash_instances': 8,
'min_examined_row_limit': 0,
'multi_range_count': 256,
'myisam_data_pointer_size': 6,
'myisam_max_sort_file_size': 9223372036853727232,
'myisam_mmap_size': 18446744073709551615,
'myisam_recover_options': False,
'myisam_repair_threads': 1,
'myisam_sort_buffer_size': 8388608,
'myisam_stats_method': 'nulls_unequal',
'myisam_use_mmap': False,
'net_buffer_length': 16384,
'net_read_timeout': 30,
'net_retry_count': 10,
'net_write_timeout': 60,
'new': False,
'old': False,
'old_alter_table': False,
'old_passwords': 0,
'open_files_limit': 5000,
'optimizer_prune_level': 1,
'optimizer_search_depth': 62,
'optimizer_switch': 'index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,mrr=on,mrr_cost_based=on,block_nested_loop=on,batched_key_access=off,materialization=on,semijoin=on,loosescan=on,firstmatch=on,subquery_materialization_cost_based=on,use_index_extensions=on',
'optimizer_trace': 'enabled=off,one_line=off',
'optimizer_trace_features': 'greedy_search=on,range_optimizer=on,dynamic_range=on,repeated_subselect=on',
'optimizer_trace_limit': 1,
'optimizer_trace_max_mem_size': 16384,
'optimizer_trace_offset': -1,
'performance_schema': True,
'performance_schema_accounts_size': 100,
'performance_schema_digests_size': 10000,
'performance_schema_events_stages_history_long_size': 10000,
'performance_schema_events_stages_history_size': 10,
'performance_schema_events_statements_history_long_size': 10000,
'performance_schema_events_statements_history_size': 10,
'performance_schema_events_waits_history_long_size': 10000,
'performance_schema_events_waits_history_size': 10,
'performance_schema_hosts_size': 100,
'performance_schema_max_cond_classes': 80,
'performance_schema_max_cond_instances': 3504,
'performance_schema_max_file_classes': 50,
'performance_schema_max_file_handles': 32768,
'performance_schema_max_file_instances': 7693,
'performance_schema_max_mutex_classes': 200,
'performance_schema_max_mutex_instances': 15906,
'performance_schema_max_rwlock_classes': 40,
'performance_schema_max_rwlock_instances': 9102,
'performance_schema_max_socket_classes': 10,
'performance_schema_max_socket_instances': 322,
'performance_schema_max_stage_classes': 150,
'performance_schema_max_statement_classes': 168,
'performance_schema_max_table_handles': 4000,
'performance_schema_max_table_instances': 12500,
'performance_schema_max_thread_classes': 50,
'performance_schema_max_thread_instances': 402,
'performance_schema_session_connect_attrs_size': 512,
'performance_schema_setup_actors_size': 100,
'performance_schema_setup_objects_size': 100,
'performance_schema_users_size': 100,
'pid_file': '/usr/local/var/mysql/tiredpixel.home.pid',
'plugin_dir': '/usr/local/Cellar/mysql/5.6.23/lib/plugin/',
'port': 3306,
'preload_buffer_size': 32768,
'profiling': False,
'profiling_history_size': 15,
'protocol_version': 10,
'proxy_user': None,
'pseudo_slave_mode': False,
'pseudo_thread_id': 80,
'query_alloc_block_size': 8192,
'query_cache_limit': 1048576,
'query_cache_min_res_unit': 4096,
'query_cache_size': 1048576,
'query_cache_type': False,
'query_cache_wlock_invalidate': False,
'query_prealloc_size': 8192,
'rand_seed1': 0,
'rand_seed2': 0,
'range_alloc_block_size': 4096,
'read_buffer_size': 131072,
'read_only': False,
'read_rnd_buffer_size': 262144,
'relay_log': None,
'relay_log_basename': None,
'relay_log_index': None,
'relay_log_info_file': 'relay-log.info',
'relay_log_info_repository': 'FILE',
'relay_log_purge': True,
'relay_log_recovery': False,
'relay_log_space_limit': 0,
'report_host': None,
'report_password': None,
'report_port': 3306,
'report_user': None,
'rpl_stop_slave_timeout': 31536000,
'secure_auth': True,
'secure_file_priv': None,
'server_id': 0,
'server_id_bits': 32,
'server_uuid': '5d2f94a0-4658-11e4-92e7-0a41270292d6',
'sha256_password_private_key_path': 'private_key.pem',
'sha256_password_public_key_path': 'public_key.pem',
'simplified_binlog_gtid_recovery': False,
'skip_external_locking': True,
'skip_name_resolve': False,
'skip_networking': False,
'skip_show_database': False,
'slave_allow_batching': False,
'slave_checkpoint_group': 512,
'slave_checkpoint_period': 300,
'slave_compressed_protocol': False,
'slave_exec_mode': 'STRICT',
'slave_load_tmpdir': '/var/folders/wl/9pmj8jnd33d8pgrn9gd1gl5r0000gn/T/',
'slave_max_allowed_packet': 1073741824,
'slave_net_timeout': 3600,
'slave_parallel_workers': 0,
'slave_pending_jobs_size_max': 16777216,
'slave_rows_search_algorithms': 'TABLE_SCAN,INDEX_SCAN',
'slave_skip_errors': False,
'slave_sql_verify_checksum': True,
'slave_transaction_retries': 10,
'slave_type_conversions': None,
'slow_launch_time': 2,
'slow_query_log': False,
'slow_query_log_file': '/usr/local/var/mysql/tiredpixel-slow.log',
'socket': '/tmp/mysql.sock',
'sort_buffer_size': 262144,
'sql_auto_is_null': False,
'sql_big_selects': True,
'sql_buffer_result': False,
'sql_log_bin': True,
'sql_log_off': False,
'sql_mode': 'NO_ENGINE_SUBSTITUTION',
'sql_notes': True,
'sql_quote_show_create': True,
'sql_safe_updates': False,
'sql_select_limit': 18446744073709551615,
'sql_slave_skip_counter': 0,
'sql_warnings': False,
'ssl_ca': None,
'ssl_capath': None,
'ssl_cert': None,
'ssl_cipher': None,
'ssl_crl': None,
'ssl_crlpath': None,
'ssl_key': None,
'storage_engine': 'InnoDB',
'stored_program_cache': 256,
'sync_binlog': 0,
'sync_frm': True,
'sync_master_info': 10000,
'sync_relay_log': 10000,
'sync_relay_log_info': 10000,
'system_time_zone': 'BST',
'table_definition_cache': 1400,
'table_open_cache': 2000,
'table_open_cache_instances': 1,
'thread_cache_size': 9,
'thread_concurrency': 10,
'thread_handling': 'one-thread-per-connection',
'thread_stack': 262144,
'time_format': '%H:%i:%s',
'time_zone': 'SYSTEM',
'timed_mutexes': False,
'timestamp': 1430653686.849428,
'tmp_table_size': 16777216,
'tmpdir': '/var/folders/wl/9pmj8jnd33d8pgrn9gd1gl5r0000gn/T/',
'transaction_alloc_block_size': 8192,
'transaction_allow_batching': False,
'transaction_prealloc_size': 4096,
'tx_isolation': 'REPEATABLE-READ',
'tx_read_only': False,
'unique_checks': True,
'updatable_views_with_limit': True,
'version': '5.6.23',
'version_comment': 'Homebrew',
'version_compile_machine': 'x86_64',
'version_compile_os': 'osx10.10',
'wait_timeout': 28800,
'warning_count': 0}
def test_command_tool(self):
assert (Mysql.command_tool(3306, {}, 'SHOW VARIABLES') ==
['mysql', '--host', '127.0.0.1', '--port', 3306,
'--execute', 'SHOW VARIABLES',
'--batch', '--raw', '--column-names'])
def test_command_tool_user(self):
assert (Mysql.command_tool(3306, { 'user': "USER" }, 'SHOW VARIABLES') ==
['mysql', '--host', '127.0.0.1', '--port', 3306,
'--execute', 'SHOW VARIABLES',
'--batch', '--raw', '--column-names',
'--user=USER'])
def test_command_tool_password(self):
assert (Mysql.command_tool(3306, { 'password': 'PASS"WORD' }, 'SHOW VARIABLES') ==
['mysql', '--host', '127.0.0.1', '--port', 3306,
'--execute', 'SHOW VARIABLES',
'--batch', '--raw', '--column-names',
'--password=PASS"WORD'])
def test_enabled(self):
mysql = Mysql({}, { 3306: {} })
assert mysql.enabled() == True
def test_enabled_no_ports(self):
mysql = Mysql({}, {})
assert mysql.enabled() == False
def test_collect(self, monkeypatch):
monkeypatch.setattr(BasePortCommand, 'exec_command',
self.mock_exec_command)
mysql = Mysql({}, { 3306: {} })
metrics = mysql.collect()
metrics_t0 = {
'status': self.mock_collect_status(),
'master_status': self.mock_collect_master_status(),
'slave_status': self.mock_collect_slave_status(),
'slave_hosts': self.mock_collect_slave_hosts(),
'variables': self.mock_collect_variables()}
metrics_t = {
'status': self.mock_collect_status()}
for setting, v in Mysql.COLLECT_SETTING_DEFAULTS.items():
if v:
metrics_t[setting] = metrics_t0[setting]
assert metrics[3306] == metrics_t
for setting, v in Mysql.COLLECT_SETTING_DEFAULTS.items():
mysql2 = Mysql({}, { 3306: { 'collect': { setting: v } } })
metrics2 = mysql2.collect()
metrics_t2 = metrics_t.copy()
if v:
metrics_t2[setting] = metrics_t0[setting]
assert metrics2[3306] == metrics_t2
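# A hedged note: these tests rely on the pytest `monkeypatch` fixture (and on
# mock_* helpers defined earlier in this class), so they are meant to be run
# with pytest, e.g. `pytest -k Mysql`; the exact selector is an assumption.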
|
py
|
1a5a64e7962beabc33b69ec182fda8cf03a011de
|
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module that implements an event loop based on twisted
( https://twistedmatrix.com ).
"""
from twisted.internet import reactor, protocol
from threading import Event, Thread, Lock
from functools import partial
import logging
import weakref
import atexit
from cassandra import OperationTimedOut
from cassandra.connection import Connection, ConnectionShutdown
from cassandra.protocol import RegisterMessage
log = logging.getLogger(__name__)
def _cleanup(cleanup_weakref):
try:
cleanup_weakref()._cleanup()
except ReferenceError:
return
class TwistedConnectionProtocol(protocol.Protocol):
"""
Twisted Protocol class for handling data received and connection
made events.
"""
def dataReceived(self, data):
"""
Callback function that is called when data has been received
on the connection.
Reaches back to the Connection object and queues the data for
processing.
"""
self.transport.connector.factory.conn._iobuf.write(data)
self.transport.connector.factory.conn.handle_read()
def connectionMade(self):
"""
Callback function that is called when a connection has succeeded.
Reaches back to the Connection object and confirms that the connection
is ready.
"""
self.transport.connector.factory.conn.client_connection_made()
def connectionLost(self, reason):
# reason is a Failure instance
self.transport.connector.factory.conn.defunct(reason.value)
class TwistedConnectionClientFactory(protocol.ClientFactory):
def __init__(self, connection):
# ClientFactory does not define __init__() in parent classes
# and does not inherit from object.
self.conn = connection
def buildProtocol(self, addr):
"""
Twisted function that defines which kind of protocol to use
in the ClientFactory.
"""
return TwistedConnectionProtocol()
def clientConnectionFailed(self, connector, reason):
"""
Overridden twisted callback which is called when the
connection attempt fails.
"""
log.debug("Connect failed: %s", reason)
self.conn.defunct(reason.value)
def clientConnectionLost(self, connector, reason):
"""
Overridden twisted callback which is called when the
connection goes away (cleanly or otherwise).
It should be safe to call defunct() here instead of just close, because
we can assume that if the connection was closed cleanly, there are no
callbacks to error out. If this assumption turns out to be false, we
can call close() instead of defunct() when "reason" is an appropriate
type.
"""
log.debug("Connect lost: %s", reason)
self.conn.defunct(reason.value)
class TwistedLoop(object):
_lock = None
_thread = None
def __init__(self):
self._lock = Lock()
def maybe_start(self):
with self._lock:
if not reactor.running:
self._thread = Thread(target=reactor.run,
name="cassandra_driver_event_loop",
kwargs={'installSignalHandlers': False})
self._thread.daemon = True
self._thread.start()
atexit.register(partial(_cleanup, weakref.ref(self)))
def _cleanup(self):
if self._thread:
reactor.callFromThread(reactor.stop)
self._thread.join(timeout=1.0)
if self._thread.is_alive():
log.warning("Event loop thread could not be joined, so "
"shutdown may not be clean. Please call "
"Cluster.shutdown() to avoid this.")
log.debug("Event loop thread was joined")
class TwistedConnection(Connection):
"""
An implementation of :class:`.Connection` that utilizes the
Twisted event loop.
"""
_loop = None
_total_reqd_bytes = 0
@classmethod
def initialize_reactor(cls):
if not cls._loop:
cls._loop = TwistedLoop()
@classmethod
def factory(cls, *args, **kwargs):
"""
A factory function which returns connections which have
succeeded in connecting and are ready for service (or
raises an exception otherwise).
"""
timeout = kwargs.pop('timeout', 5.0)
conn = cls(*args, **kwargs)
conn.connected_event.wait(timeout)
if conn.last_error:
raise conn.last_error
elif not conn.connected_event.is_set():
conn.close()
raise OperationTimedOut("Timed out creating connection")
else:
return conn
def __init__(self, *args, **kwargs):
"""
Initialization method.
Note that we can't call reactor methods directly here because
it's not thread-safe, so we schedule the reactor/connection
stuff to be run from the event loop thread when it gets the
chance.
"""
Connection.__init__(self, *args, **kwargs)
self.connected_event = Event()
self.is_closed = True
self.connector = None
self._callbacks = {}
reactor.callFromThread(self.add_connection)
self._loop.maybe_start()
def add_connection(self):
"""
Convenience function to connect and store the resulting
connector.
"""
self.connector = reactor.connectTCP(
host=self.host, port=self.port,
factory=TwistedConnectionClientFactory(self))
def client_connection_made(self):
"""
Called by twisted protocol when a connection attempt has
succeeded.
"""
with self.lock:
self.is_closed = False
self._send_options_message()
def close(self):
"""
Disconnect and error-out all callbacks.
"""
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.host)
self.connector.disconnect()
log.debug("Closed socket to %s", self.host)
if not self.is_defunct:
self.error_all_callbacks(
ConnectionShutdown("Connection to %s was closed" % self.host))
# don't leave in-progress operations hanging
self.connected_event.set()
def handle_read(self):
"""
Process the incoming data buffer.
"""
self.process_io_buffer()
def push(self, data):
"""
This function is called when outgoing data should be queued
for sending.
Note that we can't call transport.write() directly because
it is not thread-safe, so we schedule it to run from within
the event loop when it gets the chance.
"""
reactor.callFromThread(self.connector.transport.write, data)
def register_watcher(self, event_type, callback, register_timeout=None):
"""
Register a callback for a given event type.
"""
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=[event_type]),
timeout=register_timeout)
def register_watchers(self, type_callback_dict, register_timeout=None):
"""
Register multiple callback/event type pairs, expressed as a dict.
"""
for event_type, callback in type_callback_dict.items():
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=type_callback_dict.keys()),
timeout=register_timeout)
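# A minimal usage sketch (hedged: this connection class is normally selected
# via the DataStax driver's Cluster object rather than instantiated directly,
# and the module path below is assumed to match the installed driver):
#
#     from cassandra.cluster import Cluster
#     from cassandra.io.twistedreactor import TwistedConnection
#
#     cluster = Cluster(connection_class=TwistedConnection)
#     session = cluster.connect()
#     session.execute("SELECT release_version FROM system.local")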
|
py
|
1a5a660cf02d2ac5600666ecfec3cc15b660d577
|
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyexcel
from .rule import Rule
|
py
|
1a5a66be84b49782f36eb348fb71558857d95bfd
|
from model.new_user_data import N_u_d
import random
import string
import os.path
import json
import getopt
import sys
import jsonpickle
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
print(err)  # getopt has no usage() helper; report the parse error instead
sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
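# A hedged usage sketch (the script file name is an assumption):
#     python generate_contacts.py -n 10 -f data/contacts.json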
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + string.punctuation + " " * 20  # spaces repeated so whitespace shows up often
return prefix + "".join([random.choice(symbols) for i in range (random.randrange(maxlen))])
testdata = [
N_u_d(namef=random_string("namef", 10), namem=random_string("namem", 10), namel=random_string("namel", 10),
nick=random_string("nick", 6), title=random_string("title", 9), firm=random_string("firm", 12),
addr=random_string("address", 20), phone_h=random_string("phoneh", 7),
phone_m=random_string("phone_m", 7), phone_work=random_string("phone_w", 7),
phone_fax=random_string("phone_fax", 7), email_1=random_string("email1", 7),
email_2=random_string("email_2", 10), email_3=random_string("email_3", 10),
homep=random_string("home_page", 12), day_1 = "//div[@id='content']/form/select[1]//option[3]",
month_1 = "//div[@id='content']/form/select[2]//option[2]", year_1 = random_string("year", 6),
day_2 = "//div[@id='content']/form/select[3]//option[3]",
month_2 = "//div[@id='content']/form/select[4]//option[2]",
year_2 = random_string("year", 6), address_2=random_string("address", 15),
phone_h2=random_string("phone_h2", 6), notes=random_string("notes", 20))
for i in range(n)  # honour the -n option instead of a hard-coded count
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options('json', indent=2)
out.write(jsonpickle.encode(testdata))
|
py
|
1a5a68f69bb0ec97262c2167ecba0074a7f64dc9
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import datetime
import logging
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from requests.exceptions import RequestException
plugin_name = 'pushover'
log = logging.getLogger(plugin_name)
PUSHOVER_URL = 'https://api.pushover.net/1/messages.json'
requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('pushover.net', '5 seconds'))
class PushoverNotifier(object):
"""
Example::
notify:
entries:
via:
- pushover:
user_key: <USER_KEY> (can also be a list of user keys)
token: <TOKEN>
[device: <DEVICE_STRING>]
[priority: <PRIORITY>]
[url: <URL>]
[url_title: <URL_TITLE>]
[sound: <SOUND>]
[retry: <RETRY>]
[expire: <EXPIRE>]
[callback: <CALLBACK>]
[html: <HTML>]
"""
schema = {
'type': 'object',
'properties': {
'user_key': one_or_more({'type': 'string'}),
'api_key': {'type': 'string', 'default': 'aPwSHwkLcNaavShxktBpgJH4bRWc3m'},
'device': one_or_more({'type': 'string'}),
'priority': {'oneOf': [
{'type': 'number', 'minimum': -2, 'maximum': 2},
{'type': 'string'}]},
'url': {'type': 'string'},
'url_title': {'type': 'string'},
'sound': {'type': 'string'},
'retry': {'type': 'integer', 'minimum': 30},
'expire': {'type': 'integer', 'maximum': 86400},
'callback': {'type': 'string'},
'html': {'type': 'boolean'}
},
'required': ['user_key'],
'additionalProperties': False
}
def notify(self, title, message, config):
"""
Sends a Pushover notification
:param str title: the message's title
:param str message: the message to send
:param dict config: The pushover config
"""
notification = {'token': config.get('api_key'), 'message': message, 'title': title,
'device': config.get('device'), 'priority': config.get('priority'), 'url': config.get('url'),
'url_title': config.get('url_title'), 'sound': config.get('sound'),
'retry': config.get('retry'), 'expire': config.get('expire'),
'callback': config.get('callback')}
# HTML parsing mode
if config.get('html'):
notification['html'] = 1
# Support multiple devices
if isinstance(notification['device'], list):
notification['device'] = ','.join(notification['device'])
# Special case, verify certain fields exists if priority is 2
priority = config.get('priority')
expire = config.get('expire')
retry = config.get('retry')
if priority == 2 and not all([expire, retry]):
log.warning('Priority set to 2 but fields "expire" and "retry" are not both present. Lowering priority to 1')
notification['priority'] = 1
if not isinstance(config['user_key'], list):
config['user_key'] = [config['user_key']]
for user in config['user_key']:
notification['user'] = user
try:
response = requests.post(PUSHOVER_URL, data=notification)
except RequestException as e:
if e.response is not None:
if e.response.status_code == 429:
reset_time = datetime.datetime.fromtimestamp(
int(e.response.headers['X-Limit-App-Reset'])).strftime('%Y-%m-%d %H:%M:%S')
error_message = 'Monthly pushover message limit reached. Next reset: %s' % reset_time
else:
error_message = e.response.json()['errors'][0]
else:
error_message = str(e)
raise PluginWarning(error_message)
reset_time = datetime.datetime.fromtimestamp(
int(response.headers['X-Limit-App-Reset'])).strftime('%Y-%m-%d %H:%M:%S')
remaining = response.headers['X-Limit-App-Remaining']
log.debug('Pushover notification sent. Notifications remaining until next reset: %s. '
'Next reset at: %s', remaining, reset_time)
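# A hedged sketch of calling the notifier directly (the user key and token are
# placeholders; inside FlexGet this method is normally driven by the notify
# plugin framework rather than called by hand):
#
#     PushoverNotifier().notify('Title', 'Body',
#                               {'user_key': '<USER_KEY>', 'api_key': '<TOKEN>'})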
@event('plugin.register')
def register_plugin():
plugin.register(PushoverNotifier, plugin_name, api_ver=2, interfaces=['notifiers'])
|
py
|
1a5a69728501c3e33bc3ef6c150b4e04f75e00b2
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class AndroidMAMPolicy(Resource):
"""Android Policy entity for Intune MAM.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param tags: Resource Tags
:type tags: dict
:param location: Resource Location
:type location: str
:param friendly_name:
:type friendly_name: str
:param description:
:type description: str
:param app_sharing_from_level: Possible values include: 'none',
'policyManagedApps', 'allApps'. Default value: "none" .
:type app_sharing_from_level: str
:param app_sharing_to_level: Possible values include: 'none',
'policyManagedApps', 'allApps'. Default value: "none" .
:type app_sharing_to_level: str
:param authentication: Possible values include: 'required',
'notRequired'. Default value: "required" .
:type authentication: str
:param clipboard_sharing_level: Possible values include: 'blocked',
'policyManagedApps', 'policyManagedAppsWithPasteIn', 'allApps'. Default
value: "blocked" .
:type clipboard_sharing_level: str
:param data_backup: Possible values include: 'allow', 'block'. Default
value: "allow" .
:type data_backup: str
:param file_sharing_save_as: Possible values include: 'allow', 'block'.
Default value: "allow" .
:type file_sharing_save_as: str
:param pin: Possible values include: 'required', 'notRequired'. Default
value: "required" .
:type pin: str
:param pin_num_retry:
:type pin_num_retry: int
:param device_compliance: Possible values include: 'enable', 'disable'.
Default value: "enable" .
:type device_compliance: str
:param managed_browser: Possible values include: 'required',
'notRequired'. Default value: "required" .
:type managed_browser: str
:param access_recheck_offline_timeout:
:type access_recheck_offline_timeout: timedelta
:param access_recheck_online_timeout:
:type access_recheck_online_timeout: timedelta
:param offline_wipe_timeout:
:type offline_wipe_timeout: timedelta
:ivar num_of_apps:
:vartype num_of_apps: int
:ivar group_status: Possible values include: 'notTargeted', 'targeted'.
Default value: "notTargeted" .
:vartype group_status: str
:ivar last_modified_time:
:vartype last_modified_time: datetime
:param screen_capture: Possible values include: 'allow', 'block'. Default
value: "allow" .
:type screen_capture: str
:param file_encryption: Possible values include: 'required',
'notRequired'. Default value: "required" .
:type file_encryption: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'friendly_name': {'required': True},
'num_of_apps': {'readonly': True},
'group_status': {'readonly': True},
'last_modified_time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'app_sharing_from_level': {'key': 'properties.appSharingFromLevel', 'type': 'str'},
'app_sharing_to_level': {'key': 'properties.appSharingToLevel', 'type': 'str'},
'authentication': {'key': 'properties.authentication', 'type': 'str'},
'clipboard_sharing_level': {'key': 'properties.clipboardSharingLevel', 'type': 'str'},
'data_backup': {'key': 'properties.dataBackup', 'type': 'str'},
'file_sharing_save_as': {'key': 'properties.fileSharingSaveAs', 'type': 'str'},
'pin': {'key': 'properties.pin', 'type': 'str'},
'pin_num_retry': {'key': 'properties.pinNumRetry', 'type': 'int'},
'device_compliance': {'key': 'properties.deviceCompliance', 'type': 'str'},
'managed_browser': {'key': 'properties.managedBrowser', 'type': 'str'},
'access_recheck_offline_timeout': {'key': 'properties.accessRecheckOfflineTimeout', 'type': 'duration'},
'access_recheck_online_timeout': {'key': 'properties.accessRecheckOnlineTimeout', 'type': 'duration'},
'offline_wipe_timeout': {'key': 'properties.offlineWipeTimeout', 'type': 'duration'},
'num_of_apps': {'key': 'properties.numOfApps', 'type': 'int'},
'group_status': {'key': 'properties.groupStatus', 'type': 'str'},
'last_modified_time': {'key': 'properties.lastModifiedTime', 'type': 'iso-8601'},
'screen_capture': {'key': 'properties.screenCapture', 'type': 'str'},
'file_encryption': {'key': 'properties.fileEncryption', 'type': 'str'},
}
def __init__(self, friendly_name, tags=None, location=None, description=None, app_sharing_from_level="none", app_sharing_to_level="none", authentication="required", clipboard_sharing_level="blocked", data_backup="allow", file_sharing_save_as="allow", pin="required", pin_num_retry=None, device_compliance="enable", managed_browser="required", access_recheck_offline_timeout=None, access_recheck_online_timeout=None, offline_wipe_timeout=None, screen_capture="allow", file_encryption="required"):
super(AndroidMAMPolicy, self).__init__(tags=tags, location=location)
self.friendly_name = friendly_name
self.description = description
self.app_sharing_from_level = app_sharing_from_level
self.app_sharing_to_level = app_sharing_to_level
self.authentication = authentication
self.clipboard_sharing_level = clipboard_sharing_level
self.data_backup = data_backup
self.file_sharing_save_as = file_sharing_save_as
self.pin = pin
self.pin_num_retry = pin_num_retry
self.device_compliance = device_compliance
self.managed_browser = managed_browser
self.access_recheck_offline_timeout = access_recheck_offline_timeout
self.access_recheck_online_timeout = access_recheck_online_timeout
self.offline_wipe_timeout = offline_wipe_timeout
self.num_of_apps = None
self.group_status = None
self.last_modified_time = None
self.screen_capture = screen_capture
self.file_encryption = file_encryption
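# A minimal instantiation sketch (values are illustrative; only friendly_name
# is required by this model, and nothing is sent to the Intune service here):
#
#     policy = AndroidMAMPolicy(friendly_name='example-android-policy',
#                               description='Block screen capture',
#                               screen_capture='block',
#                               pin='required', pin_num_retry=5)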
|
py
|
1a5a6aafb6e592e5b068ea73d0c7159493edd8f8
|
n1 = int(input("Enter a number: "))
n2 = n1 // 10
u = n1 % 10   # units digit
n3 = n2 // 10
d = n2 % 10   # tens digit
c = n3 % 10   # hundreds digit
m = n3 // 10  # thousands digit
print(f"Units: {u}")
print(f"Tens: {d}")
print(f"Hundreds: {c}")
print(f"Thousands: {m}")
|